//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "dagcombine"

STATISTIC(NodesCombined,    "Number of dag nodes combined");
STATISTIC(PreIndexedNodes,  "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed,      "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int,       "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads,      "Number of loads sliced");

namespace {
  static cl::opt<bool>
    CombinerAA("combiner-alias-analysis", cl::Hidden,
               cl::desc("Enable DAG combiner alias-analysis heuristics"));

  static cl::opt<bool>
    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
               cl::desc("Enable DAG combiner's use of IR alias analysis"));

  static cl::opt<bool>
    UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
            cl::desc("Enable DAG combiner's use of TBAA"));

#ifndef NDEBUG
  static cl::opt<std::string>
    CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
                       cl::desc("Only use DAG-combiner alias analysis in this"
                                " function"));
#endif

  /// Hidden option to stress test load slicing, i.e., when this option
  /// is enabled, load slicing bypasses most of its profitability guards.
  static cl::opt<bool>
  StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
                    cl::desc("Bypass the profitability model of load "
                             "slicing"),
                    cl::init(false));

  static cl::opt<bool>
    MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
                      cl::desc("DAG combiner may split indexing from loads"));

//------------------------------ DAGCombiner ---------------------------------//

  class DAGCombiner {
    SelectionDAG &DAG;
    const TargetLowering &TLI;
    CombineLevel Level;
    CodeGenOpt::Level OptLevel;
    bool LegalOperations;
    bool LegalTypes;
    bool ForCodeSize;

    /// \brief Worklist of all of the nodes that need to be simplified.
    ///
    /// This must behave as a stack -- new nodes to process are pushed onto the
    /// back and when processing we pop off of the back.
    ///
    /// The worklist will not contain duplicates but may contain null entries
    /// due to nodes being deleted from the underlying DAG.
    SmallVector<SDNode *, 64> Worklist;

    /// \brief Mapping from an SDNode to its position on the worklist.
    ///
    /// This is used to find and remove nodes from the worklist (by nulling
    /// them) when they are deleted from the underlying DAG. It relies on
    /// stable indices of nodes within the worklist.
    DenseMap<SDNode *, unsigned> WorklistMap;

    /// \brief Set of nodes which have been combined (at least once).
    ///
    /// This is used to allow us to reliably add any operands of a DAG node
    /// which have not yet been combined to the worklist.
    SmallPtrSet<SDNode *, 32> CombinedNodes;

    // AA - Used for DAG load/store alias analysis.
    AliasAnalysis &AA;

    /// When an instruction is simplified, add all users of the instruction to
    /// the work lists because they might get more simplified now.
    void AddUsersToWorklist(SDNode *N) {
      for (SDNode *Node : N->uses())
        AddToWorklist(Node);
    }

    /// Call the node-specific routine that folds each particular type of node.
    SDValue visit(SDNode *N);

  public:
    /// Add to the worklist making sure its instance is at the back (next to be
    /// processed).
    void AddToWorklist(SDNode *N) {
      // Skip handle nodes as they can't usefully be combined and confuse the
      // zero-use deletion strategy.
      if (N->getOpcode() == ISD::HANDLENODE)
        return;

      if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
        Worklist.push_back(N);
    }

    /// Remove all instances of N from the worklist.
    void removeFromWorklist(SDNode *N) {
      CombinedNodes.erase(N);

      auto It = WorklistMap.find(N);
      if (It == WorklistMap.end())
        return; // Not in the worklist.

      // Null out the entry rather than erasing it to avoid a linear operation.
      Worklist[It->second] = nullptr;
      WorklistMap.erase(It);
    }

    void deleteAndRecombine(SDNode *N);
    bool recursivelyDeleteUnusedNodes(SDNode *N);

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                      bool AddTo = true);

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
      return CombineTo(N, &Res, 1, AddTo);
    }

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                      bool AddTo = true) {
      SDValue To[] = { Res0, Res1 };
      return CombineTo(N, To, 2, AddTo);
    }

    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);

  private:

    /// Check the specified integer node value to see if it can be simplified
    /// or if things it uses can be simplified by bit propagation.
    /// If so, return true.
    bool SimplifyDemandedBits(SDValue Op) {
      unsigned BitWidth = Op.getScalarValueSizeInBits();
      APInt Demanded = APInt::getAllOnesValue(BitWidth);
      return SimplifyDemandedBits(Op, Demanded);
    }

    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);

    bool CombineToPreIndexedLoadStore(SDNode *N);
    bool CombineToPostIndexedLoadStore(SDNode *N);
    SDValue SplitIndexingFromLoad(LoadSDNode *LD);
    bool SliceUpLoad(SDNode *N);

    /// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
    /// load.
    ///
    /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
    /// \param InVecVT type of the input vector to EVE with bitcasts resolved.
    /// \param EltNo index of the vector element to load.
    /// \param OriginalLoad load that EVE came from to be replaced.
    /// \returns EVE on success, SDValue() on failure.
    SDValue ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
        SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad);
    void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
    SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
    SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue PromoteIntBinOp(SDValue Op);
    SDValue PromoteIntShiftOp(SDValue Op);
    SDValue PromoteExtend(SDValue Op);
    bool PromoteLoad(SDValue Op);

    void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
                         SDValue Trunc, SDValue ExtLoad, const SDLoc &DL,
                         ISD::NodeType ExtType);

    /// Call the node-specific routine that knows how to fold each
    /// particular type of node. If that doesn't do anything, try the
    /// target-specific DAG combines.
    SDValue combine(SDNode *N);

    // Visitation implementation - Implement dag node combining for different
    // node types. The semantics are as follows:
    // Return Value:
    //   SDValue.getNode() == 0 - No change was made
    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
    //   otherwise              - N should be replaced by the returned Operand.
    //
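    // For example, a visit routine that folds (add x, 0) -> x simply returns
    // the existing operand x, while a routine that rewrites a multi-result
    // node in place via CombineTo returns SDValue(N, 0) to signal that N has
    // already been handled.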
    SDValue visitTokenFactor(SDNode *N);
    SDValue visitMERGE_VALUES(SDNode *N);
    SDValue visitADD(SDNode *N);
    SDValue visitSUB(SDNode *N);
    SDValue visitADDC(SDNode *N);
    SDValue visitSUBC(SDNode *N);
    SDValue visitADDE(SDNode *N);
    SDValue visitSUBE(SDNode *N);
    SDValue visitMUL(SDNode *N);
    SDValue useDivRem(SDNode *N);
    SDValue visitSDIV(SDNode *N);
    SDValue visitUDIV(SDNode *N);
    SDValue visitREM(SDNode *N);
    SDValue visitMULHU(SDNode *N);
    SDValue visitMULHS(SDNode *N);
    SDValue visitSMUL_LOHI(SDNode *N);
    SDValue visitUMUL_LOHI(SDNode *N);
    SDValue visitSMULO(SDNode *N);
    SDValue visitUMULO(SDNode *N);
    SDValue visitIMINMAX(SDNode *N);
    SDValue visitAND(SDNode *N);
    SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *LocReference);
    SDValue visitOR(SDNode *N);
    SDValue visitORLike(SDValue N0, SDValue N1, SDNode *LocReference);
    SDValue visitXOR(SDNode *N);
    SDValue SimplifyVBinOp(SDNode *N);
    SDValue visitSHL(SDNode *N);
    SDValue visitSRA(SDNode *N);
    SDValue visitSRL(SDNode *N);
    SDValue visitRotate(SDNode *N);
    SDValue visitBSWAP(SDNode *N);
    SDValue visitBITREVERSE(SDNode *N);
    SDValue visitCTLZ(SDNode *N);
    SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTTZ(SDNode *N);
    SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTPOP(SDNode *N);
    SDValue visitSELECT(SDNode *N);
    SDValue visitVSELECT(SDNode *N);
    SDValue visitSELECT_CC(SDNode *N);
    SDValue visitSETCC(SDNode *N);
    SDValue visitSETCCE(SDNode *N);
    SDValue visitSIGN_EXTEND(SDNode *N);
    SDValue visitZERO_EXTEND(SDNode *N);
    SDValue visitANY_EXTEND(SDNode *N);
    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
    SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
    SDValue visitZERO_EXTEND_VECTOR_INREG(SDNode *N);
    SDValue visitTRUNCATE(SDNode *N);
    SDValue visitBITCAST(SDNode *N);
    SDValue visitBUILD_PAIR(SDNode *N);
    SDValue visitFADD(SDNode *N);
    SDValue visitFSUB(SDNode *N);
    SDValue visitFMUL(SDNode *N);
    SDValue visitFMA(SDNode *N);
    SDValue visitFDIV(SDNode *N);
    SDValue visitFREM(SDNode *N);
    SDValue visitFSQRT(SDNode *N);
    SDValue visitFCOPYSIGN(SDNode *N);
    SDValue visitSINT_TO_FP(SDNode *N);
    SDValue visitUINT_TO_FP(SDNode *N);
    SDValue visitFP_TO_SINT(SDNode *N);
    SDValue visitFP_TO_UINT(SDNode *N);
    SDValue visitFP_ROUND(SDNode *N);
    SDValue visitFP_ROUND_INREG(SDNode *N);
    SDValue visitFP_EXTEND(SDNode *N);
    SDValue visitFNEG(SDNode *N);
    SDValue visitFABS(SDNode *N);
    SDValue visitFCEIL(SDNode *N);
    SDValue visitFTRUNC(SDNode *N);
    SDValue visitFFLOOR(SDNode *N);
    SDValue visitFMINNUM(SDNode *N);
    SDValue visitFMAXNUM(SDNode *N);
    SDValue visitBRCOND(SDNode *N);
    SDValue visitBR_CC(SDNode *N);
    SDValue visitLOAD(SDNode *N);

    SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain);
    SDValue replaceStoreOfFPConstant(StoreSDNode *ST);

    SDValue visitSTORE(SDNode *N);
    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
    SDValue visitBUILD_VECTOR(SDNode *N);
    SDValue visitCONCAT_VECTORS(SDNode *N);
    SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
    SDValue visitVECTOR_SHUFFLE(SDNode *N);
    SDValue visitSCALAR_TO_VECTOR(SDNode *N);
    SDValue visitINSERT_SUBVECTOR(SDNode *N);
    SDValue visitMLOAD(SDNode *N);
    SDValue visitMSTORE(SDNode *N);
    SDValue visitMGATHER(SDNode *N);
    SDValue visitMSCATTER(SDNode *N);
    SDValue visitFP_TO_FP16(SDNode *N);
    SDValue visitFP16_TO_FP(SDNode *N);

    SDValue visitFADDForFMACombine(SDNode *N);
    SDValue visitFSUBForFMACombine(SDNode *N);
    SDValue visitFMULForFMADistributiveCombine(SDNode *N);

    SDValue XformToShuffleWithZero(SDNode *N);
    SDValue ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue LHS,
                           SDValue RHS);

    SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);

    SDValue foldSelectOfConstants(SDNode *N);
    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
    SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
                           SDValue N2);
    SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
                             SDValue N2, SDValue N3, ISD::CondCode CC,
                             bool NotExtCompare = false);
    SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1,
                                   SDValue N2, SDValue N3, ISD::CondCode CC);
    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                          const SDLoc &DL, bool foldBooleans = true);

    bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                           SDValue &CC) const;
    bool isOneUseSetCC(SDValue N) const;

    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                       unsigned HiOp);
    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
    SDValue CombineExtLoad(SDNode *N);
    SDValue combineRepeatedFPDivisors(SDNode *N);
    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
    SDValue BuildSDIV(SDNode *N);
    SDValue BuildSDIVPow2(SDNode *N);
    SDValue BuildUDIV(SDNode *N);
    SDValue BuildLogBase2(SDValue Op, const SDLoc &DL);
    SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags);
    SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags);
    SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags);
    SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags, bool Recip);
    SDValue buildSqrtNROneConst(SDValue Op, SDValue Est, unsigned Iterations,
                                SDNodeFlags *Flags, bool Reciprocal);
    SDValue buildSqrtNRTwoConst(SDValue Op, SDValue Est, unsigned Iterations,
                                SDNodeFlags *Flags, bool Reciprocal);
    SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                               bool DemandHighBits = true);
    SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
    SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
                              SDValue InnerPos, SDValue InnerNeg,
                              unsigned PosOpcode, unsigned NegOpcode,
                              const SDLoc &DL);
    SDNode *MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
    SDValue ReduceLoadWidth(SDNode *N);
    SDValue ReduceLoadOpStoreWidth(SDNode *N);
    SDValue splitMergedValStore(StoreSDNode *ST);
    SDValue TransformFPLoadStorePair(SDNode *N);
    SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
    SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
    SDValue reduceBuildVecToShuffle(SDNode *N);
    SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                  ArrayRef<int> VectorMask, SDValue VecIn1,
                                  SDValue VecIn2, unsigned LeftIdx);

    SDValue GetDemandedBits(SDValue V, const APInt &Mask);

    /// Walk up chain skipping non-aliasing memory nodes,
    /// looking for aliasing nodes and adding them to the Aliases vector.
    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
                          SmallVectorImpl<SDValue> &Aliases);

    /// Return true if there is any possibility that the two addresses overlap.
    bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const;

    /// Walk up chain skipping non-aliasing memory nodes, looking for a better
    /// chain (aliasing node).
    SDValue FindBetterChain(SDNode *N, SDValue Chain);

    /// Try to replace a store and any possibly adjacent stores on
    /// consecutive chains with better chains. Return true only if St is
    /// replaced.
    ///
    /// Notice that other chains may still be replaced even if the function
    /// returns false.
    bool findBetterNeighborChains(StoreSDNode *St);

    /// Match "(X shl/srl V1) & V2" where V2 may not be present.
    bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask);

    /// Holds a pointer to an LSBaseSDNode as well as information on where it
    /// is located in a sequence of memory operations connected by a chain.
    struct MemOpLink {
      MemOpLink(LSBaseSDNode *N, int64_t Offset, unsigned Seq)
          : MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) {}
      // Ptr to the mem node.
      LSBaseSDNode *MemNode;
      // Offset from the base ptr.
      int64_t OffsetFromBase;
      // The sequence number of this mem node.
      // Lowest mem operand in the DAG starts at zero.
      unsigned SequenceNum;
    };

    /// This is a helper function for visitMUL to check the profitability
    /// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
    /// MulNode is the original multiply, AddNode is (add x, c1),
    /// and ConstNode is c2.
    bool isMulAddWithConstProfitable(SDNode *MulNode,
                                     SDValue &AddNode,
                                     SDValue &ConstNode);

    /// This is a helper function for MergeStoresOfConstantsOrVecElts. Returns
    /// a constant build_vector of the stored constant values in Stores.
    SDValue getMergedConstantVectorStore(SelectionDAG &DAG, const SDLoc &SL,
                                         ArrayRef<MemOpLink> Stores,
                                         SmallVectorImpl<SDValue> &Chains,
                                         EVT Ty) const;

    /// This is a helper function for visitAND and visitZERO_EXTEND. Returns
    /// true if the (and (load x) c) pattern matches an extload. ExtVT returns
    /// the type of the loaded value to be extended. LoadedVT returns the type
    /// of the original loaded value. NarrowLoad returns whether the load would
    /// need to be narrowed in order to match.
    bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
                          EVT LoadResultTy, EVT &ExtVT, EVT &LoadedVT,
                          bool &NarrowLoad);

    /// This is a helper function for MergeConsecutiveStores. When the source
    /// elements of the consecutive stores are all constants or all extracted
    /// vector elements, try to merge them into one larger store.
    /// \return true if stores were merged (the merged stores are always a
    /// prefix of \p StoreNodes).
    bool MergeStoresOfConstantsOrVecElts(
        SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, unsigned NumStores,
        bool IsConstantSrc, bool UseVector);

    /// This is a helper function for MergeConsecutiveStores.
    /// Stores that may be merged are placed in StoreNodes.
    /// Loads that may alias with those stores are placed in AliasLoadNodes.
    void getStoreMergeAndAliasCandidates(
        StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
        SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes);

    /// Helper function for MergeConsecutiveStores. Checks if
    /// candidate stores have indirect dependency through their
    /// operands.
    /// \return True if it is safe to merge.
    bool checkMergeStoreCandidatesForDependencies(
        SmallVectorImpl<MemOpLink> &StoreNodes);

    /// Merge consecutive store operations into a wide store.
    /// This optimization uses wide integers or vectors when possible.
    /// \return true if stores were merged (the affected nodes are stored as
    /// a prefix in \p StoreNodes).
    bool MergeConsecutiveStores(StoreSDNode *N,
                                SmallVectorImpl<MemOpLink> &StoreNodes);

    /// \brief Try to transform a truncation where C is a constant:
    ///   (trunc (and X, C)) -> (and (trunc X), (trunc C))
    ///
    /// \p N needs to be a truncation and its first operand an AND. Other
    /// requirements are checked by the function (e.g. that trunc is
    /// single-use); if they are not met, an empty SDValue is returned.
    SDValue distributeTruncateThroughAnd(SDNode *N);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
        : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
          OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
      ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
    }

    /// Runs the dag combiner on all nodes in the work list.
    void Run(CombineLevel AtLevel);

    SelectionDAG &getDAG() const { return DAG; }

    /// Returns a type large enough to hold any valid shift amount - before
    /// type legalization these can be huge.
    EVT getShiftAmountTy(EVT LHSTy) {
      assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
      if (LHSTy.isVector())
        return LHSTy;
      auto &DL = DAG.getDataLayout();
      return LegalTypes ? TLI.getScalarShiftAmountTy(DL, LHSTy)
                        : TLI.getPointerTy(DL);
    }

    /// This method returns true if we are running before type legalization or
    /// if the specified VT is legal.
    bool isTypeLegal(const EVT &VT) {
      if (!LegalTypes) return true;
      return TLI.isTypeLegal(VT);
    }

    /// Convenience wrapper around TargetLowering::getSetCCResultType
    EVT getSetCCResultType(EVT VT) const {
      return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
    }
  };
}
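
// Instances of the listener below are created on the stack around
// ReplaceAllUsesWith calls (see e.g. DAGCombiner::CombineTo), so any node
// deleted during the replacement is automatically dropped from the
// combiner's worklist.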

namespace {
/// This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
class WorklistRemover : public SelectionDAG::DAGUpdateListener {
  DAGCombiner &DC;
public:
  explicit WorklistRemover(DAGCombiner &dc)
      : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}

  void NodeDeleted(SDNode *N, SDNode *E) override {
    DC.removeFromWorklist(N);
  }
};
}

//===----------------------------------------------------------------------===//
//  TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//

void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->AddToWorklist(N);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
}

void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

void DAGCombiner::deleteAndRecombine(SDNode *N) {
  removeFromWorklist(N);

  // If the operands of this node are only used by the node, they will now be
  // dead. Make sure to re-visit them and recursively delete dead nodes.
  for (const SDValue &Op : N->ops())
    // For an operand generating multiple values, one of the values may
    // become dead allowing further simplification (e.g. split index
    // arithmetic from an indexed load).
    if (Op->hasOneUse() || Op->getNumValues() > 1)
      AddToWorklist(Op.getNode());

  DAG.DeleteNode(N);
}

/// Return 1 if we can compute the negated form of the specified expression
/// for the same cost as the expression itself, or 2 if we can compute the
/// negated form more cheaply than the expression itself. Returns 0 if the
/// negated form cannot be computed for free.
static char isNegatibleForFree(SDValue Op, bool LegalOperations,
                               const TargetLowering &TLI,
                               const TargetOptions *Options,
                               unsigned Depth = 0) {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return 2;

  // Don't allow anything with multiple uses.
  if (!Op.hasOneUse()) return 0;

  // Don't recurse exponentially.
  if (Depth > 6) return 0;

  switch (Op.getOpcode()) {
  default: return 0;
  case ISD::ConstantFP:
    // Don't invert constant FP values after legalize. The negated constant
    // isn't necessarily legal.
    return LegalOperations ? 0 : 1;
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
    if (!Options->UnsafeFPMath) return 0;

    // After operation legalization, it might not be legal to create new FSUBs.
    if (LegalOperations &&
        !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType()))
      return 0;

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
                                    Options, Depth + 1))
      return V;
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
                              Depth + 1);
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!Options->UnsafeFPMath && !Op.getNode()->getFlags()->hasNoSignedZeros())
      return 0;

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return 1;

  case ISD::FMUL:
  case ISD::FDIV:
    if (Options->HonorSignDependentRoundingFPMath()) return 0;

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
                                    Options, Depth + 1))
      return V;

    return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
                              Depth + 1);

  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::FSIN:
    return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
                              Depth + 1);
  }
}

/// If isNegatibleForFree returned a nonzero value, return the newly negated
/// expression.
static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                    bool LegalOperations, unsigned Depth = 0) {
  const TargetOptions &Options = DAG.getTarget().Options;
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);

  // Don't allow anything with multiple uses.
  assert(Op.hasOneUse() && "Unknown reuse!");

  assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");

  const SDNodeFlags *Flags = Op.getNode()->getFlags();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown code");
  case ISD::ConstantFP: {
    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
    V.changeSign();
    return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType());
  }
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
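    // isNegatibleForFree already vetted this fold, so unsafe FP math must
    // still be enabled here. E.g. -(A + (fneg B)) can be rebuilt below as
    // (fsub B, A) without materializing a new FNEG node.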
    assert(Options.UnsafeFPMath);

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
                           DAG.getTargetLoweringInfo(), &Options, Depth+1))
      return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, Depth+1),
                         Op.getOperand(1), Flags);
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, Depth+1),
                       Op.getOperand(0), Flags);
  case ISD::FSUB:
    // fold (fneg (fsub 0, B)) -> B
    if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
      if (N0CFP->isZero())
        return Op.getOperand(1);

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(0), Flags);

  case ISD::FMUL:
  case ISD::FDIV:
    assert(!Options.HonorSignDependentRoundingFPMath());

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
    if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
                           DAG.getTargetLoweringInfo(), &Options, Depth+1))
      return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, Depth+1),
                         Op.getOperand(1), Flags);

    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       Op.getOperand(0),
                       GetNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, Depth+1), Flags);

  case ISD::FP_EXTEND:
  case ISD::FSIN:
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, Depth+1));
  case ISD::FP_ROUND:
    return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, Depth+1),
                       Op.getOperand(1));
  }
}

// APInts must be the same size for most operations; this helper function
// zero extends the shorter of the pair so that they match.
// We provide an Offset so that we can create bitwidths that won't overflow.
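// For example, matching a 4-bit LHS against a 7-bit RHS with Offset = 1
// widens both operands to 8 bits before they are combined.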
static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
  unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
  LHS = LHS.zextOrSelf(Bits);
  RHS = RHS.zextOrSelf(Bits);
}

// Return true if this node is a setcc, or is a select_cc
// that selects between the target values used for true and false, making it
// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
// the appropriate nodes based on the type of node we are checking. This
// simplifies life a bit for the callers.
bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                                    SDValue &CC) const {
  if (N.getOpcode() == ISD::SETCC) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(2);
    return true;
  }

  if (N.getOpcode() != ISD::SELECT_CC ||
      !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
      !TLI.isConstFalseVal(N.getOperand(3).getNode()))
    return false;

  if (TLI.getBooleanContents(N.getValueType()) ==
      TargetLowering::UndefinedBooleanContent)
    return false;

  LHS = N.getOperand(0);
  RHS = N.getOperand(1);
  CC  = N.getOperand(4);
  return true;
}

/// Return true if this is a SetCC-equivalent operation with only one use.
/// If this is true, it allows the users to invert the operation for free when
/// it is profitable to do so.
bool DAGCombiner::isOneUseSetCC(SDValue N) const {
  SDValue N0, N1, N2;
  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
    return true;
  return false;
}

// \brief Returns the SDNode if it is a constant float BuildVector
// or constant float.
static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();
  return nullptr;
}

// Determines if it is a constant integer or a build vector of constant
// integers (and undefs).
// Do not permit build vector implicit truncation.
static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
  if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N))
    return !(Const->isOpaque() && NoOpaques);
  if (N.getOpcode() != ISD::BUILD_VECTOR)
    return false;
  unsigned BitWidth = N.getScalarValueSizeInBits();
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
    if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
        (Const->isOpaque() && NoOpaques))
      return false;
  }
  return true;
}

// Determines if it is a constant null integer or a splatted vector of a
// constant null integer (with no undefs).
// Build vector implicit truncation is not an issue for null values.
static bool isNullConstantOrNullSplatConstant(SDValue N) {
  if (ConstantSDNode *Splat = isConstOrConstSplat(N))
    return Splat->isNullValue();
  return false;
}

// Determines if it is a constant integer of one or a splatted vector of a
// constant integer of one (with no undefs).
// Do not permit build vector implicit truncation.
static bool isOneConstantOrOneSplatConstant(SDValue N) {
  unsigned BitWidth = N.getScalarValueSizeInBits();
  if (ConstantSDNode *Splat = isConstOrConstSplat(N))
    return Splat->isOne() && Splat->getAPIntValue().getBitWidth() == BitWidth;
  return false;
}

// Determines if it is a constant integer of all ones or a splatted vector of a
// constant integer of all ones (with no undefs).
// Do not permit build vector implicit truncation.
static bool isAllOnesConstantOrAllOnesSplatConstant(SDValue N) {
  unsigned BitWidth = N.getScalarValueSizeInBits();
  if (ConstantSDNode *Splat = isConstOrConstSplat(N))
    return Splat->isAllOnesValue() &&
           Splat->getAPIntValue().getBitWidth() == BitWidth;
  return false;
}

// Determines if a BUILD_VECTOR is composed of all-constants possibly mixed
// with undefs.
static bool isAnyConstantBuildVector(const SDNode *N) {
  return ISD::isBuildVectorOfConstantSDNodes(N) ||
         ISD::isBuildVectorOfConstantFPSDNodes(N);
}

/// Reassociate a commutative binary operation so that constants are grouped
/// together and can be folded:
///   (op (op x, c1), c2) -> (op x, (op c1, c2)), and
///   (op (op x, c1), y)  -> (op (op x, y), c1) when the inner op has one use.
SDValue DAGCombiner::ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
                                    SDValue N1) {
  EVT VT = N0.getValueType();
  if (N0.getOpcode() == Opc) {
    if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
      if (SDNode *R = DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
        // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
        if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, L, R))
          return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
        return SDValue();
      }
      if (N0.hasOneUse()) {
        // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one
        // use
        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
        if (!OpNode.getNode())
          return SDValue();
        AddToWorklist(OpNode.getNode());
        return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
      }
    }
  }

  if (N1.getOpcode() == Opc) {
    if (SDNode *R = DAG.isConstantIntBuildVectorOrConstantInt(N1.getOperand(1))) {
      if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
        // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
        if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, R, L))
          return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
        return SDValue();
      }
      if (N1.hasOneUse()) {
        // reassoc. (op x, (op y, c1)) -> (op (op x, y), c1) iff x+c1 has one
        // use
        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0, N1.getOperand(0));
        if (!OpNode.getNode())
          return SDValue();
        AddToWorklist(OpNode.getNode());
        return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
      }
    }
  }

  return SDValue();
}

SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                               bool AddTo) {
  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.1 ";
        N->dump(&DAG);
        dbgs() << "\nWith: ";
        To[0].getNode()->dump(&DAG);
        dbgs() << " and " << NumTo-1 << " other values\n");
  for (unsigned i = 0, e = NumTo; i != e; ++i)
    assert((!To[i].getNode() ||
            N->getValueType(i) == To[i].getValueType()) &&
           "Cannot combine value to value of different type!");

  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesWith(N, To);
  if (AddTo) {
    // Push the new nodes and any users onto the worklist
    for (unsigned i = 0, e = NumTo; i != e; ++i) {
      if (To[i].getNode()) {
        AddToWorklist(To[i].getNode());
        AddUsersToWorklist(To[i].getNode());
      }
    }
  }

  // Finally, if the node is now dead, remove it from the graph. The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (N->use_empty())
    deleteAndRecombine(N);
  return SDValue(N, 0);
}
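
// A typical use of the multi-result CombineTo above rewires both results of
// a load at once, e.g. CombineTo(LoadN, NewValue, NewChain), so that value
// users and chain users are updated together before the old load is deleted.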

void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  // Replace all uses. If any nodes become isomorphic to other nodes and
  // are deleted, make sure to remove them from our worklist.
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);

  // Push the new node and any (possibly new) users onto the worklist.
  AddToWorklist(TLO.New.getNode());
  AddUsersToWorklist(TLO.New.getNode());

  // Finally, if the node is now dead, remove it from the graph. The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (TLO.Old.getNode()->use_empty())
    deleteAndRecombine(TLO.Old.getNode());
}

/// Check the specified integer node value to see if it can be simplified or if
/// things it uses can be simplified by bit propagation. If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
  TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
  APInt KnownZero, KnownOne;
  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    return false;

  // Revisit the node.
  AddToWorklist(Op.getNode());

  // Replace the old value with the new one.
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.2 ";
        TLO.Old.getNode()->dump(&DAG);
        dbgs() << "\nWith: ";
        TLO.New.getNode()->dump(&DAG);
        dbgs() << '\n');

  CommitTargetLoweringOpt(TLO);
  return true;
}

void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
  SDLoc DL(Load);
  EVT VT = Load->getValueType(0);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0));

  DEBUG(dbgs() << "\nReplacing.9 ";
        Load->dump(&DAG);
        dbgs() << "\nWith: ";
        Trunc.getNode()->dump(&DAG);
        dbgs() << '\n');
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
  deleteAndRecombine(Load);
  AddToWorklist(Trunc.getNode());
}

/// Promote \p Op to type \p PVT. An unindexed load is rebuilt directly as an
/// extending load of PVT; in that case \p Replace is set so the caller knows
/// the original load's uses must be replaced.
SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
  Replace = false;
  SDLoc DL(Op);
  if (ISD::isUNINDEXEDLoad(Op.getNode())) {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD
                                                       : ISD::EXTLOAD)
      : LD->getExtensionType();
    Replace = true;
    return DAG.getExtLoad(ExtType, DL, PVT,
                          LD->getChain(), LD->getBasePtr(),
                          MemVT, LD->getMemOperand());
  }

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::AssertSext:
    return DAG.getNode(ISD::AssertSext, DL, PVT,
                       SExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::AssertZext:
    return DAG.getNode(ISD::AssertZext, DL, PVT,
                       ZExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::Constant: {
    unsigned ExtOpc =
      Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, DL, PVT, Op);
  }
  }

  if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
    return SDValue();
  return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op);
}

/// Promote \p Op to type \p PVT and sign-extend the promoted value back
/// in-register so its low bits keep the original value.
SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
  if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
    return SDValue();
  EVT OldVT = Op.getValueType();
  SDLoc DL(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp,
                     DAG.getValueType(OldVT));
}

/// Promote \p Op to type \p PVT and zero-extend the promoted value back
/// in-register so its low bits keep the original value.
SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
  EVT OldVT = Op.getValueType();
  SDLoc DL(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getZeroExtendInReg(NewOp, DL, OldVT);
}

/// Promote the specified integer binary operation if the target indicates it
/// is beneficial. e.g. On x86, it's usually better to promote i16 operations
/// to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace0 = false;
    SDValue N0 = Op.getOperand(0);
    SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
    if (!NN0.getNode())
      return SDValue();

    bool Replace1 = false;
    SDValue N1 = Op.getOperand(1);
    SDValue NN1;
    if (N0 == N1)
      NN1 = NN0;
    else {
      NN1 = PromoteOperand(N1, PVT, Replace1);
      if (!NN1.getNode())
        return SDValue();
    }

    AddToWorklist(NN0.getNode());
    if (NN1.getNode())
      AddToWorklist(NN1.getNode());

    if (Replace0)
      ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
    if (Replace1)
      ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    SDLoc DL(Op);
    return DAG.getNode(ISD::TRUNCATE, DL, VT,
                       DAG.getNode(Opc, DL, PVT, NN0, NN1));
  }
  return SDValue();
}
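
// For example, with i16 undesirable on x86, PromoteIntBinOp above rewrites
// an i16 add as (i16 (trunc (i32 add (anyext a), (anyext b)))), so the
// arithmetic itself executes in the wider, more cheaply encoded type.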

/// Promote the specified integer shift operation if the target indicates it
/// is beneficial. e.g. On x86, it's usually better to promote i16 operations
/// to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace = false;
    SDValue N0 = Op.getOperand(0);
    if (Opc == ISD::SRA)
      N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
    else if (Opc == ISD::SRL)
      N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
    else
      N0 = PromoteOperand(N0, PVT, Replace);
    if (!N0.getNode())
      return SDValue();

    AddToWorklist(N0.getNode());
    if (Replace)
      ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    SDLoc DL(Op);
    return DAG.getNode(ISD::TRUNCATE, DL, VT,
                       DAG.getNode(Opc, DL, PVT, N0, Op.getOperand(1)));
  }
  return SDValue();
}

/// Promote the specified integer extension if the target prefers a wider
/// type, collapsing extension chains, e.g. (aext (zext x)) -> (zext x).
SDValue DAGCombiner::PromoteExtend(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");
    // fold (aext (aext x)) -> (aext x)
    // fold (aext (zext x)) -> (zext x)
    // fold (aext (sext x)) -> (sext x)
    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
  }
  return SDValue();
}

/// Promote an unindexed integer load to a target-preferred wider type by
/// rebuilding it as an extending load and truncating the result.
bool DAGCombiner::PromoteLoad(SDValue Op) {
  if (!LegalOperations)
    return false;

  if (!ISD::isUNINDEXEDLoad(Op.getNode()))
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return false;

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return false;

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    SDLoc DL(Op);
    SDNode *N = Op.getNode();
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD
                                                       : ISD::EXTLOAD)
      : LD->getExtensionType();
    SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT,
                                   LD->getChain(), LD->getBasePtr(),
                                   MemVT, LD->getMemOperand());
    SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);

    DEBUG(dbgs() << "\nPromoting ";
          N->dump(&DAG);
          dbgs() << "\nTo: ";
          Result.getNode()->dump(&DAG);
          dbgs() << '\n');
    WorklistRemover DeadNodes(*this);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
    deleteAndRecombine(N);
    AddToWorklist(Result.getNode());
    return true;
  }
  return false;
}

/// \brief Recursively delete a node which has no uses and any operands for
/// which it is the only use.
///
/// Note that this both deletes the nodes and removes them from the worklist.
/// It also adds any nodes that have had a user deleted to the worklist, as
/// they may now have only one use and be subject to other combines.
bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
  if (!N->use_empty())
    return false;

  SmallSetVector<SDNode *, 16> Nodes;
  Nodes.insert(N);
  do {
    N = Nodes.pop_back_val();
    if (!N)
      continue;

    if (N->use_empty()) {
      for (const SDValue &ChildN : N->op_values())
        Nodes.insert(ChildN.getNode());

      removeFromWorklist(N);
      DAG.DeleteNode(N);
    } else {
      AddToWorklist(N);
    }
  } while (!Nodes.empty());
  return true;
}

//===----------------------------------------------------------------------===//
//  Main DAG Combiner implementation
//===----------------------------------------------------------------------===//

void DAGCombiner::Run(CombineLevel AtLevel) {
  // Set the instance variables, so that the various visit routines may use
  // them.
  Level = AtLevel;
  LegalOperations = Level >= AfterLegalizeVectorOps;
  LegalTypes = Level >= AfterLegalizeTypes;

  // Add all the dag nodes to the worklist.
  for (SDNode &Node : DAG.allnodes())
    AddToWorklist(&Node);

  // Create a dummy node (which is not added to allnodes), that adds a
  // reference to the root node, preventing it from being deleted, and
  // tracking any changes of the root.
  HandleSDNode Dummy(DAG.getRoot());

  // While the worklist isn't empty, find a node and try to combine it.
  while (!WorklistMap.empty()) {
    SDNode *N;
    // The Worklist holds the SDNodes in order, but it may contain null
    // entries.
    do {
      N = Worklist.pop_back_val();
    } while (!N);

    bool GoodWorklistEntry = WorklistMap.erase(N);
    (void)GoodWorklistEntry;
    assert(GoodWorklistEntry &&
           "Found a worklist entry without a corresponding map entry!");

    // If N has no uses, it is dead. Make sure to revisit all N's operands once
    // N is deleted from the DAG, since they too may now be dead or may have a
    // reduced number of uses, allowing other xforms.
    if (recursivelyDeleteUnusedNodes(N))
      continue;

    WorklistRemover DeadNodes(*this);

    // If this combine is running after legalizing the DAG, re-legalize any
    // nodes pulled off the worklist.
    if (Level == AfterLegalizeDAG) {
      SmallSetVector<SDNode *, 16> UpdatedNodes;
      bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);

      for (SDNode *LN : UpdatedNodes) {
        AddToWorklist(LN);
        AddUsersToWorklist(LN);
      }
      if (!NIsValid)
        continue;
    }

    DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));

    // Add any operands of the new node which have not yet been combined to the
    // worklist as well. Because the worklist uniques things already, this
    // won't repeatedly process the same operand.
    CombinedNodes.insert(N);
    for (const SDValue &ChildN : N->op_values())
      if (!CombinedNodes.count(ChildN.getNode()))
        AddToWorklist(ChildN.getNode());

    SDValue RV = combine(N);

    if (!RV.getNode())
      continue;

    ++NodesCombined;

    // If we get back the same node we passed in, rather than a new node or
    // zero, we know that the node must have defined multiple values and
    // CombineTo was used. Since CombineTo takes care of the worklist
    // mechanics for us, we have no work to do in this case.
    if (RV.getNode() == N)
      continue;

    assert(N->getOpcode() != ISD::DELETED_NODE &&
           RV.getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned new node!");

    DEBUG(dbgs() << " ... into: ";
          RV.getNode()->dump(&DAG));

    if (N->getNumValues() == RV.getNode()->getNumValues())
      DAG.ReplaceAllUsesWith(N, RV.getNode());
    else {
      assert(N->getValueType(0) == RV.getValueType() &&
             N->getNumValues() == 1 && "Type mismatch");
      SDValue OpV = RV;
      DAG.ReplaceAllUsesWith(N, &OpV);
    }

    // Push the new node and any users onto the worklist
    AddToWorklist(RV.getNode());
    AddUsersToWorklist(RV.getNode());

    // Finally, if the node is now dead, remove it from the graph. The node
    // may not be dead if the replacement process recursively simplified to
    // something else needing this node. This will also take care of adding any
    // operands which have lost a user to the worklist.
    recursivelyDeleteUnusedNodes(N);
  }

  // If the root changed (e.g. it was a dead load), update the root.
  DAG.setRoot(Dummy.getValue());
  DAG.RemoveDeadNodes();
}

SDValue DAGCombiner::visit(SDNode *N) {
  switch (N->getOpcode()) {
  default: break;
  case ISD::TokenFactor:        return visitTokenFactor(N);
  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
  case ISD::ADD:                return visitADD(N);
  case ISD::SUB:                return visitSUB(N);
  case ISD::ADDC:               return visitADDC(N);
  case ISD::SUBC:               return visitSUBC(N);
  case ISD::ADDE:               return visitADDE(N);
  case ISD::SUBE:               return visitSUBE(N);
  case ISD::MUL:                return visitMUL(N);
  case ISD::SDIV:               return visitSDIV(N);
  case ISD::UDIV:               return visitUDIV(N);
  case ISD::SREM:
  case ISD::UREM:               return visitREM(N);
  case ISD::MULHU:              return visitMULHU(N);
  case ISD::MULHS:              return visitMULHS(N);
  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
  case ISD::SMULO:              return visitSMULO(N);
  case ISD::UMULO:              return visitUMULO(N);
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:               return visitIMINMAX(N);
  case ISD::AND:                return visitAND(N);
  case ISD::OR:                 return visitOR(N);
  case ISD::XOR:                return visitXOR(N);
  case ISD::SHL:                return visitSHL(N);
  case ISD::SRA:                return visitSRA(N);
  case ISD::SRL:                return visitSRL(N);
  case ISD::ROTR:
  case ISD::ROTL:               return visitRotate(N);
  case ISD::BSWAP:              return visitBSWAP(N);
  case ISD::BITREVERSE:         return visitBITREVERSE(N);
  case ISD::CTLZ:               return visitCTLZ(N);
  case ISD::CTLZ_ZERO_UNDEF:    return visitCTLZ_ZERO_UNDEF(N);
  case ISD::CTTZ:               return visitCTTZ(N);
  case ISD::CTTZ_ZERO_UNDEF:    return visitCTTZ_ZERO_UNDEF(N);
  case ISD::CTPOP:              return visitCTPOP(N);
  case ISD::SELECT:             return visitSELECT(N);
  case ISD::VSELECT:            return visitVSELECT(N);
  case ISD::SELECT_CC:          return visitSELECT_CC(N);
  case ISD::SETCC:              return visitSETCC(N);
  case ISD::SETCCE:             return visitSETCCE(N);
  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
  case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
  case ISD::ZERO_EXTEND_VECTOR_INREG: return visitZERO_EXTEND_VECTOR_INREG(N);
  case ISD::TRUNCATE:           return visitTRUNCATE(N);
  case ISD::BITCAST:            return visitBITCAST(N);
  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
  case ISD::FADD:               return visitFADD(N);
  case ISD::FSUB:               return visitFSUB(N);
  case ISD::FMUL:               return visitFMUL(N);
  case ISD::FMA:                return visitFMA(N);
  case ISD::FDIV:               return visitFDIV(N);
  case ISD::FREM:               return visitFREM(N);
  case ISD::FSQRT:              return visitFSQRT(N);
  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
  case ISD::FP_ROUND:           return visitFP_ROUND(N);
  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
  case ISD::FNEG:               return visitFNEG(N);
  case ISD::FABS:               return visitFABS(N);
  case ISD::FFLOOR:             return visitFFLOOR(N);
  case ISD::FMINNUM:            return visitFMINNUM(N);
  case ISD::FMAXNUM:            return visitFMAXNUM(N);
  case ISD::FCEIL:              return visitFCEIL(N);
  case ISD::FTRUNC:             return visitFTRUNC(N);
  case ISD::BRCOND:             return visitBRCOND(N);
  case ISD::BR_CC:              return visitBR_CC(N);
  case ISD::LOAD:               return visitLOAD(N);
  case ISD::STORE:              return visitSTORE(N);
  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
  case ISD::EXTRACT_SUBVECTOR:  return visitEXTRACT_SUBVECTOR(N);
  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
  case ISD::SCALAR_TO_VECTOR:   return visitSCALAR_TO_VECTOR(N);
  case ISD::INSERT_SUBVECTOR:   return visitINSERT_SUBVECTOR(N);
  case ISD::MGATHER:            return visitMGATHER(N);
  case ISD::MLOAD:              return visitMLOAD(N);
  case ISD::MSCATTER:           return visitMSCATTER(N);
  case ISD::MSTORE:             return visitMSTORE(N);
  case ISD::FP_TO_FP16:         return visitFP_TO_FP16(N);
  case ISD::FP16_TO_FP:         return visitFP16_TO_FP(N);
  }
  return SDValue();
}

SDValue DAGCombiner::combine(SDNode *N) {
  SDValue RV = visit(N);

  // If nothing happened, try a target-specific DAG combine.
  if (!RV.getNode()) {
    assert(N->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned NULL!");

    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {

      // Expose the DAG combiner to the target combiner impls.
      TargetLowering::DAGCombinerInfo
        DagCombineInfo(DAG, Level, false, this);

      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
    }
  }

  // If nothing happened still, try promoting the operation.
  if (!RV.getNode()) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      RV = PromoteIntBinOp(SDValue(N, 0));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      RV = PromoteIntShiftOp(SDValue(N, 0));
      break;
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
      RV = PromoteExtend(SDValue(N, 0));
      break;
    case ISD::LOAD:
      if (PromoteLoad(SDValue(N, 0)))
        RV = SDValue(N, 0);
      break;
    }
  }

  // If N is a commutative binary node, try commuting it to enable more
  // sdisel CSE.
  if (!RV.getNode() && SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
      N->getNumValues() == 1) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);

    // Constant operands are canonicalized to RHS.
    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
      SDValue Ops[] = {N1, N0};
      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
                                            Ops, N->getFlags());
      if (CSENode)
        return SDValue(CSENode, 0);
    }
  }

  return RV;
}

/// Given a node, return its input chain if it has one, otherwise return a
/// null SDValue.
1553 static SDValue getInputChainForNode(SDNode *N) {
1554   if (unsigned NumOps = N->getNumOperands()) {
1555     if (N->getOperand(0).getValueType() == MVT::Other)
1556       return N->getOperand(0);
1557     if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
1558       return N->getOperand(NumOps-1);
1559     for (unsigned i = 1; i < NumOps-1; ++i)
1560       if (N->getOperand(i).getValueType() == MVT::Other)
1561         return N->getOperand(i);
1562   }
1563   return SDValue();
1564 }
1565
1566 SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
1567   // If N has two operands, where one has an input chain equal to the other,
1568   // the 'other' chain is redundant.
1569   if (N->getNumOperands() == 2) {
1570     if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
1571       return N->getOperand(0);
1572     if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
1573       return N->getOperand(1);
1574   }
1575
1576   SmallVector<SDNode *, 8> TFs;   // List of token factors to visit.
1577   SmallVector<SDValue, 8> Ops;    // Ops for replacing token factor.
1578   SmallPtrSet<SDNode*, 16> SeenOps;
1579   bool Changed = false;           // If we should replace this token factor.
1580
1581   // Start out with this token factor.
1582   TFs.push_back(N);
1583
1584   // Iterate through token factors. The TFs list grows as new token factors
1585   // are encountered.
1586   for (unsigned i = 0; i < TFs.size(); ++i) {
1587     SDNode *TF = TFs[i];
1588
1589     // Check each of the operands.
1590     for (const SDValue &Op : TF->op_values()) {
1591
1592       switch (Op.getOpcode()) {
1593       case ISD::EntryToken:
1594         // Entry tokens don't need to be added to the list. They are
1595         // redundant.
1596         Changed = true;
1597         break;
1598
1599       case ISD::TokenFactor:
1600         if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) {
1601           // Queue up for processing.
1602           TFs.push_back(Op.getNode());
1603           // Clean up in case the token factor is removed.
1604           AddToWorklist(Op.getNode());
1605           Changed = true;
1606           break;
1607         }
1608         LLVM_FALLTHROUGH;
1609
1610       default:
1611         // Only add if it isn't already in the list.
1612         if (SeenOps.insert(Op.getNode()).second)
1613           Ops.push_back(Op);
1614         else
1615           Changed = true;
1616         break;
1617       }
1618     }
1619   }
1620
1621   SDValue Result;
1622
1623   // If we've changed things around then replace token factor.
1624   if (Changed) {
1625     if (Ops.empty()) {
1626       // The entry token is the only possible outcome.
1627       Result = DAG.getEntryNode();
1628     } else {
1629       // New and improved token factor.
1630       Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
1631     }
1632
1633     // Add users to worklist if AA is enabled, since it may introduce
1634     // a lot of new chained token factors while removing memory deps.
1635     bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
1636                                                     : DAG.getSubtarget().useAA();
1637     return CombineTo(N, Result, UseAA /*add to worklist*/);
1638   }
1639
1640   return Result;
1641 }
1642
1643 /// MERGE_VALUES can always be eliminated.
1644 SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
1645   WorklistRemover DeadNodes(*this);
1646   // Replacing results may cause a different MERGE_VALUES to suddenly
1647   // be CSE'd with N, and carry its uses with it. Iterate until no
1648   // uses remain, to ensure that the node can be safely deleted.
1649   // First add the users of this node to the work list so that they
1650   // can be tried again once they have new operands.
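// Illustrative sketch (added note): MERGE_VALUES simply forwards its
// operands, so a node such as (merge_values a, b) is erased by rewiring every
// use of its result 0 to 'a' and of its result 1 to 'b' in the loop below.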
1651   AddUsersToWorklist(N);
1652   do {
1653     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
1654       DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
1655   } while (!N->use_empty());
1656   deleteAndRecombine(N);
1657   return SDValue(N, 0);   // Return N so it doesn't get rechecked!
1658 }
1659
1660 /// If \p N is a ConstantSDNode with isOpaque() == false, return it cast to a
1661 /// ConstantSDNode pointer; otherwise return nullptr.
1662 static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
1663   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N);
1664   return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
1665 }
1666
1667 SDValue DAGCombiner::visitADD(SDNode *N) {
1668   SDValue N0 = N->getOperand(0);
1669   SDValue N1 = N->getOperand(1);
1670   EVT VT = N0.getValueType();
1671   SDLoc DL(N);
1672
1673   // fold vector ops
1674   if (VT.isVector()) {
1675     if (SDValue FoldedVOp = SimplifyVBinOp(N))
1676       return FoldedVOp;
1677
1678     // fold (add x, 0) -> x, vector edition
1679     if (ISD::isBuildVectorAllZeros(N1.getNode()))
1680       return N0;
1681     if (ISD::isBuildVectorAllZeros(N0.getNode()))
1682       return N1;
1683   }
1684
1685   // fold (add x, undef) -> undef
1686   if (N0.isUndef())
1687     return N0;
1688
1689   if (N1.isUndef())
1690     return N1;
1691
1692   if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
1693     // canonicalize constant to RHS
1694     if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
1695       return DAG.getNode(ISD::ADD, DL, VT, N1, N0);
1696     // fold (add c1, c2) -> c1+c2
1697     return DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N0.getNode(),
1698                                       N1.getNode());
1699   }
1700
1701   // fold (add x, 0) -> x
1702   if (isNullConstant(N1))
1703     return N0;
1704
1705   // fold ((c1-A)+c2) -> (c1+c2)-A
1706   if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) {
1707     if (N0.getOpcode() == ISD::SUB)
1708       if (isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
1709         return DAG.getNode(ISD::SUB, DL, VT,
1710                            DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)),
1711                            N0.getOperand(1));
1712       }
1713   }
1714
1715   // reassociate add
1716   if (SDValue RADD = ReassociateOps(ISD::ADD, DL, N0, N1))
1717     return RADD;
1718
1719   // fold ((0-A) + B) -> B-A
1720   if (N0.getOpcode() == ISD::SUB &&
1721       isNullConstantOrNullSplatConstant(N0.getOperand(0)))
1722     return DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));
1723
1724   // fold (A + (0-B)) -> A-B
1725   if (N1.getOpcode() == ISD::SUB &&
1726       isNullConstantOrNullSplatConstant(N1.getOperand(0)))
1727     return DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(1));
1728
1729   // fold (A+(B-A)) -> B
1730   if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
1731     return N1.getOperand(0);
1732
1733   // fold ((B-A)+A) -> B
1734   if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
1735     return N0.getOperand(0);
1736
1737   // fold (A+(B-(A+C))) to (B-C)
1738   if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1739       N0 == N1.getOperand(1).getOperand(0))
1740     return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
1741                        N1.getOperand(1).getOperand(1));
1742
1743   // fold (A+(B-(C+A))) to (B-C)
1744   if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
1745       N0 == N1.getOperand(1).getOperand(1))
1746     return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
1747                        N1.getOperand(1).getOperand(0));
1748
1749   // fold (A+((B-A)+or-C)) to (B+or-C)
1750   if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
1751       N1.getOperand(0).getOpcode() == ISD::SUB &&
1752       N0 == N1.getOperand(0).getOperand(1))
1753     return
DAG.getNode(N1.getOpcode(), DL, VT, N1.getOperand(0).getOperand(0), 1754 N1.getOperand(1)); 1755 1756 // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant 1757 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) { 1758 SDValue N00 = N0.getOperand(0); 1759 SDValue N01 = N0.getOperand(1); 1760 SDValue N10 = N1.getOperand(0); 1761 SDValue N11 = N1.getOperand(1); 1762 1763 if (isConstantOrConstantVector(N00) || isConstantOrConstantVector(N10)) 1764 return DAG.getNode(ISD::SUB, DL, VT, 1765 DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10), 1766 DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11)); 1767 } 1768 1769 if (SimplifyDemandedBits(SDValue(N, 0))) 1770 return SDValue(N, 0); 1771 1772 // fold (a+b) -> (a|b) iff a and b share no bits. 1773 if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) && 1774 VT.isInteger() && DAG.haveNoCommonBitsSet(N0, N1)) 1775 return DAG.getNode(ISD::OR, DL, VT, N0, N1); 1776 1777 // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n)) 1778 if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB && 1779 isNullConstantOrNullSplatConstant(N1.getOperand(0).getOperand(0))) 1780 return DAG.getNode(ISD::SUB, DL, VT, N0, 1781 DAG.getNode(ISD::SHL, DL, VT, 1782 N1.getOperand(0).getOperand(1), 1783 N1.getOperand(1))); 1784 if (N0.getOpcode() == ISD::SHL && N0.getOperand(0).getOpcode() == ISD::SUB && 1785 isNullConstantOrNullSplatConstant(N0.getOperand(0).getOperand(0))) 1786 return DAG.getNode(ISD::SUB, DL, VT, N1, 1787 DAG.getNode(ISD::SHL, DL, VT, 1788 N0.getOperand(0).getOperand(1), 1789 N0.getOperand(1))); 1790 1791 if (N1.getOpcode() == ISD::AND) { 1792 SDValue AndOp0 = N1.getOperand(0); 1793 unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0); 1794 unsigned DestBits = VT.getScalarSizeInBits(); 1795 1796 // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x)) 1797 // and similar xforms where the inner op is either ~0 or 0. 1798 if (NumSignBits == DestBits && 1799 isOneConstantOrOneSplatConstant(N1->getOperand(1))) 1800 return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0); 1801 } 1802 1803 // add (sext i1), X -> sub X, (zext i1) 1804 if (N0.getOpcode() == ISD::SIGN_EXTEND && 1805 N0.getOperand(0).getValueType() == MVT::i1 && 1806 !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) { 1807 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); 1808 return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt); 1809 } 1810 1811 // add X, (sextinreg Y i1) -> sub X, (and Y 1) 1812 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { 1813 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); 1814 if (TN->getVT() == MVT::i1) { 1815 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), 1816 DAG.getConstant(1, DL, VT)); 1817 return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt); 1818 } 1819 } 1820 1821 return SDValue(); 1822 } 1823 1824 SDValue DAGCombiner::visitADDC(SDNode *N) { 1825 SDValue N0 = N->getOperand(0); 1826 SDValue N1 = N->getOperand(1); 1827 EVT VT = N0.getValueType(); 1828 1829 // If the flag result is dead, turn this into an ADD. 1830 if (!N->hasAnyUseOfValue(1)) 1831 return CombineTo(N, DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N1), 1832 DAG.getNode(ISD::CARRY_FALSE, 1833 SDLoc(N), MVT::Glue)); 1834 1835 // canonicalize constant to RHS. 
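// Illustrative example (added note): (addc 1, x) becomes (addc x, 1), so the
// folds below only need to look for a constant on the RHS.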
1836   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1837   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1838   if (N0C && !N1C)
1839     return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N1, N0);
1840
1841   // fold (addc x, 0) -> x + no carry out
1842   if (isNullConstant(N1))
1843     return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
1844                                         SDLoc(N), MVT::Glue));
1845
1846   // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
1847   APInt LHSZero, LHSOne;
1848   APInt RHSZero, RHSOne;
1849   DAG.computeKnownBits(N0, LHSZero, LHSOne);
1850
1851   if (LHSZero.getBoolValue()) {
1852     DAG.computeKnownBits(N1, RHSZero, RHSOne);
1853
1854     // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
1855     // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
1856     if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
1857       return CombineTo(N, DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1),
1858                        DAG.getNode(ISD::CARRY_FALSE,
1859                                    SDLoc(N), MVT::Glue));
1860   }
1861
1862   return SDValue();
1863 }
1864
1865 SDValue DAGCombiner::visitADDE(SDNode *N) {
1866   SDValue N0 = N->getOperand(0);
1867   SDValue N1 = N->getOperand(1);
1868   SDValue CarryIn = N->getOperand(2);
1869
1870   // canonicalize constant to RHS
1871   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1872   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1873   if (N0C && !N1C)
1874     return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(),
1875                        N1, N0, CarryIn);
1876
1877   // fold (adde x, y, false) -> (addc x, y)
1878   if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
1879     return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1);
1880
1881   return SDValue();
1882 }
1883
1884 // Since it may not be valid to emit a fold to zero for vector initializers,
1885 // check whether we can before folding.
1886 static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
1887                              SelectionDAG &DAG, bool LegalOperations,
1888                              bool LegalTypes) {
1889   if (!VT.isVector())
1890     return DAG.getConstant(0, DL, VT);
1891   if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
1892     return DAG.getConstant(0, DL, VT);
1893   return SDValue();
1894 }
1895
1896 SDValue DAGCombiner::visitSUB(SDNode *N) {
1897   SDValue N0 = N->getOperand(0);
1898   SDValue N1 = N->getOperand(1);
1899   EVT VT = N0.getValueType();
1900   SDLoc DL(N);
1901
1902   // fold vector ops
1903   if (VT.isVector()) {
1904     if (SDValue FoldedVOp = SimplifyVBinOp(N))
1905       return FoldedVOp;
1906
1907     // fold (sub x, 0) -> x, vector edition
1908     if (ISD::isBuildVectorAllZeros(N1.getNode()))
1909       return N0;
1910   }
1911
1912   // fold (sub x, x) -> 0
1913   // FIXME: Refactor this and xor and other similar operations together.
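// Illustrative note (not from the original source): (sub x, x) becomes the
// all-zeros constant, but for vector types tryFoldToZero above only emits the
// zero BUILD_VECTOR when it is legal (or when operations are not yet
// legalized).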
1914 if (N0 == N1) 1915 return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations, LegalTypes); 1916 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 1917 DAG.isConstantIntBuildVectorOrConstantInt(N1)) { 1918 // fold (sub c1, c2) -> c1-c2 1919 return DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(), 1920 N1.getNode()); 1921 } 1922 1923 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1); 1924 1925 // fold (sub x, c) -> (add x, -c) 1926 if (N1C) { 1927 return DAG.getNode(ISD::ADD, DL, VT, N0, 1928 DAG.getConstant(-N1C->getAPIntValue(), DL, VT)); 1929 } 1930 1931 if (isNullConstantOrNullSplatConstant(N0)) { 1932 unsigned BitWidth = VT.getScalarSizeInBits(); 1933 // Right-shifting everything out but the sign bit followed by negation is 1934 // the same as flipping arithmetic/logical shift type without the negation: 1935 // -(X >>u 31) -> (X >>s 31) 1936 // -(X >>s 31) -> (X >>u 31) 1937 if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) { 1938 ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1)); 1939 if (ShiftAmt && ShiftAmt->getZExtValue() == BitWidth - 1) { 1940 auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA; 1941 if (!LegalOperations || TLI.isOperationLegal(NewSh, VT)) 1942 return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1)); 1943 } 1944 } 1945 1946 // 0 - X --> 0 if the sub is NUW. 1947 if (N->getFlags()->hasNoUnsignedWrap()) 1948 return N0; 1949 1950 if (DAG.MaskedValueIsZero(N1, ~APInt::getSignBit(BitWidth))) { 1951 // N1 is either 0 or the minimum signed value. If the sub is NSW, then 1952 // N1 must be 0 because negating the minimum signed value is undefined. 1953 if (N->getFlags()->hasNoSignedWrap()) 1954 return N0; 1955 1956 // 0 - X --> X if X is 0 or the minimum signed value. 1957 return N1; 1958 } 1959 } 1960 1961 // Canonicalize (sub -1, x) -> ~x, i.e. 
(xor x, -1) 1962 if (isAllOnesConstantOrAllOnesSplatConstant(N0)) 1963 return DAG.getNode(ISD::XOR, DL, VT, N1, N0); 1964 1965 // fold A-(A-B) -> B 1966 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0)) 1967 return N1.getOperand(1); 1968 1969 // fold (A+B)-A -> B 1970 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1) 1971 return N0.getOperand(1); 1972 1973 // fold (A+B)-B -> A 1974 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1) 1975 return N0.getOperand(0); 1976 1977 // fold C2-(A+C1) -> (C2-C1)-A 1978 if (N1.getOpcode() == ISD::ADD) { 1979 SDValue N11 = N1.getOperand(1); 1980 if (isConstantOrConstantVector(N0, /* NoOpaques */ true) && 1981 isConstantOrConstantVector(N11, /* NoOpaques */ true)) { 1982 SDValue NewC = DAG.getNode(ISD::SUB, DL, VT, N0, N11); 1983 return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0)); 1984 } 1985 } 1986 1987 // fold ((A+(B+or-C))-B) -> A+or-C 1988 if (N0.getOpcode() == ISD::ADD && 1989 (N0.getOperand(1).getOpcode() == ISD::SUB || 1990 N0.getOperand(1).getOpcode() == ISD::ADD) && 1991 N0.getOperand(1).getOperand(0) == N1) 1992 return DAG.getNode(N0.getOperand(1).getOpcode(), DL, VT, N0.getOperand(0), 1993 N0.getOperand(1).getOperand(1)); 1994 1995 // fold ((A+(C+B))-B) -> A+C 1996 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1).getOpcode() == ISD::ADD && 1997 N0.getOperand(1).getOperand(1) == N1) 1998 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), 1999 N0.getOperand(1).getOperand(0)); 2000 2001 // fold ((A-(B-C))-C) -> A-B 2002 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1).getOpcode() == ISD::SUB && 2003 N0.getOperand(1).getOperand(1) == N1) 2004 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), 2005 N0.getOperand(1).getOperand(0)); 2006 2007 // If either operand of a sub is undef, the result is undef 2008 if (N0.isUndef()) 2009 return N0; 2010 if (N1.isUndef()) 2011 return N1; 2012 2013 // If the relocation model supports it, consider symbol offsets. 2014 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) 2015 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) { 2016 // fold (sub Sym, c) -> Sym-c 2017 if (N1C && GA->getOpcode() == ISD::GlobalAddress) 2018 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT, 2019 GA->getOffset() - 2020 (uint64_t)N1C->getSExtValue()); 2021 // fold (sub Sym+c1, Sym+c2) -> c1-c2 2022 if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1)) 2023 if (GA->getGlobal() == GB->getGlobal()) 2024 return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(), 2025 DL, VT); 2026 } 2027 2028 // sub X, (sextinreg Y i1) -> add X, (and Y 1) 2029 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { 2030 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); 2031 if (TN->getVT() == MVT::i1) { 2032 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), 2033 DAG.getConstant(1, DL, VT)); 2034 return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt); 2035 } 2036 } 2037 2038 return SDValue(); 2039 } 2040 2041 SDValue DAGCombiner::visitSUBC(SDNode *N) { 2042 SDValue N0 = N->getOperand(0); 2043 SDValue N1 = N->getOperand(1); 2044 EVT VT = N0.getValueType(); 2045 SDLoc DL(N); 2046 2047 // If the flag result is dead, turn this into an SUB. 
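// Illustrative example (added note): if nothing reads the carry result of
// (subc x, y), it is replaced below by a plain (sub x, y) paired with a
// CARRY_FALSE glue value.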
2048 if (!N->hasAnyUseOfValue(1)) 2049 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1), 2050 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2051 2052 // fold (subc x, x) -> 0 + no borrow 2053 if (N0 == N1) 2054 return CombineTo(N, DAG.getConstant(0, DL, VT), 2055 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2056 2057 // fold (subc x, 0) -> x + no borrow 2058 if (isNullConstant(N1)) 2059 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2060 2061 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow 2062 if (isAllOnesConstant(N0)) 2063 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0), 2064 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2065 2066 return SDValue(); 2067 } 2068 2069 SDValue DAGCombiner::visitSUBE(SDNode *N) { 2070 SDValue N0 = N->getOperand(0); 2071 SDValue N1 = N->getOperand(1); 2072 SDValue CarryIn = N->getOperand(2); 2073 2074 // fold (sube x, y, false) -> (subc x, y) 2075 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 2076 return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1); 2077 2078 return SDValue(); 2079 } 2080 2081 SDValue DAGCombiner::visitMUL(SDNode *N) { 2082 SDValue N0 = N->getOperand(0); 2083 SDValue N1 = N->getOperand(1); 2084 EVT VT = N0.getValueType(); 2085 2086 // fold (mul x, undef) -> 0 2087 if (N0.isUndef() || N1.isUndef()) 2088 return DAG.getConstant(0, SDLoc(N), VT); 2089 2090 bool N0IsConst = false; 2091 bool N1IsConst = false; 2092 bool N1IsOpaqueConst = false; 2093 bool N0IsOpaqueConst = false; 2094 APInt ConstValue0, ConstValue1; 2095 // fold vector ops 2096 if (VT.isVector()) { 2097 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2098 return FoldedVOp; 2099 2100 N0IsConst = ISD::isConstantSplatVector(N0.getNode(), ConstValue0); 2101 N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1); 2102 } else { 2103 N0IsConst = isa<ConstantSDNode>(N0); 2104 if (N0IsConst) { 2105 ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue(); 2106 N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque(); 2107 } 2108 N1IsConst = isa<ConstantSDNode>(N1); 2109 if (N1IsConst) { 2110 ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue(); 2111 N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque(); 2112 } 2113 } 2114 2115 // fold (mul c1, c2) -> c1*c2 2116 if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst) 2117 return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT, 2118 N0.getNode(), N1.getNode()); 2119 2120 // canonicalize constant to RHS (vector doesn't have to splat) 2121 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 2122 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 2123 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0); 2124 // fold (mul x, 0) -> 0 2125 if (N1IsConst && ConstValue1 == 0) 2126 return N1; 2127 // We require a splat of the entire scalar bit width for non-contiguous 2128 // bit patterns. 
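// Illustrative note (added): a v4i32 constant whose every byte is 0x01 may be
// reported as an 8-bit splat of 1; treating that as the scalar value 1 would
// wrongly fold (mul x, 0x01010101) -> x, hence the full-width check below.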
2129 bool IsFullSplat = 2130 ConstValue1.getBitWidth() == VT.getScalarSizeInBits(); 2131 // fold (mul x, 1) -> x 2132 if (N1IsConst && ConstValue1 == 1 && IsFullSplat) 2133 return N0; 2134 // fold (mul x, -1) -> 0-x 2135 if (N1IsConst && ConstValue1.isAllOnesValue()) { 2136 SDLoc DL(N); 2137 return DAG.getNode(ISD::SUB, DL, VT, 2138 DAG.getConstant(0, DL, VT), N0); 2139 } 2140 // fold (mul x, (1 << c)) -> x << c 2141 if (N1IsConst && !N1IsOpaqueConst && ConstValue1.isPowerOf2() && 2142 IsFullSplat) { 2143 SDLoc DL(N); 2144 return DAG.getNode(ISD::SHL, DL, VT, N0, 2145 DAG.getConstant(ConstValue1.logBase2(), DL, 2146 getShiftAmountTy(N0.getValueType()))); 2147 } 2148 // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c 2149 if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2() && 2150 IsFullSplat) { 2151 unsigned Log2Val = (-ConstValue1).logBase2(); 2152 SDLoc DL(N); 2153 // FIXME: If the input is something that is easily negated (e.g. a 2154 // single-use add), we should put the negate there. 2155 return DAG.getNode(ISD::SUB, DL, VT, 2156 DAG.getConstant(0, DL, VT), 2157 DAG.getNode(ISD::SHL, DL, VT, N0, 2158 DAG.getConstant(Log2Val, DL, 2159 getShiftAmountTy(N0.getValueType())))); 2160 } 2161 2162 // (mul (shl X, c1), c2) -> (mul X, c2 << c1) 2163 if (N0.getOpcode() == ISD::SHL && 2164 isConstantOrConstantVector(N1, /* NoOpaques */ true) && 2165 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) { 2166 SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, N1, N0.getOperand(1)); 2167 if (isConstantOrConstantVector(C3)) 2168 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), C3); 2169 } 2170 2171 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one 2172 // use. 2173 { 2174 SDValue Sh(nullptr, 0), Y(nullptr, 0); 2175 2176 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)). 2177 if (N0.getOpcode() == ISD::SHL && 2178 isConstantOrConstantVector(N0.getOperand(1)) && 2179 N0.getNode()->hasOneUse()) { 2180 Sh = N0; Y = N1; 2181 } else if (N1.getOpcode() == ISD::SHL && 2182 isConstantOrConstantVector(N1.getOperand(1)) && 2183 N1.getNode()->hasOneUse()) { 2184 Sh = N1; Y = N0; 2185 } 2186 2187 if (Sh.getNode()) { 2188 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, Sh.getOperand(0), Y); 2189 return DAG.getNode(ISD::SHL, SDLoc(N), VT, Mul, Sh.getOperand(1)); 2190 } 2191 } 2192 2193 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2) 2194 if (DAG.isConstantIntBuildVectorOrConstantInt(N1) && 2195 N0.getOpcode() == ISD::ADD && 2196 DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) && 2197 isMulAddWithConstProfitable(N, N0, N1)) 2198 return DAG.getNode(ISD::ADD, SDLoc(N), VT, 2199 DAG.getNode(ISD::MUL, SDLoc(N0), VT, 2200 N0.getOperand(0), N1), 2201 DAG.getNode(ISD::MUL, SDLoc(N1), VT, 2202 N0.getOperand(1), N1)); 2203 2204 // reassociate mul 2205 if (SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1)) 2206 return RMUL; 2207 2208 return SDValue(); 2209 } 2210 2211 /// Return true if divmod libcall is available. 2212 static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 2213 const TargetLowering &TLI) { 2214 RTLIB::Libcall LC; 2215 EVT NodeType = Node->getValueType(0); 2216 if (!NodeType.isSimple()) 2217 return false; 2218 switch (NodeType.getSimpleVT().SimpleTy) { 2219 default: return false; // No libcall for vector types. 2220 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2221 case MVT::i16: LC= isSigned ? 
RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2222 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2223 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2224 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2225 } 2226 2227 return TLI.getLibcallName(LC) != nullptr; 2228 } 2229 2230 /// Issue divrem if both quotient and remainder are needed. 2231 SDValue DAGCombiner::useDivRem(SDNode *Node) { 2232 if (Node->use_empty()) 2233 return SDValue(); // This is a dead node, leave it alone. 2234 2235 unsigned Opcode = Node->getOpcode(); 2236 bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM); 2237 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 2238 2239 // DivMod lib calls can still work on non-legal types if using lib-calls. 2240 EVT VT = Node->getValueType(0); 2241 if (VT.isVector() || !VT.isInteger()) 2242 return SDValue(); 2243 2244 if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT)) 2245 return SDValue(); 2246 2247 // If DIVREM is going to get expanded into a libcall, 2248 // but there is no libcall available, then don't combine. 2249 if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) && 2250 !isDivRemLibcallAvailable(Node, isSigned, TLI)) 2251 return SDValue(); 2252 2253 // If div is legal, it's better to do the normal expansion 2254 unsigned OtherOpcode = 0; 2255 if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) { 2256 OtherOpcode = isSigned ? ISD::SREM : ISD::UREM; 2257 if (TLI.isOperationLegalOrCustom(Opcode, VT)) 2258 return SDValue(); 2259 } else { 2260 OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV; 2261 if (TLI.isOperationLegalOrCustom(OtherOpcode, VT)) 2262 return SDValue(); 2263 } 2264 2265 SDValue Op0 = Node->getOperand(0); 2266 SDValue Op1 = Node->getOperand(1); 2267 SDValue combined; 2268 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2269 UE = Op0.getNode()->use_end(); UI != UE;) { 2270 SDNode *User = *UI++; 2271 if (User == Node || User->use_empty()) 2272 continue; 2273 // Convert the other matching node(s), too; 2274 // otherwise, the DIVREM may get target-legalized into something 2275 // target-specific that we won't be able to recognize. 
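// Illustrative example (added note): if both (sdiv a, b) and (srem a, b) are
// live, the loop below redirects each of them to the corresponding result of
// a single (sdivrem a, b) node.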
2276 unsigned UserOpc = User->getOpcode(); 2277 if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) && 2278 User->getOperand(0) == Op0 && 2279 User->getOperand(1) == Op1) { 2280 if (!combined) { 2281 if (UserOpc == OtherOpcode) { 2282 SDVTList VTs = DAG.getVTList(VT, VT); 2283 combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1); 2284 } else if (UserOpc == DivRemOpc) { 2285 combined = SDValue(User, 0); 2286 } else { 2287 assert(UserOpc == Opcode); 2288 continue; 2289 } 2290 } 2291 if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV) 2292 CombineTo(User, combined); 2293 else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM) 2294 CombineTo(User, combined.getValue(1)); 2295 } 2296 } 2297 return combined; 2298 } 2299 2300 SDValue DAGCombiner::visitSDIV(SDNode *N) { 2301 SDValue N0 = N->getOperand(0); 2302 SDValue N1 = N->getOperand(1); 2303 EVT VT = N->getValueType(0); 2304 2305 // fold vector ops 2306 if (VT.isVector()) 2307 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2308 return FoldedVOp; 2309 2310 SDLoc DL(N); 2311 2312 // fold (sdiv c1, c2) -> c1/c2 2313 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2314 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2315 if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque()) 2316 return DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, N0C, N1C); 2317 // fold (sdiv X, 1) -> X 2318 if (N1C && N1C->isOne()) 2319 return N0; 2320 // fold (sdiv X, -1) -> 0-X 2321 if (N1C && N1C->isAllOnesValue()) 2322 return DAG.getNode(ISD::SUB, DL, VT, 2323 DAG.getConstant(0, DL, VT), N0); 2324 2325 // If we know the sign bits of both operands are zero, strength reduce to a 2326 // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2 2327 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2328 return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1); 2329 2330 // fold (sdiv X, pow2) -> simple ops after legalize 2331 // FIXME: We check for the exact bit here because the generic lowering gives 2332 // better results in that case. The target-specific lowering should learn how 2333 // to handle exact sdivs efficiently. 2334 if (N1C && !N1C->isNullValue() && !N1C->isOpaque() && 2335 !cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact() && 2336 (N1C->getAPIntValue().isPowerOf2() || 2337 (-N1C->getAPIntValue()).isPowerOf2())) { 2338 // Target-specific implementation of sdiv x, pow2. 2339 if (SDValue Res = BuildSDIVPow2(N)) 2340 return Res; 2341 2342 unsigned lg2 = N1C->getAPIntValue().countTrailingZeros(); 2343 2344 // Splat the sign bit into the register 2345 SDValue SGN = 2346 DAG.getNode(ISD::SRA, DL, VT, N0, 2347 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, 2348 getShiftAmountTy(N0.getValueType()))); 2349 AddToWorklist(SGN.getNode()); 2350 2351 // Add (N0 < 0) ? abs2 - 1 : 0; 2352 SDValue SRL = 2353 DAG.getNode(ISD::SRL, DL, VT, SGN, 2354 DAG.getConstant(VT.getScalarSizeInBits() - lg2, DL, 2355 getShiftAmountTy(SGN.getValueType()))); 2356 SDValue ADD = DAG.getNode(ISD::ADD, DL, VT, N0, SRL); 2357 AddToWorklist(SRL.getNode()); 2358 AddToWorklist(ADD.getNode()); // Divide by pow2 2359 SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, ADD, 2360 DAG.getConstant(lg2, DL, 2361 getShiftAmountTy(ADD.getValueType()))); 2362 2363 // If we're dividing by a positive value, we're done. Otherwise, we must 2364 // negate the result. 
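// Illustrative walk-through (added note) for i32 (sdiv x, 4), where lg2 == 2:
//   SGN = (sra x, 31)    ; 0 for x >= 0, all-ones for x < 0
//   SRL = (srl SGN, 30)  ; 0 or 3 == abs(4) - 1
//   ADD = (add x, SRL)   ; bias negative x toward zero
//   SRA = (sra ADD, 2)   ; the rounded quotient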
2365     if (N1C->getAPIntValue().isNonNegative())
2366       return SRA;
2367
2368     AddToWorklist(SRA.getNode());
2369     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
2370   }
2371
2372   // If integer divide is expensive and we satisfy the requirements, emit an
2373   // alternate sequence. Targets may check function attributes for size/speed
2374   // trade-offs.
2375   AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2376   if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
2377     if (SDValue Op = BuildSDIV(N))
2378       return Op;
2379
2380   // sdiv, srem -> sdivrem
2381   // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
2382   // true. Otherwise, we break the simplification logic in visitREM().
2383   if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
2384     if (SDValue DivRem = useDivRem(N))
2385       return DivRem;
2386
2387   // undef / X -> 0
2388   if (N0.isUndef())
2389     return DAG.getConstant(0, DL, VT);
2390   // X / undef -> undef
2391   if (N1.isUndef())
2392     return N1;
2393
2394   return SDValue();
2395 }
2396
2397 SDValue DAGCombiner::visitUDIV(SDNode *N) {
2398   SDValue N0 = N->getOperand(0);
2399   SDValue N1 = N->getOperand(1);
2400   EVT VT = N->getValueType(0);
2401
2402   // fold vector ops
2403   if (VT.isVector())
2404     if (SDValue FoldedVOp = SimplifyVBinOp(N))
2405       return FoldedVOp;
2406
2407   SDLoc DL(N);
2408
2409   // fold (udiv c1, c2) -> c1/c2
2410   ConstantSDNode *N0C = isConstOrConstSplat(N0);
2411   ConstantSDNode *N1C = isConstOrConstSplat(N1);
2412   if (N0C && N1C)
2413     if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT,
2414                                                     N0C, N1C))
2415       return Folded;
2416
2417   // fold (udiv x, (1 << c)) -> x >>u c
2418   if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
2419       DAG.isKnownToBeAPowerOfTwo(N1)) {
2420     SDValue LogBase2 = BuildLogBase2(N1, DL);
2421     AddToWorklist(LogBase2.getNode());
2422
2423     EVT ShiftVT = getShiftAmountTy(N0.getValueType());
2424     SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
2425     AddToWorklist(Trunc.getNode());
2426     return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
2427   }
2428
2429   // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
2430   if (N1.getOpcode() == ISD::SHL) {
2431     SDValue N10 = N1.getOperand(0);
2432     if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) &&
2433         DAG.isKnownToBeAPowerOfTwo(N10)) {
2434       SDValue LogBase2 = BuildLogBase2(N10, DL);
2435       AddToWorklist(LogBase2.getNode());
2436
2437       EVT ADDVT = N1.getOperand(1).getValueType();
2438       SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT);
2439       AddToWorklist(Trunc.getNode());
2440       SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc);
2441       AddToWorklist(Add.getNode());
2442       return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
2443     }
2444   }
2445
2446   // fold (udiv x, c) -> alternate
2447   AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2448   if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
2449     if (SDValue Op = BuildUDIV(N))
2450       return Op;
2451
2452   // udiv, urem -> udivrem
2453   // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
2454   // true. Otherwise, we break the simplification logic in visitREM().
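// Illustrative note (added): for (udiv x, 7) on a target where division is
// expensive, no UDIVREM is formed here; otherwise visitREM's speculative
// combine of the division could hand back a DIVREM node and defeat its
// X-X/C*C rewrite.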
2455 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr)) 2456 if (SDValue DivRem = useDivRem(N)) 2457 return DivRem; 2458 2459 // undef / X -> 0 2460 if (N0.isUndef()) 2461 return DAG.getConstant(0, DL, VT); 2462 // X / undef -> undef 2463 if (N1.isUndef()) 2464 return N1; 2465 2466 return SDValue(); 2467 } 2468 2469 // handles ISD::SREM and ISD::UREM 2470 SDValue DAGCombiner::visitREM(SDNode *N) { 2471 unsigned Opcode = N->getOpcode(); 2472 SDValue N0 = N->getOperand(0); 2473 SDValue N1 = N->getOperand(1); 2474 EVT VT = N->getValueType(0); 2475 bool isSigned = (Opcode == ISD::SREM); 2476 SDLoc DL(N); 2477 2478 // fold (rem c1, c2) -> c1%c2 2479 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2480 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2481 if (N0C && N1C) 2482 if (SDValue Folded = DAG.FoldConstantArithmetic(Opcode, DL, VT, N0C, N1C)) 2483 return Folded; 2484 2485 if (isSigned) { 2486 // If we know the sign bits of both operands are zero, strength reduce to a 2487 // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15 2488 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2489 return DAG.getNode(ISD::UREM, DL, VT, N0, N1); 2490 } else { 2491 // fold (urem x, pow2) -> (and x, pow2-1) 2492 if (DAG.isKnownToBeAPowerOfTwo(N1)) { 2493 APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits()); 2494 SDValue Add = 2495 DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT)); 2496 AddToWorklist(Add.getNode()); 2497 return DAG.getNode(ISD::AND, DL, VT, N0, Add); 2498 } 2499 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)) 2500 if (N1.getOpcode() == ISD::SHL && 2501 DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) { 2502 APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits()); 2503 SDValue Add = 2504 DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT)); 2505 AddToWorklist(Add.getNode()); 2506 return DAG.getNode(ISD::AND, DL, VT, N0, Add); 2507 } 2508 } 2509 2510 AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes(); 2511 2512 // If X/C can be simplified by the division-by-constant logic, lower 2513 // X%C to the equivalent of X-X/C*C. 2514 // To avoid mangling nodes, this simplification requires that the combine() 2515 // call for the speculative DIV must not cause a DIVREM conversion. We guard 2516 // against this by skipping the simplification if isIntDivCheap(). When 2517 // div is not cheap, combine will not return a DIVREM. Regardless, 2518 // checking cheapness here makes sense since the simplification results in 2519 // fatter code. 2520 if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap(VT, Attr)) { 2521 unsigned DivOpcode = isSigned ? 
ISD::SDIV : ISD::UDIV; 2522 SDValue Div = DAG.getNode(DivOpcode, DL, VT, N0, N1); 2523 AddToWorklist(Div.getNode()); 2524 SDValue OptimizedDiv = combine(Div.getNode()); 2525 if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) { 2526 assert((OptimizedDiv.getOpcode() != ISD::UDIVREM) && 2527 (OptimizedDiv.getOpcode() != ISD::SDIVREM)); 2528 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1); 2529 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul); 2530 AddToWorklist(Mul.getNode()); 2531 return Sub; 2532 } 2533 } 2534 2535 // sdiv, srem -> sdivrem 2536 if (SDValue DivRem = useDivRem(N)) 2537 return DivRem.getValue(1); 2538 2539 // undef % X -> 0 2540 if (N0.isUndef()) 2541 return DAG.getConstant(0, DL, VT); 2542 // X % undef -> undef 2543 if (N1.isUndef()) 2544 return N1; 2545 2546 return SDValue(); 2547 } 2548 2549 SDValue DAGCombiner::visitMULHS(SDNode *N) { 2550 SDValue N0 = N->getOperand(0); 2551 SDValue N1 = N->getOperand(1); 2552 EVT VT = N->getValueType(0); 2553 SDLoc DL(N); 2554 2555 // fold (mulhs x, 0) -> 0 2556 if (isNullConstant(N1)) 2557 return N1; 2558 // fold (mulhs x, 1) -> (sra x, size(x)-1) 2559 if (isOneConstant(N1)) { 2560 SDLoc DL(N); 2561 return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0, 2562 DAG.getConstant(N0.getValueSizeInBits() - 1, DL, 2563 getShiftAmountTy(N0.getValueType()))); 2564 } 2565 // fold (mulhs x, undef) -> 0 2566 if (N0.isUndef() || N1.isUndef()) 2567 return DAG.getConstant(0, SDLoc(N), VT); 2568 2569 // If the type twice as wide is legal, transform the mulhs to a wider multiply 2570 // plus a shift. 2571 if (VT.isSimple() && !VT.isVector()) { 2572 MVT Simple = VT.getSimpleVT(); 2573 unsigned SimpleSize = Simple.getSizeInBits(); 2574 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2575 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2576 N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0); 2577 N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1); 2578 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2579 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2580 DAG.getConstant(SimpleSize, DL, 2581 getShiftAmountTy(N1.getValueType()))); 2582 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2583 } 2584 } 2585 2586 return SDValue(); 2587 } 2588 2589 SDValue DAGCombiner::visitMULHU(SDNode *N) { 2590 SDValue N0 = N->getOperand(0); 2591 SDValue N1 = N->getOperand(1); 2592 EVT VT = N->getValueType(0); 2593 SDLoc DL(N); 2594 2595 // fold (mulhu x, 0) -> 0 2596 if (isNullConstant(N1)) 2597 return N1; 2598 // fold (mulhu x, 1) -> 0 2599 if (isOneConstant(N1)) 2600 return DAG.getConstant(0, DL, N0.getValueType()); 2601 // fold (mulhu x, undef) -> 0 2602 if (N0.isUndef() || N1.isUndef()) 2603 return DAG.getConstant(0, DL, VT); 2604 2605 // If the type twice as wide is legal, transform the mulhu to a wider multiply 2606 // plus a shift. 
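// Illustrative example (added note): i16 (mulhu x, y) where i32 MUL is legal
// becomes (trunc (srl (mul (zext x), (zext y)), 16)).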
2607   if (VT.isSimple() && !VT.isVector()) {
2608     MVT Simple = VT.getSimpleVT();
2609     unsigned SimpleSize = Simple.getSizeInBits();
2610     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2611     if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2612       N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
2613       N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
2614       N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
2615       N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
2616                        DAG.getConstant(SimpleSize, DL,
2617                                        getShiftAmountTy(N1.getValueType())));
2618       return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
2619     }
2620   }
2621
2622   return SDValue();
2623 }
2624
2625 /// Perform optimizations common to nodes that compute two values. LoOp and
2626 /// HiOp give the opcodes for the two computations that are being performed.
2627 /// Returns the simplified value, or a null SDValue if nothing was done.
2628 SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
2629                                                 unsigned HiOp) {
2630   // If the high half is not needed, just compute the low half.
2631   bool HiExists = N->hasAnyUseOfValue(1);
2632   if (!HiExists &&
2633       (!LegalOperations ||
2634        TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
2635     SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
2636     return CombineTo(N, Res, Res);
2637   }
2638
2639   // If the low half is not needed, just compute the high half.
2640   bool LoExists = N->hasAnyUseOfValue(0);
2641   if (!LoExists &&
2642       (!LegalOperations ||
2643        TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
2644     SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
2645     return CombineTo(N, Res, Res);
2646   }
2647
2648   // If both halves are used, return as it is.
2649   if (LoExists && HiExists)
2650     return SDValue();
2651
2652   // If the two computed results can be simplified separately, separate them.
2653   if (LoExists) {
2654     SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
2655     AddToWorklist(Lo.getNode());
2656     SDValue LoOpt = combine(Lo.getNode());
2657     if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
2658         (!LegalOperations ||
2659          TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
2660       return CombineTo(N, LoOpt, LoOpt);
2661   }
2662
2663   if (HiExists) {
2664     SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
2665     AddToWorklist(Hi.getNode());
2666     SDValue HiOpt = combine(Hi.getNode());
2667     if (HiOpt.getNode() && HiOpt != Hi &&
2668         (!LegalOperations ||
2669          TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
2670       return CombineTo(N, HiOpt, HiOpt);
2671   }
2672
2673   return SDValue();
2674 }
2675
2676 SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
2677   if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
2678     return Res;
2679
2680   EVT VT = N->getValueType(0);
2681   SDLoc DL(N);
2682
2683   // If a type twice as wide is legal, transform the SMUL_LOHI into a wider
2684   // multiply plus a shift.
2685   if (VT.isSimple() && !VT.isVector()) {
2686     MVT Simple = VT.getSimpleVT();
2687     unsigned SimpleSize = Simple.getSizeInBits();
2688     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2689     if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2690       SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
2691       SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
2692       Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2693       // Compute the high part as N1.
2694       Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2695                        DAG.getConstant(SimpleSize, DL,
2696                                        getShiftAmountTy(Lo.getValueType())));
2697       Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2698       // Compute the low part as N0.
2699       Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2700       return CombineTo(N, Lo, Hi);
2701     }
2702   }
2703
2704   return SDValue();
2705 }
2706
2707 SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
2708   if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
2709     return Res;
2710
2711   EVT VT = N->getValueType(0);
2712   SDLoc DL(N);
2713
2714   // If a type twice as wide is legal, transform the UMUL_LOHI into a wider
2715   // multiply plus a shift.
2716   if (VT.isSimple() && !VT.isVector()) {
2717     MVT Simple = VT.getSimpleVT();
2718     unsigned SimpleSize = Simple.getSizeInBits();
2719     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2720     if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2721       SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
2722       SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
2723       Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2724       // Compute the high part as N1.
2725       Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2726                        DAG.getConstant(SimpleSize, DL,
2727                                        getShiftAmountTy(Lo.getValueType())));
2728       Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2729       // Compute the low part as N0.
2730       Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2731       return CombineTo(N, Lo, Hi);
2732     }
2733   }
2734
2735   return SDValue();
2736 }
2737
2738 SDValue DAGCombiner::visitSMULO(SDNode *N) {
2739   // (smulo x, 2) -> (saddo x, x)
2740   if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2741     if (C2->getAPIntValue() == 2)
2742       return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(),
2743                          N->getOperand(0), N->getOperand(0));
2744
2745   return SDValue();
2746 }
2747
2748 SDValue DAGCombiner::visitUMULO(SDNode *N) {
2749   // (umulo x, 2) -> (uaddo x, x)
2750   if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2751     if (C2->getAPIntValue() == 2)
2752       return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(),
2753                          N->getOperand(0), N->getOperand(0));
2754
2755   return SDValue();
2756 }
2757
2758 SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
2759   SDValue N0 = N->getOperand(0);
2760   SDValue N1 = N->getOperand(1);
2761   EVT VT = N0.getValueType();
2762
2763   // fold vector ops
2764   if (VT.isVector())
2765     if (SDValue FoldedVOp = SimplifyVBinOp(N))
2766       return FoldedVOp;
2767
2768   // fold (op c1, c2) -> c1 op c2 (for smin/smax/umin/umax)
2769   ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
2770   ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
2771   if (N0C && N1C)
2772     return DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, N0C, N1C);
2773
2774   // canonicalize constant to RHS
2775   if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
2776       !DAG.isConstantIntBuildVectorOrConstantInt(N1))
2777     return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
2778
2779   return SDValue();
2780 }
2781
2782 /// If this is a binary operator with two operands of the same opcode, try to
2783 /// simplify it.
2784 SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
2785   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2786   EVT VT = N0.getValueType();
2787   assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
2788
2789   // Bail early if none of these transforms apply.
2790   if (N0.getNumOperands() == 0) return SDValue();
2791
2792   // For each of OP in AND/OR/XOR:
2793   // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
2794   // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
2795   // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
2796   // fold (OP (bswap x), (bswap y)) -> (bswap (OP x, y))
2797   // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
2798   //
2799   // do not sink logical op inside of a vector extend, since it may combine
2800   // into a vsetcc.
2801   EVT Op0VT = N0.getOperand(0).getValueType();
2802   if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
2803        N0.getOpcode() == ISD::SIGN_EXTEND ||
2804        N0.getOpcode() == ISD::BSWAP ||
2805        // Avoid infinite looping with PromoteIntBinOp.
2806        (N0.getOpcode() == ISD::ANY_EXTEND &&
2807         (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
2808        (N0.getOpcode() == ISD::TRUNCATE &&
2809         (!TLI.isZExtFree(VT, Op0VT) ||
2810          !TLI.isTruncateFree(Op0VT, VT)) &&
2811         TLI.isTypeLegal(Op0VT))) &&
2812       !VT.isVector() &&
2813       Op0VT == N1.getOperand(0).getValueType() &&
2814       (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
2815     SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
2816                                  N0.getOperand(0).getValueType(),
2817                                  N0.getOperand(0), N1.getOperand(0));
2818     AddToWorklist(ORNode.getNode());
2819     return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode);
2820   }
2821
2822   // For each of OP in SHL/SRL/SRA/AND...
2823   //   fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
2824   //   fold (or  (OP x, z), (OP y, z)) -> (OP (or  x, y), z)
2825   //   fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
2826   if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
2827        N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
2828       N0.getOperand(1) == N1.getOperand(1)) {
2829     SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
2830                                  N0.getOperand(0).getValueType(),
2831                                  N0.getOperand(0), N1.getOperand(0));
2832     AddToWorklist(ORNode.getNode());
2833     return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
2834                        ORNode, N0.getOperand(1));
2835   }
2836
2837   // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
2838   // Only perform this optimization up until type legalization, before
2839   // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
2840   // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
2841   // we don't want to undo this promotion.
2842   // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
2843   // on scalars.
2844   if ((N0.getOpcode() == ISD::BITCAST ||
2845        N0.getOpcode() == ISD::SCALAR_TO_VECTOR) &&
2846       Level <= AfterLegalizeTypes) {
2847     SDValue In0 = N0.getOperand(0);
2848     SDValue In1 = N1.getOperand(0);
2849     EVT In0Ty = In0.getValueType();
2850     EVT In1Ty = In1.getValueType();
2851     SDLoc DL(N);
2852     // If both incoming values are integers, and the original types are the
2853     // same.
2854     if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
2855       SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
2856       SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
2857       AddToWorklist(Op.getNode());
2858       return BC;
2859     }
2860   }
2861
2862   // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
2863   // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
2864   // If both shuffles use the same mask, and both shuffle within a single
2865   // vector, then it is worthwhile to move the swizzle after the operation.
2866 // The type-legalizer generates this pattern when loading illegal 2867 // vector types from memory. In many cases this allows additional shuffle 2868 // optimizations. 2869 // There are other cases where moving the shuffle after the xor/and/or 2870 // is profitable even if shuffles don't perform a swizzle. 2871 // If both shuffles use the same mask, and both shuffles have the same first 2872 // or second operand, then it might still be profitable to move the shuffle 2873 // after the xor/and/or operation. 2874 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) { 2875 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0); 2876 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1); 2877 2878 assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() && 2879 "Inputs to shuffles are not the same type"); 2880 2881 // Check that both shuffles use the same mask. The masks are known to be of 2882 // the same length because the result vector type is the same. 2883 // Check also that shuffles have only one use to avoid introducing extra 2884 // instructions. 2885 if (SVN0->hasOneUse() && SVN1->hasOneUse() && 2886 SVN0->getMask().equals(SVN1->getMask())) { 2887 SDValue ShOp = N0->getOperand(1); 2888 2889 // Don't try to fold this node if it requires introducing a 2890 // build vector of all zeros that might be illegal at this stage. 2891 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) { 2892 if (!LegalTypes) 2893 ShOp = DAG.getConstant(0, SDLoc(N), VT); 2894 else 2895 ShOp = SDValue(); 2896 } 2897 2898 // (AND (shuf (A, C), shuf (B, C)) -> shuf (AND (A, B), C) 2899 // (OR (shuf (A, C), shuf (B, C)) -> shuf (OR (A, B), C) 2900 // (XOR (shuf (A, C), shuf (B, C)) -> shuf (XOR (A, B), V_0) 2901 if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) { 2902 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 2903 N0->getOperand(0), N1->getOperand(0)); 2904 AddToWorklist(NewNode.getNode()); 2905 return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp, 2906 SVN0->getMask()); 2907 } 2908 2909 // Don't try to fold this node if it requires introducing a 2910 // build vector of all zeros that might be illegal at this stage. 2911 ShOp = N0->getOperand(0); 2912 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) { 2913 if (!LegalTypes) 2914 ShOp = DAG.getConstant(0, SDLoc(N), VT); 2915 else 2916 ShOp = SDValue(); 2917 } 2918 2919 // (AND (shuf (C, A), shuf (C, B)) -> shuf (C, AND (A, B)) 2920 // (OR (shuf (C, A), shuf (C, B)) -> shuf (C, OR (A, B)) 2921 // (XOR (shuf (C, A), shuf (C, B)) -> shuf (V_0, XOR (A, B)) 2922 if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) { 2923 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 2924 N0->getOperand(1), N1->getOperand(1)); 2925 AddToWorklist(NewNode.getNode()); 2926 return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode, 2927 SVN0->getMask()); 2928 } 2929 } 2930 } 2931 2932 return SDValue(); 2933 } 2934 2935 /// This contains all DAGCombine rules which reduce two values combined by 2936 /// an And operation to a single value. This makes them reusable in the context 2937 /// of visitSELECT(). Rules involving constants are not included as 2938 /// visitSELECT() already handles those cases. 
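/// For example (an illustrative note, not from the original source):
/// (and (seteq X, 0), (seteq Y, 0)) collapses to (seteq (or X, Y), 0) whether
/// it arises as an explicit AND or as the condition computed for a select.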
2939 SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, 2940 SDNode *LocReference) { 2941 EVT VT = N1.getValueType(); 2942 2943 // fold (and x, undef) -> 0 2944 if (N0.isUndef() || N1.isUndef()) 2945 return DAG.getConstant(0, SDLoc(LocReference), VT); 2946 // fold (and (setcc x), (setcc y)) -> (setcc (and x, y)) 2947 SDValue LL, LR, RL, RR, CC0, CC1; 2948 if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){ 2949 ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get(); 2950 ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); 2951 2952 if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && 2953 LL.getValueType().isInteger()) { 2954 // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0) 2955 if (isNullConstant(LR) && Op1 == ISD::SETEQ) { 2956 EVT CCVT = getSetCCResultType(LR.getValueType()); 2957 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 2958 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0), 2959 LR.getValueType(), LL, RL); 2960 AddToWorklist(ORNode.getNode()); 2961 return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1); 2962 } 2963 } 2964 if (isAllOnesConstant(LR)) { 2965 // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1) 2966 if (Op1 == ISD::SETEQ) { 2967 EVT CCVT = getSetCCResultType(LR.getValueType()); 2968 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 2969 SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0), 2970 LR.getValueType(), LL, RL); 2971 AddToWorklist(ANDNode.getNode()); 2972 return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1); 2973 } 2974 } 2975 // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1) 2976 if (Op1 == ISD::SETGT) { 2977 EVT CCVT = getSetCCResultType(LR.getValueType()); 2978 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 2979 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0), 2980 LR.getValueType(), LL, RL); 2981 AddToWorklist(ORNode.getNode()); 2982 return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1); 2983 } 2984 } 2985 } 2986 } 2987 // Simplify (and (setne X, 0), (setne X, -1)) -> (setuge (add X, 1), 2) 2988 if (LL == RL && isa<ConstantSDNode>(LR) && isa<ConstantSDNode>(RR) && 2989 Op0 == Op1 && LL.getValueType().isInteger() && 2990 Op0 == ISD::SETNE && ((isNullConstant(LR) && isAllOnesConstant(RR)) || 2991 (isAllOnesConstant(LR) && isNullConstant(RR)))) { 2992 EVT CCVT = getSetCCResultType(LL.getValueType()); 2993 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 2994 SDLoc DL(N0); 2995 SDValue ADDNode = DAG.getNode(ISD::ADD, DL, LL.getValueType(), 2996 LL, DAG.getConstant(1, DL, 2997 LL.getValueType())); 2998 AddToWorklist(ADDNode.getNode()); 2999 return DAG.getSetCC(SDLoc(LocReference), VT, ADDNode, 3000 DAG.getConstant(2, DL, LL.getValueType()), 3001 ISD::SETUGE); 3002 } 3003 } 3004 // canonicalize equivalent to ll == rl 3005 if (LL == RR && LR == RL) { 3006 Op1 = ISD::getSetCCSwappedOperands(Op1); 3007 std::swap(RL, RR); 3008 } 3009 if (LL == RL && LR == RR) { 3010 bool isInteger = LL.getValueType().isInteger(); 3011 ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger); 3012 if (Result != ISD::SETCC_INVALID && 3013 (!LegalOperations || 3014 (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) && 3015 TLI.isOperationLegal(ISD::SETCC, LL.getValueType())))) { 3016 EVT CCVT = getSetCCResultType(LL.getValueType()); 3017 if (N0.getValueType() == CCVT || 3018 (!LegalOperations && N0.getValueType() == MVT::i1)) 3019 return DAG.getSetCC(SDLoc(LocReference), N0.getValueType(), 3020 LL, LR, Result); 3021 } 3022 } 
3023   }
3024
3025   if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
3026       VT.getSizeInBits() <= 64) {
3027     if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3028       APInt ADDC = ADDI->getAPIntValue();
3029       if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
3030         // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
3031         // immediate for an add, but it is legal if its top c2 bits are set,
3032         // transform the ADD so the immediate doesn't need to be materialized
3033         // in a register.
3034         if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
3035           APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
3036                                              SRLI->getZExtValue());
3037           if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
3038             ADDC |= Mask;
3039             if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
3040               SDLoc DL(N0);
3041               SDValue NewAdd =
3042                 DAG.getNode(ISD::ADD, DL, VT,
3043                             N0.getOperand(0), DAG.getConstant(ADDC, DL, VT));
3044               CombineTo(N0.getNode(), NewAdd);
3045               // Return N so it doesn't get rechecked!
3046               return SDValue(LocReference, 0);
3047             }
3048           }
3049         }
3050       }
3051     }
3052   }
3053
3054   // Reduce bit extract of low half of an integer to the narrower type.
3055   // (and (srl i64:x, K), KMask) ->
3056   //   (i64 zero_extend (and (srl (i32 (trunc i64:x)), K), KMask))
3057   if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
3058     if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) {
3059       if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3060         unsigned Size = VT.getSizeInBits();
3061         const APInt &AndMask = CAnd->getAPIntValue();
3062         unsigned ShiftBits = CShift->getZExtValue();
3063
3064         // Bail out, this node will probably disappear anyway.
3065         if (ShiftBits == 0)
3066           return SDValue();
3067
3068         unsigned MaskBits = AndMask.countTrailingOnes();
3069         EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2);
3070
3071         if (APIntOps::isMask(AndMask) &&
3072             // Required bits must not span the two halves of the integer and
3073             // must fit in the half size type.
3074             (ShiftBits + MaskBits <= Size / 2) &&
3075             TLI.isNarrowingProfitable(VT, HalfVT) &&
3076             TLI.isTypeDesirableForOp(ISD::AND, HalfVT) &&
3077             TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) &&
3078             TLI.isTruncateFree(VT, HalfVT) &&
3079             TLI.isZExtFree(HalfVT, VT)) {
3080           // The isNarrowingProfitable is to avoid regressions on PPC and
3081           // AArch64 which match a few 64-bit bit insert / bit extract patterns
3082           // on downstream users of this. Those patterns could probably be
3083           // extended to handle extensions mixed in.
3084
3085           SDLoc SL(N0);
3086           assert(MaskBits <= Size);
3087
3088           // Extracting the highest bit of the low half.
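// Illustrative example (added note): i64 (and (srl x, 12), 0xFFFF) becomes
// (zext (and (srl (trunc x to i32), 12), 0xFFFF)) since bits 12..27 lie
// entirely within the low half.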
3089 EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout()); 3090 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT, 3091 N0.getOperand(0)); 3092 3093 SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT); 3094 SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT); 3095 SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK); 3096 SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask); 3097 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And); 3098 } 3099 } 3100 } 3101 } 3102 3103 return SDValue(); 3104 } 3105 3106 bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN, 3107 EVT LoadResultTy, EVT &ExtVT, EVT &LoadedVT, 3108 bool &NarrowLoad) { 3109 uint32_t ActiveBits = AndC->getAPIntValue().getActiveBits(); 3110 3111 if (ActiveBits == 0 || !APIntOps::isMask(ActiveBits, AndC->getAPIntValue())) 3112 return false; 3113 3114 ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); 3115 LoadedVT = LoadN->getMemoryVT(); 3116 3117 if (ExtVT == LoadedVT && 3118 (!LegalOperations || 3119 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) { 3120 // ZEXTLOAD will match without needing to change the size of the value being 3121 // loaded. 3122 NarrowLoad = false; 3123 return true; 3124 } 3125 3126 // Do not change the width of a volatile load. 3127 if (LoadN->isVolatile()) 3128 return false; 3129 3130 // Do not generate loads of non-round integer types since these can 3131 // be expensive (and would be wrong if the type is not byte sized). 3132 if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound()) 3133 return false; 3134 3135 if (LegalOperations && 3136 !TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT)) 3137 return false; 3138 3139 if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT)) 3140 return false; 3141 3142 NarrowLoad = true; 3143 return true; 3144 } 3145 3146 SDValue DAGCombiner::visitAND(SDNode *N) { 3147 SDValue N0 = N->getOperand(0); 3148 SDValue N1 = N->getOperand(1); 3149 EVT VT = N1.getValueType(); 3150 3151 // x & x --> x 3152 if (N0 == N1) 3153 return N0; 3154 3155 // fold vector ops 3156 if (VT.isVector()) { 3157 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 3158 return FoldedVOp; 3159 3160 // fold (and x, 0) -> 0, vector edition 3161 if (ISD::isBuildVectorAllZeros(N0.getNode())) 3162 // do not return N0, because undef node may exist in N0 3163 return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()), 3164 SDLoc(N), N0.getValueType()); 3165 if (ISD::isBuildVectorAllZeros(N1.getNode())) 3166 // do not return N1, because undef node may exist in N1 3167 return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()), 3168 SDLoc(N), N1.getValueType()); 3169 3170 // fold (and x, -1) -> x, vector edition 3171 if (ISD::isBuildVectorAllOnes(N0.getNode())) 3172 return N1; 3173 if (ISD::isBuildVectorAllOnes(N1.getNode())) 3174 return N0; 3175 } 3176 3177 // fold (and c1, c2) -> c1&c2 3178 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 3179 ConstantSDNode *N1C = isConstOrConstSplat(N1); 3180 if (N0C && N1C && !N1C->isOpaque()) 3181 return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C); 3182 // canonicalize constant to RHS 3183 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 3184 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 3185 return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0); 3186 // fold (and x, -1) -> x 3187 if (isAllOnesConstant(N1)) 3188 return N0; 3189 // if (and x, c) is known to be zero, return 0 3190 unsigned BitWidth = VT.getScalarSizeInBits(); 3191 if 
(N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 3192 APInt::getAllOnesValue(BitWidth))) 3193 return DAG.getConstant(0, SDLoc(N), VT); 3194 // reassociate and 3195 if (SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1)) 3196 return RAND; 3197 // fold (and (or x, C), D) -> D if (C & D) == D 3198 if (N1C && N0.getOpcode() == ISD::OR) 3199 if (ConstantSDNode *ORI = isConstOrConstSplat(N0.getOperand(1))) 3200 if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue()) 3201 return N1; 3202 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. 3203 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 3204 SDValue N0Op0 = N0.getOperand(0); 3205 APInt Mask = ~N1C->getAPIntValue(); 3206 Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits()); 3207 if (DAG.MaskedValueIsZero(N0Op0, Mask)) { 3208 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), 3209 N0.getValueType(), N0Op0); 3210 3211 // Replace uses of the AND with uses of the Zero extend node. 3212 CombineTo(N, Zext); 3213 3214 // We actually want to replace all uses of the any_extend with the 3215 // zero_extend, to avoid duplicating things. This will later cause this 3216 // AND to be folded. 3217 CombineTo(N0.getNode(), Zext); 3218 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3219 } 3220 } 3221 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) -> 3222 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must 3223 // already be zero by virtue of the width of the base type of the load. 3224 // 3225 // the 'X' node here can either be nothing or an extract_vector_elt to catch 3226 // more cases. 3227 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 3228 N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() && 3229 N0.getOperand(0).getOpcode() == ISD::LOAD && 3230 N0.getOperand(0).getResNo() == 0) || 3231 (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) { 3232 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ? 3233 N0 : N0.getOperand(0) ); 3234 3235 // Get the constant (if applicable) the zero'th operand is being ANDed with. 3236 // This can be a pure constant or a vector splat, in which case we treat the 3237 // vector as a scalar and use the splat value. 3238 APInt Constant = APInt::getNullValue(1); 3239 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 3240 Constant = C->getAPIntValue(); 3241 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { 3242 APInt SplatValue, SplatUndef; 3243 unsigned SplatBitSize; 3244 bool HasAnyUndefs; 3245 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef, 3246 SplatBitSize, HasAnyUndefs); 3247 if (IsSplat) { 3248 // Undef bits can contribute to a possible optimisation if set, so 3249 // set them. 3250 SplatValue |= SplatUndef; 3251 3252 // The splat value may be something like "0x00FFFFFF", which means 0 for 3253 // the first vector value and FF for the rest, repeating. We need a mask 3254 // that will apply equally to all members of the vector, so AND all the 3255 // lanes of the constant together. 3256 EVT VT = Vector->getValueType(0); 3257 unsigned BitWidth = VT.getScalarSizeInBits(); 3258 3259 // If the splat value has been compressed to a bitlength lower 3260 // than the size of the vector lane, we need to re-expand it to 3261 // the lane size. 
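        // For example, a splat value of 0x00ff with SplatBitSize 16 in a
        // vector of i32 lanes is re-expanded to 0x00ff00ff so it applies
        // uniformly to each full lane.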
3262 if (BitWidth > SplatBitSize) 3263 for (SplatValue = SplatValue.zextOrTrunc(BitWidth); 3264 SplatBitSize < BitWidth; 3265 SplatBitSize = SplatBitSize * 2) 3266 SplatValue |= SplatValue.shl(SplatBitSize); 3267 3268 // Make sure that variable 'Constant' is only set if 'SplatBitSize' is a 3269 // multiple of 'BitWidth'. Otherwise, we could propagate a wrong value. 3270 if (SplatBitSize % BitWidth == 0) { 3271 Constant = APInt::getAllOnesValue(BitWidth); 3272 for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i) 3273 Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth); 3274 } 3275 } 3276 } 3277 3278 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is 3279 // actually legal and isn't going to get expanded, else this is a false 3280 // optimisation. 3281 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD, 3282 Load->getValueType(0), 3283 Load->getMemoryVT()); 3284 3285 // Resize the constant to the same size as the original memory access before 3286 // extension. If it is still the AllOnesValue then this AND is completely 3287 // unneeded. 3288 Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits()); 3289 3290 bool B; 3291 switch (Load->getExtensionType()) { 3292 default: B = false; break; 3293 case ISD::EXTLOAD: B = CanZextLoadProfitably; break; 3294 case ISD::ZEXTLOAD: 3295 case ISD::NON_EXTLOAD: B = true; break; 3296 } 3297 3298 if (B && Constant.isAllOnesValue()) { 3299 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to 3300 // preserve semantics once we get rid of the AND. 3301 SDValue NewLoad(Load, 0); 3302 if (Load->getExtensionType() == ISD::EXTLOAD) { 3303 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD, 3304 Load->getValueType(0), SDLoc(Load), 3305 Load->getChain(), Load->getBasePtr(), 3306 Load->getOffset(), Load->getMemoryVT(), 3307 Load->getMemOperand()); 3308 // Replace uses of the EXTLOAD with the new ZEXTLOAD. 3309 if (Load->getNumValues() == 3) { 3310 // PRE/POST_INC loads have 3 values. 3311 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1), 3312 NewLoad.getValue(2) }; 3313 CombineTo(Load, To, 3, true); 3314 } else { 3315 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1)); 3316 } 3317 } 3318 3319 // Fold the AND away, taking care not to fold to the old load node if we 3320 // replaced it. 3321 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0); 3322 3323 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3324 } 3325 } 3326 3327 // fold (and (load x), 255) -> (zextload x, i8) 3328 // fold (and (extload x, i16), 255) -> (zextload x, i8) 3329 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8) 3330 if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD || 3331 (N0.getOpcode() == ISD::ANY_EXTEND && 3332 N0.getOperand(0).getOpcode() == ISD::LOAD))) { 3333 bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND; 3334 LoadSDNode *LN0 = HasAnyExt 3335 ? cast<LoadSDNode>(N0.getOperand(0)) 3336 : cast<LoadSDNode>(N0); 3337 if (LN0->getExtensionType() != ISD::SEXTLOAD && 3338 LN0->isUnindexed() && N0.hasOneUse() && SDValue(LN0, 0).hasOneUse()) { 3339 auto NarrowLoad = false; 3340 EVT LoadResultTy = HasAnyExt ? 
LN0->getValueType(0) : VT; 3341 EVT ExtVT, LoadedVT; 3342 if (isAndLoadExtLoad(N1C, LN0, LoadResultTy, ExtVT, LoadedVT, 3343 NarrowLoad)) { 3344 if (!NarrowLoad) { 3345 SDValue NewLoad = 3346 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, 3347 LN0->getChain(), LN0->getBasePtr(), ExtVT, 3348 LN0->getMemOperand()); 3349 AddToWorklist(N); 3350 CombineTo(LN0, NewLoad, NewLoad.getValue(1)); 3351 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3352 } else { 3353 EVT PtrType = LN0->getOperand(1).getValueType(); 3354 3355 unsigned Alignment = LN0->getAlignment(); 3356 SDValue NewPtr = LN0->getBasePtr(); 3357 3358 // For big endian targets, we need to add an offset to the pointer 3359 // to load the correct bytes. For little endian systems, we merely 3360 // need to read fewer bytes from the same pointer. 3361 if (DAG.getDataLayout().isBigEndian()) { 3362 unsigned LVTStoreBytes = LoadedVT.getStoreSize(); 3363 unsigned EVTStoreBytes = ExtVT.getStoreSize(); 3364 unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; 3365 SDLoc DL(LN0); 3366 NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, 3367 NewPtr, DAG.getConstant(PtrOff, DL, PtrType)); 3368 Alignment = MinAlign(Alignment, PtrOff); 3369 } 3370 3371 AddToWorklist(NewPtr.getNode()); 3372 3373 SDValue Load = DAG.getExtLoad( 3374 ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, LN0->getChain(), NewPtr, 3375 LN0->getPointerInfo(), ExtVT, Alignment, 3376 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 3377 AddToWorklist(N); 3378 CombineTo(LN0, Load, Load.getValue(1)); 3379 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3380 } 3381 } 3382 } 3383 } 3384 3385 if (SDValue Combined = visitANDLike(N0, N1, N)) 3386 return Combined; 3387 3388 // Simplify: (and (op x...), (op y...)) -> (op (and x, y)) 3389 if (N0.getOpcode() == N1.getOpcode()) 3390 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N)) 3391 return Tmp; 3392 3393 // Masking the negated extension of a boolean is just the zero-extended 3394 // boolean: 3395 // and (sub 0, zext(bool X)), 1 --> zext(bool X) 3396 // and (sub 0, sext(bool X)), 1 --> zext(bool X) 3397 // 3398 // Note: the SimplifyDemandedBits fold below can make an information-losing 3399 // transform, and then we have no way to find this better fold. 3400 if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) { 3401 ConstantSDNode *SubLHS = isConstOrConstSplat(N0.getOperand(0)); 3402 SDValue SubRHS = N0.getOperand(1); 3403 if (SubLHS && SubLHS->isNullValue()) { 3404 if (SubRHS.getOpcode() == ISD::ZERO_EXTEND && 3405 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1) 3406 return SubRHS; 3407 if (SubRHS.getOpcode() == ISD::SIGN_EXTEND && 3408 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1) 3409 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, SubRHS.getOperand(0)); 3410 } 3411 } 3412 3413 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) 3414 // fold (and (sra)) -> (and (srl)) when possible. 3415 if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0))) 3416 return SDValue(N, 0); 3417 3418 // fold (zext_inreg (extload x)) -> (zextload x) 3419 if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) { 3420 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 3421 EVT MemVT = LN0->getMemoryVT(); 3422 // If we zero all the possible extended bits, then we can turn this into 3423 // a zextload if we are running before legalize or the operation is legal. 
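    // For example, (and (extload i8 -> i32, p), 0xff) only keeps bits that
    // the i8 load defines, so the extload can be relaxed to a zextload and
    // the AND can then be dropped as redundant.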
3424 unsigned BitWidth = N1.getScalarValueSizeInBits(); 3425 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 3426 BitWidth - MemVT.getScalarSizeInBits())) && 3427 ((!LegalOperations && !LN0->isVolatile()) || 3428 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) { 3429 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 3430 LN0->getChain(), LN0->getBasePtr(), 3431 MemVT, LN0->getMemOperand()); 3432 AddToWorklist(N); 3433 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 3434 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3435 } 3436 } 3437 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use 3438 if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 3439 N0.hasOneUse()) { 3440 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 3441 EVT MemVT = LN0->getMemoryVT(); 3442 // If we zero all the possible extended bits, then we can turn this into 3443 // a zextload if we are running before legalize or the operation is legal. 3444 unsigned BitWidth = N1.getScalarValueSizeInBits(); 3445 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 3446 BitWidth - MemVT.getScalarSizeInBits())) && 3447 ((!LegalOperations && !LN0->isVolatile()) || 3448 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) { 3449 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 3450 LN0->getChain(), LN0->getBasePtr(), 3451 MemVT, LN0->getMemOperand()); 3452 AddToWorklist(N); 3453 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 3454 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3455 } 3456 } 3457 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const) 3458 if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) { 3459 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 3460 N0.getOperand(1), false)) 3461 return BSwap; 3462 } 3463 3464 return SDValue(); 3465 } 3466 3467 /// Match (a >> 8) | (a << 8) as (bswap a) >> 16. 
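/// For types wider than i16 this typically matches
///   (or (and (srl x, 8), 0x00ff), (and (shl x, 8), 0xff00))
/// and rewrites it as (srl (bswap x), BitWidth - 16).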
SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                                        bool DemandHighBits) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
    return SDValue();
  if (!TLI.isOperationLegal(ISD::BSWAP, VT))
    return SDValue();

  // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
  bool LookPassAnd0 = false;
  bool LookPassAnd1 = false;
  if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
    std::swap(N0, N1);
  if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() == ISD::AND) {
    if (!N0.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!N01C || N01C->getZExtValue() != 0xFF00)
      return SDValue();
    N0 = N0.getOperand(0);
    LookPassAnd0 = true;
  }

  if (N1.getOpcode() == ISD::AND) {
    if (!N1.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C || N11C->getZExtValue() != 0xFF)
      return SDValue();
    N1 = N1.getOperand(0);
    LookPassAnd1 = true;
  }

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse())
    return SDValue();

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
  if (!N01C || !N11C)
    return SDValue();
  if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
    return SDValue();

  // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
  SDValue N00 = N0->getOperand(0);
  if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
    if (!N00.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
    if (!N001C || N001C->getZExtValue() != 0xFF)
      return SDValue();
    N00 = N00.getOperand(0);
    LookPassAnd0 = true;
  }

  SDValue N10 = N1->getOperand(0);
  if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
    if (!N10.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
    if (!N101C || N101C->getZExtValue() != 0xFF00)
      return SDValue();
    N10 = N10.getOperand(0);
    LookPassAnd1 = true;
  }

  if (N00 != N10)
    return SDValue();

  // Make sure everything beyond the low halfword gets set to zero since the
  // SRL 16 will clear the top bits.
  unsigned OpSizeInBits = VT.getSizeInBits();
  if (DemandHighBits && OpSizeInBits > 16) {
    // If the left-shift isn't masked out then the only way this is a bswap is
    // if all bits beyond the low 8 are 0. In that case the entire pattern
    // reduces to a left shift anyway: leave it for other parts of the combiner.
    if (!LookPassAnd0)
      return SDValue();

    // However, if the right shift isn't masked out then it might be because
    // it's not needed. See if we can spot that too.
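    // (That is, if the value feeding the SRL already has all bits above the
    // low halfword known zero, the mask was simply unnecessary.)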
3558 if (!LookPassAnd1 && 3559 !DAG.MaskedValueIsZero( 3560 N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16))) 3561 return SDValue(); 3562 } 3563 3564 SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00); 3565 if (OpSizeInBits > 16) { 3566 SDLoc DL(N); 3567 Res = DAG.getNode(ISD::SRL, DL, VT, Res, 3568 DAG.getConstant(OpSizeInBits - 16, DL, 3569 getShiftAmountTy(VT))); 3570 } 3571 return Res; 3572 } 3573 3574 /// Return true if the specified node is an element that makes up a 32-bit 3575 /// packed halfword byteswap. 3576 /// ((x & 0x000000ff) << 8) | 3577 /// ((x & 0x0000ff00) >> 8) | 3578 /// ((x & 0x00ff0000) << 8) | 3579 /// ((x & 0xff000000) >> 8) 3580 static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) { 3581 if (!N.getNode()->hasOneUse()) 3582 return false; 3583 3584 unsigned Opc = N.getOpcode(); 3585 if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL) 3586 return false; 3587 3588 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3589 if (!N1C) 3590 return false; 3591 3592 unsigned Num; 3593 switch (N1C->getZExtValue()) { 3594 default: 3595 return false; 3596 case 0xFF: Num = 0; break; 3597 case 0xFF00: Num = 1; break; 3598 case 0xFF0000: Num = 2; break; 3599 case 0xFF000000: Num = 3; break; 3600 } 3601 3602 // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00). 3603 SDValue N0 = N.getOperand(0); 3604 if (Opc == ISD::AND) { 3605 if (Num == 0 || Num == 2) { 3606 // (x >> 8) & 0xff 3607 // (x >> 8) & 0xff0000 3608 if (N0.getOpcode() != ISD::SRL) 3609 return false; 3610 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3611 if (!C || C->getZExtValue() != 8) 3612 return false; 3613 } else { 3614 // (x << 8) & 0xff00 3615 // (x << 8) & 0xff000000 3616 if (N0.getOpcode() != ISD::SHL) 3617 return false; 3618 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3619 if (!C || C->getZExtValue() != 8) 3620 return false; 3621 } 3622 } else if (Opc == ISD::SHL) { 3623 // (x & 0xff) << 8 3624 // (x & 0xff0000) << 8 3625 if (Num != 0 && Num != 2) 3626 return false; 3627 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3628 if (!C || C->getZExtValue() != 8) 3629 return false; 3630 } else { // Opc == ISD::SRL 3631 // (x & 0xff00) >> 8 3632 // (x & 0xff000000) >> 8 3633 if (Num != 1 && Num != 3) 3634 return false; 3635 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3636 if (!C || C->getZExtValue() != 8) 3637 return false; 3638 } 3639 3640 if (Parts[Num]) 3641 return false; 3642 3643 Parts[Num] = N0.getOperand(0).getNode(); 3644 return true; 3645 } 3646 3647 /// Match a 32-bit packed halfword bswap. 
That is 3648 /// ((x & 0x000000ff) << 8) | 3649 /// ((x & 0x0000ff00) >> 8) | 3650 /// ((x & 0x00ff0000) << 8) | 3651 /// ((x & 0xff000000) >> 8) 3652 /// => (rotl (bswap x), 16) 3653 SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) { 3654 if (!LegalOperations) 3655 return SDValue(); 3656 3657 EVT VT = N->getValueType(0); 3658 if (VT != MVT::i32) 3659 return SDValue(); 3660 if (!TLI.isOperationLegal(ISD::BSWAP, VT)) 3661 return SDValue(); 3662 3663 // Look for either 3664 // (or (or (and), (and)), (or (and), (and))) 3665 // (or (or (or (and), (and)), (and)), (and)) 3666 if (N0.getOpcode() != ISD::OR) 3667 return SDValue(); 3668 SDValue N00 = N0.getOperand(0); 3669 SDValue N01 = N0.getOperand(1); 3670 SDNode *Parts[4] = {}; 3671 3672 if (N1.getOpcode() == ISD::OR && 3673 N00.getNumOperands() == 2 && N01.getNumOperands() == 2) { 3674 // (or (or (and), (and)), (or (and), (and))) 3675 SDValue N000 = N00.getOperand(0); 3676 if (!isBSwapHWordElement(N000, Parts)) 3677 return SDValue(); 3678 3679 SDValue N001 = N00.getOperand(1); 3680 if (!isBSwapHWordElement(N001, Parts)) 3681 return SDValue(); 3682 SDValue N010 = N01.getOperand(0); 3683 if (!isBSwapHWordElement(N010, Parts)) 3684 return SDValue(); 3685 SDValue N011 = N01.getOperand(1); 3686 if (!isBSwapHWordElement(N011, Parts)) 3687 return SDValue(); 3688 } else { 3689 // (or (or (or (and), (and)), (and)), (and)) 3690 if (!isBSwapHWordElement(N1, Parts)) 3691 return SDValue(); 3692 if (!isBSwapHWordElement(N01, Parts)) 3693 return SDValue(); 3694 if (N00.getOpcode() != ISD::OR) 3695 return SDValue(); 3696 SDValue N000 = N00.getOperand(0); 3697 if (!isBSwapHWordElement(N000, Parts)) 3698 return SDValue(); 3699 SDValue N001 = N00.getOperand(1); 3700 if (!isBSwapHWordElement(N001, Parts)) 3701 return SDValue(); 3702 } 3703 3704 // Make sure the parts are all coming from the same node. 3705 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3]) 3706 return SDValue(); 3707 3708 SDLoc DL(N); 3709 SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, 3710 SDValue(Parts[0], 0)); 3711 3712 // Result of the bswap should be rotated by 16. If it's not legal, then 3713 // do (x << 16) | (x >> 16). 3714 SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT)); 3715 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT)) 3716 return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt); 3717 if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) 3718 return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt); 3719 return DAG.getNode(ISD::OR, DL, VT, 3720 DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt), 3721 DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt)); 3722 } 3723 3724 /// This contains all DAGCombine rules which reduce two values combined by 3725 /// an Or operation to a single value \see visitANDLike(). 3726 SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *LocReference) { 3727 EVT VT = N1.getValueType(); 3728 // fold (or x, undef) -> -1 3729 if (!LegalOperations && 3730 (N0.isUndef() || N1.isUndef())) { 3731 EVT EltVT = VT.isVector() ? 
      VT.getVectorElementType() : VT;
    return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
                           SDLoc(LocReference), VT);
  }
  // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
  SDValue LL, LR, RL, RR, CC0, CC1;
  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();

    if (LR == RR && Op0 == Op1 && LL.getValueType().isInteger()) {
      // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
      // fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
      if (isNullConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
        EVT CCVT = getSetCCResultType(LR.getValueType());
        if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) {
          SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(LR),
                                       LR.getValueType(), LL, RL);
          AddToWorklist(ORNode.getNode());
          return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
        }
      }
      // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
      // fold (or (setgt X, -1), (setgt Y, -1)) -> (setgt (and X, Y), -1)
      if (isAllOnesConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
        EVT CCVT = getSetCCResultType(LR.getValueType());
        if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) {
          SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(LR),
                                        LR.getValueType(), LL, RL);
          AddToWorklist(ANDNode.getNode());
          return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1);
        }
      }
    }
    // canonicalize equivalent to ll == rl
    if (LL == RR && LR == RL) {
      Op1 = ISD::getSetCCSwappedOperands(Op1);
      std::swap(RL, RR);
    }
    if (LL == RL && LR == RR) {
      bool isInteger = LL.getValueType().isInteger();
      ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
      if (Result != ISD::SETCC_INVALID &&
          (!LegalOperations ||
           (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) &&
            TLI.isOperationLegal(ISD::SETCC, LL.getValueType())))) {
        EVT CCVT = getSetCCResultType(LL.getValueType());
        if (N0.getValueType() == CCVT ||
            (!LegalOperations && N0.getValueType() == MVT::i1))
          return DAG.getSetCC(SDLoc(LocReference), N0.getValueType(),
                              LL, LR, Result);
      }
    }
  }

  // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
  if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
      // Don't increase # computations.
      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
    // We can only do this xform if we know that bits from X that are set in C2
    // but not in C1 are already zero. Likewise for Y.
    if (const ConstantSDNode *N0O1C =
        getAsNonOpaqueConstant(N0.getOperand(1))) {
      if (const ConstantSDNode *N1O1C =
          getAsNonOpaqueConstant(N1.getOperand(1))) {
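        // e.g. (or (and X, 0xff00), (and Y, 0x00ff))
        //        -> (and (or X, Y), 0xffff)
        // when X's 0x00ff bits and Y's 0xff00 bits are known zero.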
        const APInt &LHSMask = N0O1C->getAPIntValue();
        const APInt &RHSMask = N1O1C->getAPIntValue();

        if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
            DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
          SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
                                  N0.getOperand(0), N1.getOperand(0));
          SDLoc DL(LocReference);
          return DAG.getNode(ISD::AND, DL, VT, X,
                             DAG.getConstant(LHSMask | RHSMask, DL, VT));
        }
      }
    }
  }

  // (or (and X, M), (and X, N)) -> (and X, (or M, N))
  if (N0.getOpcode() == ISD::AND &&
      N1.getOpcode() == ISD::AND &&
      N0.getOperand(0) == N1.getOperand(0) &&
      // Don't increase # computations.
      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
    SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
                            N0.getOperand(1), N1.getOperand(1));
    return DAG.getNode(ISD::AND, SDLoc(LocReference), VT, N0.getOperand(0), X);
  }

  return SDValue();
}

SDValue DAGCombiner::visitOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N1.getValueType();

  // x | x --> x
  if (N0 == N1)
    return N0;

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    // fold (or x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;

    // fold (or x, -1) -> -1, vector edition
    if (ISD::isBuildVectorAllOnes(N0.getNode()))
      // do not return N0, because undef node may exist in N0
      return DAG.getConstant(
          APInt::getAllOnesValue(N0.getScalarValueSizeInBits()), SDLoc(N),
          N0.getValueType());
    if (ISD::isBuildVectorAllOnes(N1.getNode()))
      // do not return N1, because undef node may exist in N1
      return DAG.getConstant(
          APInt::getAllOnesValue(N1.getScalarValueSizeInBits()), SDLoc(N),
          N1.getValueType());

    // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask)
    // Do this only if the resulting shuffle is legal.
    if (isa<ShuffleVectorSDNode>(N0) &&
        isa<ShuffleVectorSDNode>(N1) &&
        // Avoid folding a node with illegal type.
        TLI.isTypeLegal(VT)) {
      bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode());
      bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode());
      bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
      bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode());
      // Ensure both shuffles have a zero input.
      if ((ZeroN00 || ZeroN01) && (ZeroN10 || ZeroN11)) {
        assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!");
        assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!");
        const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
        const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
        bool CanFold = true;
        int NumElts = VT.getVectorNumElements();
        SmallVector<int, 4> Mask(NumElts);

        for (int i = 0; i != NumElts; ++i) {
          int M0 = SV0->getMaskElt(i);
          int M1 = SV1->getMaskElt(i);

          // Determine if either index is pointing to a zero vector.
          bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts));
          bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts));

          // If one element is zero and the other side is undef, keep undef.
          // This also handles the case that both are undef.
          if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) {
            Mask[i] = -1;
            continue;
          }

          // Make sure only one of the elements is zero.
          if (M0Zero == M1Zero) {
            CanFold = false;
            break;
          }

          assert((M0 >= 0 || M1 >= 0) && "Undef index!");

          // We have a zero and non-zero element. If the non-zero came from
          // SV0 make the index a LHS index. If it came from SV1, make it
          // a RHS index. We need to mod by NumElts because we don't care
          // which operand it came from in the original shuffles.
          Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts;
        }

        if (CanFold) {
          SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0);
          SDValue NewRHS = ZeroN10 ? N1.getOperand(1) : N1.getOperand(0);

          bool LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
          if (!LegalMask) {
            std::swap(NewLHS, NewRHS);
            ShuffleVectorSDNode::commuteMask(Mask);
            LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
          }

          if (LegalMask)
            return DAG.getVectorShuffle(VT, SDLoc(N), NewLHS, NewRHS, Mask);
        }
      }
    }
  }

  // fold (or c1, c2) -> c1|c2
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N0C && N1C && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C);
  // canonicalize constant to RHS
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
     !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
  // fold (or x, 0) -> x
  if (isNullConstant(N1))
    return N0;
  // fold (or x, -1) -> -1
  if (isAllOnesConstant(N1))
    return N1;
  // fold (or x, c) -> c iff (x & ~c) == 0
  if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
    return N1;

  if (SDValue Combined = visitORLike(N0, N1, N))
    return Combined;

  // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
  if (SDValue BSwap = MatchBSwapHWord(N, N0, N1))
    return BSwap;
  if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1))
    return BSwap;

  // reassociate or
  if (SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1))
    return ROR;
  // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
  // iff (c1 & c2) != 0.
  if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
      isa<ConstantSDNode>(N0.getOperand(1))) {
    ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
    if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) {
      if (SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT,
                                                   N1C, C1))
        return DAG.getNode(
            ISD::AND, SDLoc(N), VT,
            DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1), COR);
      return SDValue();
    }
  }
  // Simplify: (or (op x...), (op y...)) -> (op (or x, y))
  if (N0.getOpcode() == N1.getOpcode())
    if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
      return Tmp;

  // See if this is some rotate idiom.
  if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
    return SDValue(Rot, 0);

  // Simplify the operands using demanded-bits information.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// Match "(X shl/srl V1) & V2" where V2 may not be present.
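/// On a match, Shift is set to the SHL/SRL node and Mask to its AND constant
/// if one was present; e.g. both (srl x, y) and (and (srl x, y), 31) match,
/// the latter with Mask set to the constant 31.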
3990 bool DAGCombiner::MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) { 3991 if (Op.getOpcode() == ISD::AND) { 3992 if (DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) { 3993 Mask = Op.getOperand(1); 3994 Op = Op.getOperand(0); 3995 } else { 3996 return false; 3997 } 3998 } 3999 4000 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) { 4001 Shift = Op; 4002 return true; 4003 } 4004 4005 return false; 4006 } 4007 4008 // Return true if we can prove that, whenever Neg and Pos are both in the 4009 // range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos). This means that 4010 // for two opposing shifts shift1 and shift2 and a value X with OpBits bits: 4011 // 4012 // (or (shift1 X, Neg), (shift2 X, Pos)) 4013 // 4014 // reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate 4015 // in direction shift1 by Neg. The range [0, EltSize) means that we only need 4016 // to consider shift amounts with defined behavior. 4017 static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize) { 4018 // If EltSize is a power of 2 then: 4019 // 4020 // (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1) 4021 // (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize). 4022 // 4023 // So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check 4024 // for the stronger condition: 4025 // 4026 // Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1) [A] 4027 // 4028 // for all Neg and Pos. Since Neg & (EltSize - 1) == Neg' & (EltSize - 1) 4029 // we can just replace Neg with Neg' for the rest of the function. 4030 // 4031 // In other cases we check for the even stronger condition: 4032 // 4033 // Neg == EltSize - Pos [B] 4034 // 4035 // for all Neg and Pos. Note that the (or ...) then invokes undefined 4036 // behavior if Pos == 0 (and consequently Neg == EltSize). 4037 // 4038 // We could actually use [A] whenever EltSize is a power of 2, but the 4039 // only extra cases that it would match are those uninteresting ones 4040 // where Neg and Pos are never in range at the same time. E.g. for 4041 // EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos) 4042 // as well as (sub 32, Pos), but: 4043 // 4044 // (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos)) 4045 // 4046 // always invokes undefined behavior for 32-bit X. 4047 // 4048 // Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise. 4049 unsigned MaskLoBits = 0; 4050 if (Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) { 4051 if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) { 4052 if (NegC->getAPIntValue() == EltSize - 1) { 4053 Neg = Neg.getOperand(0); 4054 MaskLoBits = Log2_64(EltSize); 4055 } 4056 } 4057 } 4058 4059 // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1. 4060 if (Neg.getOpcode() != ISD::SUB) 4061 return false; 4062 ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0)); 4063 if (!NegC) 4064 return false; 4065 SDValue NegOp1 = Neg.getOperand(1); 4066 4067 // On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with 4068 // Pos'. The truncation is redundant for the purpose of the equality. 
4069 if (MaskLoBits && Pos.getOpcode() == ISD::AND) 4070 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) 4071 if (PosC->getAPIntValue() == EltSize - 1) 4072 Pos = Pos.getOperand(0); 4073 4074 // The condition we need is now: 4075 // 4076 // (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask 4077 // 4078 // If NegOp1 == Pos then we need: 4079 // 4080 // EltSize & Mask == NegC & Mask 4081 // 4082 // (because "x & Mask" is a truncation and distributes through subtraction). 4083 APInt Width; 4084 if (Pos == NegOp1) 4085 Width = NegC->getAPIntValue(); 4086 4087 // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC. 4088 // Then the condition we want to prove becomes: 4089 // 4090 // (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask 4091 // 4092 // which, again because "x & Mask" is a truncation, becomes: 4093 // 4094 // NegC & Mask == (EltSize - PosC) & Mask 4095 // EltSize & Mask == (NegC + PosC) & Mask 4096 else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) { 4097 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) 4098 Width = PosC->getAPIntValue() + NegC->getAPIntValue(); 4099 else 4100 return false; 4101 } else 4102 return false; 4103 4104 // Now we just need to check that EltSize & Mask == Width & Mask. 4105 if (MaskLoBits) 4106 // EltSize & Mask is 0 since Mask is EltSize - 1. 4107 return Width.getLoBits(MaskLoBits) == 0; 4108 return Width == EltSize; 4109 } 4110 4111 // A subroutine of MatchRotate used once we have found an OR of two opposite 4112 // shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces 4113 // to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the 4114 // former being preferred if supported. InnerPos and InnerNeg are Pos and 4115 // Neg with outer conversions stripped away. 4116 SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos, 4117 SDValue Neg, SDValue InnerPos, 4118 SDValue InnerNeg, unsigned PosOpcode, 4119 unsigned NegOpcode, const SDLoc &DL) { 4120 // fold (or (shl x, (*ext y)), 4121 // (srl x, (*ext (sub 32, y)))) -> 4122 // (rotl x, y) or (rotr x, (sub 32, y)) 4123 // 4124 // fold (or (shl x, (*ext (sub 32, y))), 4125 // (srl x, (*ext y))) -> 4126 // (rotr x, y) or (rotl x, (sub 32, y)) 4127 EVT VT = Shifted.getValueType(); 4128 if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits())) { 4129 bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT); 4130 return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted, 4131 HasPos ? Pos : Neg).getNode(); 4132 } 4133 4134 return nullptr; 4135 } 4136 4137 // MatchRotate - Handle an 'or' of two operands. If this is one of the many 4138 // idioms for rotate, and if the target supports rotation instructions, generate 4139 // a rot[lr]. 4140 SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) { 4141 // Must be a legal type. Expanded 'n promoted things won't work with rotates. 4142 EVT VT = LHS.getValueType(); 4143 if (!TLI.isTypeLegal(VT)) return nullptr; 4144 4145 // The target must have at least one rotate flavor. 4146 bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT); 4147 bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT); 4148 if (!HasROTL && !HasROTR) return nullptr; 4149 4150 // Match "(X shl/srl V1) & V2" where V2 may not be present. 4151 SDValue LHSShift; // The shift. 4152 SDValue LHSMask; // AND value if any. 4153 if (!MatchRotateHalf(LHS, LHSShift, LHSMask)) 4154 return nullptr; // Not part of a rotate. 
4155 4156 SDValue RHSShift; // The shift. 4157 SDValue RHSMask; // AND value if any. 4158 if (!MatchRotateHalf(RHS, RHSShift, RHSMask)) 4159 return nullptr; // Not part of a rotate. 4160 4161 if (LHSShift.getOperand(0) != RHSShift.getOperand(0)) 4162 return nullptr; // Not shifting the same value. 4163 4164 if (LHSShift.getOpcode() == RHSShift.getOpcode()) 4165 return nullptr; // Shifts must disagree. 4166 4167 // Canonicalize shl to left side in a shl/srl pair. 4168 if (RHSShift.getOpcode() == ISD::SHL) { 4169 std::swap(LHS, RHS); 4170 std::swap(LHSShift, RHSShift); 4171 std::swap(LHSMask, RHSMask); 4172 } 4173 4174 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 4175 SDValue LHSShiftArg = LHSShift.getOperand(0); 4176 SDValue LHSShiftAmt = LHSShift.getOperand(1); 4177 SDValue RHSShiftArg = RHSShift.getOperand(0); 4178 SDValue RHSShiftAmt = RHSShift.getOperand(1); 4179 4180 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1) 4181 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2) 4182 if (isConstOrConstSplat(LHSShiftAmt) && isConstOrConstSplat(RHSShiftAmt)) { 4183 uint64_t LShVal = isConstOrConstSplat(LHSShiftAmt)->getZExtValue(); 4184 uint64_t RShVal = isConstOrConstSplat(RHSShiftAmt)->getZExtValue(); 4185 if ((LShVal + RShVal) != EltSizeInBits) 4186 return nullptr; 4187 4188 SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, 4189 LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt); 4190 4191 // If there is an AND of either shifted operand, apply it to the result. 4192 if (LHSMask.getNode() || RHSMask.getNode()) { 4193 APInt AllBits = APInt::getAllOnesValue(EltSizeInBits); 4194 SDValue Mask = DAG.getConstant(AllBits, DL, VT); 4195 4196 if (LHSMask.getNode()) { 4197 APInt RHSBits = APInt::getLowBitsSet(EltSizeInBits, LShVal); 4198 Mask = DAG.getNode(ISD::AND, DL, VT, Mask, 4199 DAG.getNode(ISD::OR, DL, VT, LHSMask, 4200 DAG.getConstant(RHSBits, DL, VT))); 4201 } 4202 if (RHSMask.getNode()) { 4203 APInt LHSBits = APInt::getHighBitsSet(EltSizeInBits, RShVal); 4204 Mask = DAG.getNode(ISD::AND, DL, VT, Mask, 4205 DAG.getNode(ISD::OR, DL, VT, RHSMask, 4206 DAG.getConstant(LHSBits, DL, VT))); 4207 } 4208 4209 Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask); 4210 } 4211 4212 return Rot.getNode(); 4213 } 4214 4215 // If there is a mask here, and we have a variable shift, we can't be sure 4216 // that we're masking out the right stuff. 4217 if (LHSMask.getNode() || RHSMask.getNode()) 4218 return nullptr; 4219 4220 // If the shift amount is sign/zext/any-extended just peel it off. 
4221 SDValue LExtOp0 = LHSShiftAmt; 4222 SDValue RExtOp0 = RHSShiftAmt; 4223 if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 4224 LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 4225 LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 4226 LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && 4227 (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 4228 RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 4229 RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 4230 RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { 4231 LExtOp0 = LHSShiftAmt.getOperand(0); 4232 RExtOp0 = RHSShiftAmt.getOperand(0); 4233 } 4234 4235 SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt, 4236 LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL); 4237 if (TryL) 4238 return TryL; 4239 4240 SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt, 4241 RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL); 4242 if (TryR) 4243 return TryR; 4244 4245 return nullptr; 4246 } 4247 4248 namespace { 4249 /// Helper struct to parse and store a memory address as base + index + offset. 4250 /// We ignore sign extensions when it is safe to do so. 4251 /// The following two expressions are not equivalent. To differentiate we need 4252 /// to store whether there was a sign extension involved in the index 4253 /// computation. 4254 /// (load (i64 add (i64 copyfromreg %c) 4255 /// (i64 signextend (add (i8 load %index) 4256 /// (i8 1)))) 4257 /// vs 4258 /// 4259 /// (load (i64 add (i64 copyfromreg %c) 4260 /// (i64 signextend (i32 add (i32 signextend (i8 load %index)) 4261 /// (i32 1))))) 4262 struct BaseIndexOffset { 4263 SDValue Base; 4264 SDValue Index; 4265 int64_t Offset; 4266 bool IsIndexSignExt; 4267 4268 BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {} 4269 4270 BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset, 4271 bool IsIndexSignExt) : 4272 Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {} 4273 4274 bool equalBaseIndex(const BaseIndexOffset &Other) { 4275 return Other.Base == Base && Other.Index == Index && 4276 Other.IsIndexSignExt == IsIndexSignExt; 4277 } 4278 4279 /// Parses tree in Ptr for base, index, offset addresses. 4280 static BaseIndexOffset match(SDValue Ptr, SelectionDAG &DAG, 4281 int64_t PartialOffset = 0) { 4282 bool IsIndexSignExt = false; 4283 4284 // Split up a folded GlobalAddress+Offset into its component parts. 4285 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ptr)) 4286 if (GA->getOpcode() == ISD::GlobalAddress && GA->getOffset() != 0) { 4287 return BaseIndexOffset(DAG.getGlobalAddress(GA->getGlobal(), 4288 SDLoc(GA), 4289 GA->getValueType(0), 4290 /*Offset=*/PartialOffset, 4291 /*isTargetGA=*/false, 4292 GA->getTargetFlags()), 4293 SDValue(), 4294 GA->getOffset(), 4295 IsIndexSignExt); 4296 } 4297 4298 // We only can pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD 4299 // instruction, then it could be just the BASE or everything else we don't 4300 // know how to handle. Just use Ptr as BASE and give up. 4301 if (Ptr->getOpcode() != ISD::ADD) 4302 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4303 4304 // We know that we have at least an ADD instruction. Try to pattern match 4305 // the simple case of BASE + OFFSET. 
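    // e.g. (add (add %base, %index), 16) recurses on the inner ADD with an
    // accumulated offset of 16, yielding Base = %base, Index = %index,
    // Offset = 16.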
4306 if (isa<ConstantSDNode>(Ptr->getOperand(1))) { 4307 int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue(); 4308 return match(Ptr->getOperand(0), DAG, Offset + PartialOffset); 4309 } 4310 4311 // Inside a loop the current BASE pointer is calculated using an ADD and a 4312 // MUL instruction. In this case Ptr is the actual BASE pointer. 4313 // (i64 add (i64 %array_ptr) 4314 // (i64 mul (i64 %induction_var) 4315 // (i64 %element_size))) 4316 if (Ptr->getOperand(1)->getOpcode() == ISD::MUL) 4317 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4318 4319 // Look at Base + Index + Offset cases. 4320 SDValue Base = Ptr->getOperand(0); 4321 SDValue IndexOffset = Ptr->getOperand(1); 4322 4323 // Skip signextends. 4324 if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) { 4325 IndexOffset = IndexOffset->getOperand(0); 4326 IsIndexSignExt = true; 4327 } 4328 4329 // Either the case of Base + Index (no offset) or something else. 4330 if (IndexOffset->getOpcode() != ISD::ADD) 4331 return BaseIndexOffset(Base, IndexOffset, PartialOffset, IsIndexSignExt); 4332 4333 // Now we have the case of Base + Index + offset. 4334 SDValue Index = IndexOffset->getOperand(0); 4335 SDValue Offset = IndexOffset->getOperand(1); 4336 4337 if (!isa<ConstantSDNode>(Offset)) 4338 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4339 4340 // Ignore signextends. 4341 if (Index->getOpcode() == ISD::SIGN_EXTEND) { 4342 Index = Index->getOperand(0); 4343 IsIndexSignExt = true; 4344 } else IsIndexSignExt = false; 4345 4346 int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue(); 4347 return BaseIndexOffset(Base, Index, Off + PartialOffset, IsIndexSignExt); 4348 } 4349 }; 4350 } // namespace 4351 4352 SDValue DAGCombiner::visitXOR(SDNode *N) { 4353 SDValue N0 = N->getOperand(0); 4354 SDValue N1 = N->getOperand(1); 4355 EVT VT = N0.getValueType(); 4356 4357 // fold vector ops 4358 if (VT.isVector()) { 4359 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 4360 return FoldedVOp; 4361 4362 // fold (xor x, 0) -> x, vector edition 4363 if (ISD::isBuildVectorAllZeros(N0.getNode())) 4364 return N1; 4365 if (ISD::isBuildVectorAllZeros(N1.getNode())) 4366 return N0; 4367 } 4368 4369 // fold (xor undef, undef) -> 0. This is a common idiom (misuse). 
4370 if (N0.isUndef() && N1.isUndef()) 4371 return DAG.getConstant(0, SDLoc(N), VT); 4372 // fold (xor x, undef) -> undef 4373 if (N0.isUndef()) 4374 return N0; 4375 if (N1.isUndef()) 4376 return N1; 4377 // fold (xor c1, c2) -> c1^c2 4378 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 4379 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1); 4380 if (N0C && N1C) 4381 return DAG.FoldConstantArithmetic(ISD::XOR, SDLoc(N), VT, N0C, N1C); 4382 // canonicalize constant to RHS 4383 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 4384 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 4385 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0); 4386 // fold (xor x, 0) -> x 4387 if (isNullConstant(N1)) 4388 return N0; 4389 // reassociate xor 4390 if (SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1)) 4391 return RXOR; 4392 4393 // fold !(x cc y) -> (x !cc y) 4394 SDValue LHS, RHS, CC; 4395 if (TLI.isConstTrueVal(N1.getNode()) && isSetCCEquivalent(N0, LHS, RHS, CC)) { 4396 bool isInt = LHS.getValueType().isInteger(); 4397 ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), 4398 isInt); 4399 4400 if (!LegalOperations || 4401 TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) { 4402 switch (N0.getOpcode()) { 4403 default: 4404 llvm_unreachable("Unhandled SetCC Equivalent!"); 4405 case ISD::SETCC: 4406 return DAG.getSetCC(SDLoc(N), VT, LHS, RHS, NotCC); 4407 case ISD::SELECT_CC: 4408 return DAG.getSelectCC(SDLoc(N), LHS, RHS, N0.getOperand(2), 4409 N0.getOperand(3), NotCC); 4410 } 4411 } 4412 } 4413 4414 // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y))) 4415 if (isOneConstant(N1) && N0.getOpcode() == ISD::ZERO_EXTEND && 4416 N0.getNode()->hasOneUse() && 4417 isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){ 4418 SDValue V = N0.getOperand(0); 4419 SDLoc DL(N0); 4420 V = DAG.getNode(ISD::XOR, DL, V.getValueType(), V, 4421 DAG.getConstant(1, DL, V.getValueType())); 4422 AddToWorklist(V.getNode()); 4423 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V); 4424 } 4425 4426 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc 4427 if (isOneConstant(N1) && VT == MVT::i1 && 4428 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { 4429 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 4430 if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) { 4431 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND; 4432 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS 4433 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS 4434 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode()); 4435 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS); 4436 } 4437 } 4438 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants 4439 if (isAllOnesConstant(N1) && 4440 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { 4441 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 4442 if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) { 4443 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? 
ISD::OR : ISD::AND; 4444 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS 4445 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS 4446 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode()); 4447 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS); 4448 } 4449 } 4450 // fold (xor (and x, y), y) -> (and (not x), y) 4451 if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() && 4452 N0->getOperand(1) == N1) { 4453 SDValue X = N0->getOperand(0); 4454 SDValue NotX = DAG.getNOT(SDLoc(X), X, VT); 4455 AddToWorklist(NotX.getNode()); 4456 return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1); 4457 } 4458 // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2)) 4459 if (N1C && N0.getOpcode() == ISD::XOR) { 4460 if (const ConstantSDNode *N00C = getAsNonOpaqueConstant(N0.getOperand(0))) { 4461 SDLoc DL(N); 4462 return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1), 4463 DAG.getConstant(N1C->getAPIntValue() ^ 4464 N00C->getAPIntValue(), DL, VT)); 4465 } 4466 if (const ConstantSDNode *N01C = getAsNonOpaqueConstant(N0.getOperand(1))) { 4467 SDLoc DL(N); 4468 return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0), 4469 DAG.getConstant(N1C->getAPIntValue() ^ 4470 N01C->getAPIntValue(), DL, VT)); 4471 } 4472 } 4473 // fold (xor x, x) -> 0 4474 if (N0 == N1) 4475 return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes); 4476 4477 // fold (xor (shl 1, x), -1) -> (rotl ~1, x) 4478 // Here is a concrete example of this equivalence: 4479 // i16 x == 14 4480 // i16 shl == 1 << 14 == 16384 == 0b0100000000000000 4481 // i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111 4482 // 4483 // => 4484 // 4485 // i16 ~1 == 0b1111111111111110 4486 // i16 rol(~1, 14) == 0b1011111111111111 4487 // 4488 // Some additional tips to help conceptualize this transform: 4489 // - Try to see the operation as placing a single zero in a value of all ones. 4490 // - There exists no value for x which would allow the result to contain zero. 4491 // - Values of x larger than the bitwidth are undefined and do not require a 4492 // consistent result. 4493 // - Pushing the zero left requires shifting one bits in from the right. 4494 // A rotate left of ~1 is a nice way of achieving the desired result. 4495 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0.getOpcode() == ISD::SHL 4496 && isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) { 4497 SDLoc DL(N); 4498 return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT), 4499 N0.getOperand(1)); 4500 } 4501 4502 // Simplify: xor (op x...), (op y...) -> (op (xor x, y)) 4503 if (N0.getOpcode() == N1.getOpcode()) 4504 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N)) 4505 return Tmp; 4506 4507 // Simplify the expression using non-local knowledge. 4508 if (!VT.isVector() && 4509 SimplifyDemandedBits(SDValue(N, 0))) 4510 return SDValue(N, 0); 4511 4512 return SDValue(); 4513 } 4514 4515 /// Handle transforms common to the three shifts, when the shift amount is a 4516 /// constant. 4517 SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) { 4518 SDNode *LHS = N->getOperand(0).getNode(); 4519 if (!LHS->hasOneUse()) return SDValue(); 4520 4521 // We want to pull some binops through shifts, so that we have (and (shift)) 4522 // instead of (shift (and)), likewise for add, or, xor, etc. This sort of 4523 // thing happens with address calculations, so it's important to canonicalize 4524 // it. 4525 bool HighBitSet = false; // Can we transform this if the high bit is set? 
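  // e.g. for SHL: (shl (add x, c1), c2) -> (add (shl x, c2), (shl c1, c2)),
  // which exposes the shifted value to further address-style folding.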

  switch (LHS->getOpcode()) {
  default: return SDValue();
  case ISD::OR:
  case ISD::XOR:
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  case ISD::AND:
    HighBitSet = true;  // We can only transform sra if the high bit is set.
    break;
  case ISD::ADD:
    if (N->getOpcode() != ISD::SHL)
      return SDValue(); // only shl(add) not sr[al](add).
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  }

  // We require the RHS of the binop to be a constant and not opaque as well.
  ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS->getOperand(1));
  if (!BinOpCst) return SDValue();

  // FIXME: disable this unless the input to the binop is a shift by a constant
  // or is a copy/select. Enable this in other cases when it turns out to be
  // exactly profitable.
  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
  bool isShift = BinOpLHSVal->getOpcode() == ISD::SHL ||
                 BinOpLHSVal->getOpcode() == ISD::SRA ||
                 BinOpLHSVal->getOpcode() == ISD::SRL;
  bool isCopyOrSelect = BinOpLHSVal->getOpcode() == ISD::CopyFromReg ||
                        BinOpLHSVal->getOpcode() == ISD::SELECT;

  if ((!isShift || !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1))) &&
      !isCopyOrSelect)
    return SDValue();

  if (isCopyOrSelect && N->hasOneUse())
    return SDValue();

  EVT VT = N->getValueType(0);

  // If this is a signed shift right, and the high bit is modified by the
  // logical operation, do not perform the transformation. The HighBitSet
  // boolean indicates the value of the high bit of the constant which would
  // cause it to be modified for this operation.
  if (N->getOpcode() == ISD::SRA) {
    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
    if (BinOpRHSSignSet != HighBitSet)
      return SDValue();
  }

  if (!TLI.isDesirableToCommuteWithShift(LHS))
    return SDValue();

  // Fold the constants, shifting the binop RHS by the shift amount.
  SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
                               N->getValueType(0),
                               LHS->getOperand(1), N->getOperand(1));
  assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");

  // Create the new shift.
  SDValue NewShift = DAG.getNode(N->getOpcode(),
                                 SDLoc(LHS->getOperand(0)),
                                 VT, LHS->getOperand(0), N->getOperand(1));

  // Create the new binop.
4590 return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS); 4591 } 4592 4593 SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) { 4594 assert(N->getOpcode() == ISD::TRUNCATE); 4595 assert(N->getOperand(0).getOpcode() == ISD::AND); 4596 4597 // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC) 4598 if (N->hasOneUse() && N->getOperand(0).hasOneUse()) { 4599 SDValue N01 = N->getOperand(0).getOperand(1); 4600 if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) { 4601 SDLoc DL(N); 4602 EVT TruncVT = N->getValueType(0); 4603 SDValue N00 = N->getOperand(0).getOperand(0); 4604 SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00); 4605 SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01); 4606 AddToWorklist(Trunc00.getNode()); 4607 AddToWorklist(Trunc01.getNode()); 4608 return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01); 4609 } 4610 } 4611 4612 return SDValue(); 4613 } 4614 4615 SDValue DAGCombiner::visitRotate(SDNode *N) { 4616 // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))). 4617 if (N->getOperand(1).getOpcode() == ISD::TRUNCATE && 4618 N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) { 4619 if (SDValue NewOp1 = 4620 distributeTruncateThroughAnd(N->getOperand(1).getNode())) 4621 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), 4622 N->getOperand(0), NewOp1); 4623 } 4624 return SDValue(); 4625 } 4626 4627 SDValue DAGCombiner::visitSHL(SDNode *N) { 4628 SDValue N0 = N->getOperand(0); 4629 SDValue N1 = N->getOperand(1); 4630 EVT VT = N0.getValueType(); 4631 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 4632 4633 // fold vector ops 4634 if (VT.isVector()) { 4635 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 4636 return FoldedVOp; 4637 4638 BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1); 4639 // If setcc produces all-one true value then: 4640 // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV) 4641 if (N1CV && N1CV->isConstant()) { 4642 if (N0.getOpcode() == ISD::AND) { 4643 SDValue N00 = N0->getOperand(0); 4644 SDValue N01 = N0->getOperand(1); 4645 BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01); 4646 4647 if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC && 4648 TLI.getBooleanContents(N00.getOperand(0).getValueType()) == 4649 TargetLowering::ZeroOrNegativeOneBooleanContent) { 4650 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, 4651 N01CV, N1CV)) 4652 return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C); 4653 } 4654 } 4655 } 4656 } 4657 4658 ConstantSDNode *N1C = isConstOrConstSplat(N1); 4659 4660 // fold (shl c1, c2) -> c1<<c2 4661 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 4662 if (N0C && N1C && !N1C->isOpaque()) 4663 return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C); 4664 // fold (shl 0, x) -> 0 4665 if (isNullConstant(N0)) 4666 return N0; 4667 // fold (shl x, c >= size(x)) -> undef 4668 if (N1C && N1C->getAPIntValue().uge(OpSizeInBits)) 4669 return DAG.getUNDEF(VT); 4670 // fold (shl x, 0) -> x 4671 if (N1C && N1C->isNullValue()) 4672 return N0; 4673 // fold (shl undef, x) -> 0 4674 if (N0.isUndef()) 4675 return DAG.getConstant(0, SDLoc(N), VT); 4676 // if (shl x, c) is known to be zero, return 0 4677 if (DAG.MaskedValueIsZero(SDValue(N, 0), 4678 APInt::getAllOnesValue(OpSizeInBits))) 4679 return DAG.getConstant(0, SDLoc(N), VT); 4680 // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))). 
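  // For instance, with hypothetical types and constant:
  //   (shl i32 x, (trunc (and i64 y, 31)))
  //     -> (shl i32 x, (and i32 (trunc y), 31))
  // which is safe because the mask value 31 survives the truncation intact.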
4681 if (N1.getOpcode() == ISD::TRUNCATE && 4682 N1.getOperand(0).getOpcode() == ISD::AND) { 4683 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) 4684 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1); 4685 } 4686 4687 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 4688 return SDValue(N, 0); 4689 4690 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2)) 4691 if (N1C && N0.getOpcode() == ISD::SHL) { 4692 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 4693 SDLoc DL(N); 4694 APInt c1 = N0C1->getAPIntValue(); 4695 APInt c2 = N1C->getAPIntValue(); 4696 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 4697 4698 APInt Sum = c1 + c2; 4699 if (Sum.uge(OpSizeInBits)) 4700 return DAG.getConstant(0, DL, VT); 4701 4702 return DAG.getNode( 4703 ISD::SHL, DL, VT, N0.getOperand(0), 4704 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 4705 } 4706 } 4707 4708 // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2))) 4709 // For this to be valid, the second form must not preserve any of the bits 4710 // that are shifted out by the inner shift in the first form. This means 4711 // the outer shift size must be >= the number of bits added by the ext. 4712 // As a corollary, we don't care what kind of ext it is. 4713 if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND || 4714 N0.getOpcode() == ISD::ANY_EXTEND || 4715 N0.getOpcode() == ISD::SIGN_EXTEND) && 4716 N0.getOperand(0).getOpcode() == ISD::SHL) { 4717 SDValue N0Op0 = N0.getOperand(0); 4718 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) { 4719 APInt c1 = N0Op0C1->getAPIntValue(); 4720 APInt c2 = N1C->getAPIntValue(); 4721 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 4722 4723 EVT InnerShiftVT = N0Op0.getValueType(); 4724 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits(); 4725 if (c2.uge(OpSizeInBits - InnerShiftSize)) { 4726 SDLoc DL(N0); 4727 APInt Sum = c1 + c2; 4728 if (Sum.uge(OpSizeInBits)) 4729 return DAG.getConstant(0, DL, VT); 4730 4731 return DAG.getNode( 4732 ISD::SHL, DL, VT, 4733 DAG.getNode(N0.getOpcode(), DL, VT, N0Op0->getOperand(0)), 4734 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 4735 } 4736 } 4737 } 4738 4739 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C)) 4740 // Only fold this if the inner zext has no other uses to avoid increasing 4741 // the total number of instructions. 
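  // An illustrative instance with hypothetical types (i16 zext'd to i32, C=4):
  //   (shl (zext (srl i16 x, 4)), 4) -> (zext (shl (srl i16 x, 4), 4))
  // The inner srl already zeroed the top 4 bits of the i16 value, so the
  // narrow shl cannot lose anything; both sides equal zext(x & 0xFFF0).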
4742   if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
4743       N0.getOperand(0).getOpcode() == ISD::SRL) {
4744     SDValue N0Op0 = N0.getOperand(0);
4745     if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
4746       if (N0Op0C1->getAPIntValue().ult(VT.getScalarSizeInBits())) {
4747         uint64_t c1 = N0Op0C1->getZExtValue();
4748         uint64_t c2 = N1C->getZExtValue();
4749         if (c1 == c2) {
4750           SDValue NewOp0 = N0.getOperand(0);
4751           EVT CountVT = NewOp0.getOperand(1).getValueType();
4752           SDLoc DL(N);
4753           SDValue NewSHL = DAG.getNode(ISD::SHL, DL, NewOp0.getValueType(),
4754                                        NewOp0,
4755                                        DAG.getConstant(c2, DL, CountVT));
4756           AddToWorklist(NewSHL.getNode());
4757           return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
4758         }
4759       }
4760     }
4761   }
4762
4763   // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
4764   // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C1-C2)) if C1 > C2
4765   if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
4766       cast<BinaryWithFlagsSDNode>(N0)->Flags.hasExact()) {
4767     if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
4768       uint64_t C1 = N0C1->getZExtValue();
4769       uint64_t C2 = N1C->getZExtValue();
4770       SDLoc DL(N);
4771       if (C1 <= C2)
4772         return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
4773                            DAG.getConstant(C2 - C1, DL, N1.getValueType()));
4774       return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
4775                          DAG.getConstant(C1 - C2, DL, N1.getValueType()));
4776     }
4777   }
4778
4779   // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
4780   //                               (and (srl x, (sub c1, c2)), MASK)
4781   // Only fold this if the inner shift has no other uses -- if it does, folding
4782   // this will increase the total number of instructions.
4783   if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
4784     if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
4785       uint64_t c1 = N0C1->getZExtValue();
4786       if (c1 < OpSizeInBits) {
4787         uint64_t c2 = N1C->getZExtValue();
4788         APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
4789         SDValue Shift;
4790         if (c2 > c1) {
4791           Mask = Mask.shl(c2 - c1);
4792           SDLoc DL(N);
4793           Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
4794                               DAG.getConstant(c2 - c1, DL, N1.getValueType()));
4795         } else {
4796           Mask = Mask.lshr(c1 - c2);
4797           SDLoc DL(N);
4798           Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
4799                               DAG.getConstant(c1 - c2, DL, N1.getValueType()));
4800         }
4801         SDLoc DL(N0);
4802         return DAG.getNode(ISD::AND, DL, VT, Shift,
4803                            DAG.getConstant(Mask, DL, VT));
4804       }
4805     }
4806   }
4807
4808   // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
4809   if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) &&
4810       isConstantOrConstantVector(N1, /* No Opaques */ true)) {
4811     unsigned BitSize = VT.getScalarSizeInBits();
4812     SDLoc DL(N);
4813     SDValue AllBits = DAG.getConstant(APInt::getAllOnesValue(BitSize), DL, VT);
4814     SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1);
4815     return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask);
4816   }
4817
4818   // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
4819   // Variant of version done on multiply, except mul by a power of 2 is turned
4820   // into a shift.
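  // E.g. with hypothetical i32 constants:
  //   (shl (add x, 7), 3) -> (add (shl x, 3), 56)   since 7 << 3 == 56.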
4821   if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
4822       isConstantOrConstantVector(N1, /* No Opaques */ true) &&
4823       isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
4824     SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
4825     SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
4826     AddToWorklist(Shl0.getNode());
4827     AddToWorklist(Shl1.getNode());
4828     return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1);
4829   }
4830
4831   // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
4832   if (N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse() &&
4833       isConstantOrConstantVector(N1, /* No Opaques */ true) &&
4834       isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
4835     SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
4836     if (isConstantOrConstantVector(Shl))
4837       return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Shl);
4838   }
4839
4840   if (N1C && !N1C->isOpaque())
4841     if (SDValue NewSHL = visitShiftByConstant(N, N1C))
4842       return NewSHL;
4843
4844   return SDValue();
4845 }
4846
4847 SDValue DAGCombiner::visitSRA(SDNode *N) {
4848   SDValue N0 = N->getOperand(0);
4849   SDValue N1 = N->getOperand(1);
4850   EVT VT = N0.getValueType();
4851   unsigned OpSizeInBits = VT.getScalarSizeInBits();
4852
4853   // Arithmetic shifting an all-sign-bit value is a no-op.
4854   if (DAG.ComputeNumSignBits(N0) == OpSizeInBits)
4855     return N0;
4856
4857   // fold vector ops
4858   if (VT.isVector())
4859     if (SDValue FoldedVOp = SimplifyVBinOp(N))
4860       return FoldedVOp;
4861
4862   ConstantSDNode *N1C = isConstOrConstSplat(N1);
4863
4864   // fold (sra c1, c2) -> (c1 >>s c2)
4865   ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
4866   if (N0C && N1C && !N1C->isOpaque())
4867     return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
4868   // fold (sra 0, x) -> 0
4869   if (isNullConstant(N0))
4870     return N0;
4871   // fold (sra -1, x) -> -1
4872   if (isAllOnesConstant(N0))
4873     return N0;
4874   // fold (sra x, c >= size(x)) -> undef
4875   if (N1C && N1C->getAPIntValue().uge(OpSizeInBits))
4876     return DAG.getUNDEF(VT);
4877   // fold (sra x, 0) -> x
4878   if (N1C && N1C->isNullValue())
4879     return N0;
4880   // fold (sra (shl x, c1), c1) -> sext_inreg for some c1, if the target
4881   // supports sext_inreg.
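  // For example (hypothetical i32 case, c1 == 24):
  //   (sra (shl x, 24), 24) -> (sext_inreg x, i8)
  // i.e. the shift pair is just a sign extension of the low 8 bits of x.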
4882   if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
4883     unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
4884     EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
4885     if (VT.isVector())
4886       ExtVT = EVT::getVectorVT(*DAG.getContext(),
4887                                ExtVT, VT.getVectorNumElements());
4888     if ((!LegalOperations ||
4889          TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
4890       return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
4891                          N0.getOperand(0), DAG.getValueType(ExtVT));
4892   }
4893
4894   // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
4895   if (N1C && N0.getOpcode() == ISD::SRA) {
4896     if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
4897       SDLoc DL(N);
4898       APInt c1 = N0C1->getAPIntValue();
4899       APInt c2 = N1C->getAPIntValue();
4900       zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
4901
4902       APInt Sum = c1 + c2;
4903       if (Sum.uge(OpSizeInBits))
4904         Sum = APInt(OpSizeInBits, OpSizeInBits - 1);
4905
4906       return DAG.getNode(
4907           ISD::SRA, DL, VT, N0.getOperand(0),
4908           DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType()));
4909     }
4910   }
4911
4912   // fold (sra (shl X, m), (sub result_size, n))
4913   //      -> (sign_extend (trunc (srl X, (sub (sub result_size, n), m)))) for
4914   // result_size - n != m.
4915   // If truncate is free for the target, sext(shl) is likely to result in better
4916   // code.
4917   if (N0.getOpcode() == ISD::SHL && N1C) {
4918     // Get the two shift constants: N01C = m and N1C = (sub result_size, n).
4919     const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
4920     if (N01C) {
4921       LLVMContext &Ctx = *DAG.getContext();
4922       // Determine what the truncate's result bitsize and type would be.
4923       EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
4924
4925       if (VT.isVector())
4926         TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());
4927
4928       // Determine the residual right-shift amount.
4929       int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
4930
4931       // If the shift is not a no-op (in which case this should be just a sign
4932       // extend already), the type to truncate to is legal, sign_extend is legal
4933       // on that type, and the truncate to that type is both legal and free,
4934       // perform the transform.
4935       if ((ShiftAmt > 0) &&
4936           TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
4937           TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
4938           TLI.isTruncateFree(VT, TruncVT)) {
4939
4940         SDLoc DL(N);
4941         SDValue Amt = DAG.getConstant(ShiftAmt, DL,
4942             getShiftAmountTy(N0.getOperand(0).getValueType()));
4943         SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
4944                                     N0.getOperand(0), Amt);
4945         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
4946                                     Shift);
4947         return DAG.getNode(ISD::SIGN_EXTEND, DL,
4948                            N->getValueType(0), Trunc);
4949       }
4950     }
4951   }
4952
4953   // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
4954 if (N1.getOpcode() == ISD::TRUNCATE && 4955 N1.getOperand(0).getOpcode() == ISD::AND) { 4956 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) 4957 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1); 4958 } 4959 4960 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2)) 4961 // if c1 is equal to the number of bits the trunc removes 4962 if (N0.getOpcode() == ISD::TRUNCATE && 4963 (N0.getOperand(0).getOpcode() == ISD::SRL || 4964 N0.getOperand(0).getOpcode() == ISD::SRA) && 4965 N0.getOperand(0).hasOneUse() && 4966 N0.getOperand(0).getOperand(1).hasOneUse() && 4967 N1C) { 4968 SDValue N0Op0 = N0.getOperand(0); 4969 if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) { 4970 unsigned LargeShiftVal = LargeShift->getZExtValue(); 4971 EVT LargeVT = N0Op0.getValueType(); 4972 4973 if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) { 4974 SDLoc DL(N); 4975 SDValue Amt = 4976 DAG.getConstant(LargeShiftVal + N1C->getZExtValue(), DL, 4977 getShiftAmountTy(N0Op0.getOperand(0).getValueType())); 4978 SDValue SRA = DAG.getNode(ISD::SRA, DL, LargeVT, 4979 N0Op0.getOperand(0), Amt); 4980 return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA); 4981 } 4982 } 4983 } 4984 4985 // Simplify, based on bits shifted out of the LHS. 4986 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 4987 return SDValue(N, 0); 4988 4989 4990 // If the sign bit is known to be zero, switch this to a SRL. 4991 if (DAG.SignBitIsZero(N0)) 4992 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1); 4993 4994 if (N1C && !N1C->isOpaque()) 4995 if (SDValue NewSRA = visitShiftByConstant(N, N1C)) 4996 return NewSRA; 4997 4998 return SDValue(); 4999 } 5000 5001 SDValue DAGCombiner::visitSRL(SDNode *N) { 5002 SDValue N0 = N->getOperand(0); 5003 SDValue N1 = N->getOperand(1); 5004 EVT VT = N0.getValueType(); 5005 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 5006 5007 // fold vector ops 5008 if (VT.isVector()) 5009 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 5010 return FoldedVOp; 5011 5012 ConstantSDNode *N1C = isConstOrConstSplat(N1); 5013 5014 // fold (srl c1, c2) -> c1 >>u c2 5015 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 5016 if (N0C && N1C && !N1C->isOpaque()) 5017 return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C); 5018 // fold (srl 0, x) -> 0 5019 if (isNullConstant(N0)) 5020 return N0; 5021 // fold (srl x, c >= size(x)) -> undef 5022 if (N1C && N1C->getAPIntValue().uge(OpSizeInBits)) 5023 return DAG.getUNDEF(VT); 5024 // fold (srl x, 0) -> x 5025 if (N1C && N1C->isNullValue()) 5026 return N0; 5027 // if (srl x, c) is known to be zero, return 0 5028 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 5029 APInt::getAllOnesValue(OpSizeInBits))) 5030 return DAG.getConstant(0, SDLoc(N), VT); 5031 5032 // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2)) 5033 if (N1C && N0.getOpcode() == ISD::SRL) { 5034 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 5035 SDLoc DL(N); 5036 APInt c1 = N0C1->getAPIntValue(); 5037 APInt c2 = N1C->getAPIntValue(); 5038 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 5039 5040 APInt Sum = c1 + c2; 5041 if (Sum.uge(OpSizeInBits)) 5042 return DAG.getConstant(0, DL, VT); 5043 5044 return DAG.getNode( 5045 ISD::SRL, DL, VT, N0.getOperand(0), 5046 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 5047 } 5048 } 5049 5050 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2))) 5051 if (N1C && N0.getOpcode() == ISD::TRUNCATE && 5052 N0.getOperand(0).getOpcode() == ISD::SRL && 
5053       isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
5054     uint64_t c1 =
5055       cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
5056     uint64_t c2 = N1C->getZExtValue();
5057     EVT InnerShiftVT = N0.getOperand(0).getValueType();
5058     EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
5059     uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
5060     // This is only valid if OpSizeInBits + c1 == the size of the inner shift.
5061     if (c1 + OpSizeInBits == InnerShiftSize) {
5062       SDLoc DL(N0);
5063       if (c1 + c2 >= InnerShiftSize)
5064         return DAG.getConstant(0, DL, VT);
5065       return DAG.getNode(ISD::TRUNCATE, DL, VT,
5066                          DAG.getNode(ISD::SRL, DL, InnerShiftVT,
5067                                      N0.getOperand(0)->getOperand(0),
5068                                      DAG.getConstant(c1 + c2, DL,
5069                                                      ShiftCountVT)));
5070     }
5071   }
5072
5073   // fold (srl (shl x, c), c) -> (and x, cst2)
5074   if (N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
5075       isConstantOrConstantVector(N1, /* NoOpaques */ true)) {
5076     SDLoc DL(N);
5077     APInt AllBits = APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
5078     SDValue Mask =
5079         DAG.getNode(ISD::SRL, DL, VT, DAG.getConstant(AllBits, DL, VT), N1);
5080     AddToWorklist(Mask.getNode());
5081     return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), Mask);
5082   }
5083
5084   // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
5085   if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
5086     // Shifting in all undef bits?
5087     EVT SmallVT = N0.getOperand(0).getValueType();
5088     unsigned BitSize = SmallVT.getScalarSizeInBits();
5089     if (N1C->getZExtValue() >= BitSize)
5090       return DAG.getUNDEF(VT);
5091
5092     if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
5093       uint64_t ShiftAmt = N1C->getZExtValue();
5094       SDLoc DL0(N0);
5095       SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT,
5096                                        N0.getOperand(0),
5097                                        DAG.getConstant(ShiftAmt, DL0,
5098                                                        getShiftAmountTy(SmallVT)));
5099       AddToWorklist(SmallShift.getNode());
5100       APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
5101       SDLoc DL(N);
5102       return DAG.getNode(ISD::AND, DL, VT,
5103                          DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
5104                          DAG.getConstant(Mask, DL, VT));
5105     }
5106   }
5107
5108   // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
5109   // bit, which is unmodified by sra.
5110   if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) {
5111     if (N0.getOpcode() == ISD::SRA)
5112       return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
5113   }
5114
5115   // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
5116   if (N1C && N0.getOpcode() == ISD::CTLZ &&
5117       N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
5118     APInt KnownZero, KnownOne;
5119     DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne);
5120
5121     // If any of the input bits are KnownOne, then the input couldn't be all
5122     // zeros, thus the result of the srl will always be zero.
5123     if (KnownOne.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);
5124
5125     // If all of the bits input to the ctlz node are known to be zero, then
5126     // the result of the ctlz is "32" and the result of the shift is one.
5127     APInt UnknownBits = ~KnownZero;
5128     if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);
5129
5130     // Otherwise, check to see if there is exactly one bit input to the ctlz.
5131     if ((UnknownBits & (UnknownBits - 1)) == 0) {
5132       // Okay, we know that only the single bit specified by UnknownBits
5133       // could be set on input to the CTLZ node. If this bit is set, the SRL
5134       // will return 0; if it is clear, it returns 1. Change the CTLZ/SRL pair
5135       // to an SRL/XOR pair, which is likely to simplify more.
5136       unsigned ShAmt = UnknownBits.countTrailingZeros();
5137       SDValue Op = N0.getOperand(0);
5138
5139       if (ShAmt) {
5140         SDLoc DL(N0);
5141         Op = DAG.getNode(ISD::SRL, DL, VT, Op,
5142                          DAG.getConstant(ShAmt, DL,
5143                                          getShiftAmountTy(Op.getValueType())));
5144         AddToWorklist(Op.getNode());
5145       }
5146
5147       SDLoc DL(N);
5148       return DAG.getNode(ISD::XOR, DL, VT,
5149                          Op, DAG.getConstant(1, DL, VT));
5150     }
5151   }
5152
5153   // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
5154   if (N1.getOpcode() == ISD::TRUNCATE &&
5155       N1.getOperand(0).getOpcode() == ISD::AND) {
5156     if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
5157       return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
5158   }
5159
5160   // fold operands of srl based on knowledge that the low bits are not
5161   // demanded.
5162   if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
5163     return SDValue(N, 0);
5164
5165   if (N1C && !N1C->isOpaque())
5166     if (SDValue NewSRL = visitShiftByConstant(N, N1C))
5167       return NewSRL;
5168
5169   // Attempt to convert a srl of a load into a narrower zero-extending load.
5170   if (SDValue NarrowLoad = ReduceLoadWidth(N))
5171     return NarrowLoad;
5172
5173   // Here is a common situation. We want to optimize:
5174   //
5175   //   %a = ...
5176   //   %b = and i32 %a, 2
5177   //   %c = srl i32 %b, 1
5178   //   brcond i32 %c ...
5179   //
5180   // into
5181   //
5182   //   %a = ...
5183   //   %b = and %a, 2
5184   //   %c = setcc eq %b, 0
5185   //   brcond %c ...
5186   //
5187   // However, once the source operand of the SRL has been optimized into an
5188   // AND, the SRL itself may not be optimized further. Look for it and add the
5189   // BRCOND into the worklist.
5190   if (N->hasOneUse()) {
5191     SDNode *Use = *N->use_begin();
5192     if (Use->getOpcode() == ISD::BRCOND)
5193       AddToWorklist(Use);
5194     else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
5195       // Also look past the truncate.
5196 Use = *Use->use_begin(); 5197 if (Use->getOpcode() == ISD::BRCOND) 5198 AddToWorklist(Use); 5199 } 5200 } 5201 5202 return SDValue(); 5203 } 5204 5205 SDValue DAGCombiner::visitBSWAP(SDNode *N) { 5206 SDValue N0 = N->getOperand(0); 5207 EVT VT = N->getValueType(0); 5208 5209 // fold (bswap c1) -> c2 5210 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5211 return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0); 5212 // fold (bswap (bswap x)) -> x 5213 if (N0.getOpcode() == ISD::BSWAP) 5214 return N0->getOperand(0); 5215 return SDValue(); 5216 } 5217 5218 SDValue DAGCombiner::visitBITREVERSE(SDNode *N) { 5219 SDValue N0 = N->getOperand(0); 5220 EVT VT = N->getValueType(0); 5221 5222 // fold (bitreverse c1) -> c2 5223 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5224 return DAG.getNode(ISD::BITREVERSE, SDLoc(N), VT, N0); 5225 // fold (bitreverse (bitreverse x)) -> x 5226 if (N0.getOpcode() == ISD::BITREVERSE) 5227 return N0.getOperand(0); 5228 return SDValue(); 5229 } 5230 5231 SDValue DAGCombiner::visitCTLZ(SDNode *N) { 5232 SDValue N0 = N->getOperand(0); 5233 EVT VT = N->getValueType(0); 5234 5235 // fold (ctlz c1) -> c2 5236 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5237 return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0); 5238 return SDValue(); 5239 } 5240 5241 SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) { 5242 SDValue N0 = N->getOperand(0); 5243 EVT VT = N->getValueType(0); 5244 5245 // fold (ctlz_zero_undef c1) -> c2 5246 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5247 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0); 5248 return SDValue(); 5249 } 5250 5251 SDValue DAGCombiner::visitCTTZ(SDNode *N) { 5252 SDValue N0 = N->getOperand(0); 5253 EVT VT = N->getValueType(0); 5254 5255 // fold (cttz c1) -> c2 5256 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5257 return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0); 5258 return SDValue(); 5259 } 5260 5261 SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) { 5262 SDValue N0 = N->getOperand(0); 5263 EVT VT = N->getValueType(0); 5264 5265 // fold (cttz_zero_undef c1) -> c2 5266 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5267 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0); 5268 return SDValue(); 5269 } 5270 5271 SDValue DAGCombiner::visitCTPOP(SDNode *N) { 5272 SDValue N0 = N->getOperand(0); 5273 EVT VT = N->getValueType(0); 5274 5275 // fold (ctpop c1) -> c2 5276 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5277 return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0); 5278 return SDValue(); 5279 } 5280 5281 5282 /// \brief Generate Min/Max node 5283 static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, 5284 SDValue RHS, SDValue True, SDValue False, 5285 ISD::CondCode CC, const TargetLowering &TLI, 5286 SelectionDAG &DAG) { 5287 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True)) 5288 return SDValue(); 5289 5290 switch (CC) { 5291 case ISD::SETOLT: 5292 case ISD::SETOLE: 5293 case ISD::SETLT: 5294 case ISD::SETLE: 5295 case ISD::SETULT: 5296 case ISD::SETULE: { 5297 unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM; 5298 if (TLI.isOperationLegal(Opcode, VT)) 5299 return DAG.getNode(Opcode, DL, VT, LHS, RHS); 5300 return SDValue(); 5301 } 5302 case ISD::SETOGT: 5303 case ISD::SETOGE: 5304 case ISD::SETGT: 5305 case ISD::SETGE: 5306 case ISD::SETUGT: 5307 case ISD::SETUGE: { 5308 unsigned Opcode = (LHS == True) ? 
ISD::FMAXNUM : ISD::FMINNUM;
5309     if (TLI.isOperationLegal(Opcode, VT))
5310       return DAG.getNode(Opcode, DL, VT, LHS, RHS);
5311     return SDValue();
5312   }
5313   default:
5314     return SDValue();
5315   }
5316 }
5317
5318 // TODO: We should handle other cases of selecting between {-1,0,1} here.
5319 SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
5320   SDValue Cond = N->getOperand(0);
5321   SDValue N1 = N->getOperand(1);
5322   SDValue N2 = N->getOperand(2);
5323   EVT VT = N->getValueType(0);
5324   EVT CondVT = Cond.getValueType();
5325   SDLoc DL(N);
5326
5327   // fold (select Cond, 0, 1) -> (xor Cond, 1)
5328   // We can't do this reliably if integer-based booleans have different contents
5329   // from floating-point-based booleans. This is because we can't tell whether we
5330   // have an integer-based boolean or a floating-point-based boolean unless we
5331   // can find the SETCC that produced it and inspect its operands. This is
5332   // fairly easy if Cond is the SETCC node, but it can potentially be
5333   // undiscoverable (or not reasonably discoverable). For example, it could be
5334   // in another basic block or it could require searching a complicated
5335   // expression.
5336   if (VT.isInteger() &&
5337       (CondVT == MVT::i1 || (CondVT.isInteger() &&
5338                              TLI.getBooleanContents(false, true) ==
5339                                  TargetLowering::ZeroOrOneBooleanContent &&
5340                              TLI.getBooleanContents(false, false) ==
5341                                  TargetLowering::ZeroOrOneBooleanContent)) &&
5342       isNullConstant(N1) && isOneConstant(N2)) {
5343     SDValue NotCond = DAG.getNode(ISD::XOR, DL, CondVT, Cond,
5344                                   DAG.getConstant(1, DL, CondVT));
5345     if (VT.bitsEq(CondVT))
5346       return NotCond;
5347     return DAG.getZExtOrTrunc(NotCond, DL, VT);
5348   }
5349
5350   return SDValue();
5351 }
5352
5353 SDValue DAGCombiner::visitSELECT(SDNode *N) {
5354   SDValue N0 = N->getOperand(0);
5355   SDValue N1 = N->getOperand(1);
5356   SDValue N2 = N->getOperand(2);
5357   EVT VT = N->getValueType(0);
5358   EVT VT0 = N0.getValueType();
5359
5360   // fold (select C, X, X) -> X
5361   if (N1 == N2)
5362     return N1;
5363   if (const ConstantSDNode *N0C = dyn_cast<const ConstantSDNode>(N0)) {
5364     // fold (select true, X, Y) -> X
5365     // fold (select false, X, Y) -> Y
5366     return !N0C->isNullValue() ? N1 : N2;
5367   }
5368   // fold (select X, X, Y) -> (or X, Y)
5369   // fold (select X, 1, Y) -> (or X, Y)
5370   if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
5371     return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
5372
5373   if (SDValue V = foldSelectOfConstants(N))
5374     return V;
5375
5376   // fold (select C, 0, X) -> (and (not C), X)
5377   if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
5378     SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
5379     AddToWorklist(NOTNode.getNode());
5380     return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
5381   }
5382   // fold (select C, X, 1) -> (or (not C), X)
5383   if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
5384     SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
5385     AddToWorklist(NOTNode.getNode());
5386     return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
5387   }
5388   // fold (select X, Y, X) -> (and X, Y)
5389   // fold (select X, Y, 0) -> (and X, Y)
5390   if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
5391     return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
5392
5393   // If we can fold this based on the true/false value, do so.
5394   if (SimplifySelectOps(N, N1, N2))
5395     return SDValue(N, 0);  // Don't revisit N.
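  // A quick truth-table check of the select-of-select equivalences handled
  // below (illustrative): for select(C0&C1, x, y) we get
  //   C0=1,C1=1 -> x;   C0=1,C1=0 -> y;   C0=0 -> y,
  // which is exactly select(C0, select(C1, x, y), y).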
5396
5397   if (VT0 == MVT::i1) {
5398     // The code in this block deals with the following 2 equivalences:
5399     //    select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y))
5400     //    select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y)
5401     // The target can specify its preferred form with the
5402     // shouldNormalizeToSelectSequence() callback. However, we always transform
5403     // to the right-hand side if the inner select already exists in the DAG,
5404     // and we always transform to the left-hand side if we know that we can
5405     // further optimize the combination of the conditions.
5406     bool normalizeToSequence
5407       = TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
5408     // select (and Cond0, Cond1), X, Y
5409     //   -> select Cond0, (select Cond1, X, Y), Y
5410     if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
5411       SDValue Cond0 = N0->getOperand(0);
5412       SDValue Cond1 = N0->getOperand(1);
5413       SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
5414                                         N1.getValueType(), Cond1, N1, N2);
5415       if (normalizeToSequence || !InnerSelect.use_empty())
5416         return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0,
5417                            InnerSelect, N2);
5418     }
5419     // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
5420     if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
5421       SDValue Cond0 = N0->getOperand(0);
5422       SDValue Cond1 = N0->getOperand(1);
5423       SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
5424                                         N1.getValueType(), Cond1, N1, N2);
5425       if (normalizeToSequence || !InnerSelect.use_empty())
5426         return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0, N1,
5427                            InnerSelect);
5428     }
5429
5430     // select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
5431     if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) {
5432       SDValue N1_0 = N1->getOperand(0);
5433       SDValue N1_1 = N1->getOperand(1);
5434       SDValue N1_2 = N1->getOperand(2);
5435       if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
5436         // Create the actual and node if we can generate good code for it.
5437         if (!normalizeToSequence) {
5438           SDValue And = DAG.getNode(ISD::AND, SDLoc(N), N0.getValueType(),
5439                                     N0, N1_0);
5440           return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), And,
5441                              N1_1, N2);
5442         }
5443         // Otherwise see if we can optimize the "and" to a better pattern.
5444         if (SDValue Combined = visitANDLike(N0, N1_0, N))
5445           return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
5446                              N1_1, N2);
5447       }
5448     }
5449     // select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
5450     if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) {
5451       SDValue N2_0 = N2->getOperand(0);
5452       SDValue N2_1 = N2->getOperand(1);
5453       SDValue N2_2 = N2->getOperand(2);
5454       if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
5455         // Create the actual or node if we can generate good code for it.
5456         if (!normalizeToSequence) {
5457           SDValue Or = DAG.getNode(ISD::OR, SDLoc(N), N0.getValueType(),
5458                                    N0, N2_0);
5459           return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Or,
5460                              N1, N2_2);
5461         }
5462         // Otherwise see if we can optimize to a better pattern.
5463         if (SDValue Combined = visitORLike(N0, N2_0, N))
5464           return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
5465                              N1, N2_2);
5466       }
5467     }
5468   }
5469
5470   // select (xor Cond, 1), X, Y -> select Cond, Y, X
5471   if (VT0 == MVT::i1) {
5472     if (N0->getOpcode() == ISD::XOR) {
5473       if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) {
5474         SDValue Cond0 = N0->getOperand(0);
5475         if (C->isOne())
5476           return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(),
5477                              Cond0, N2, N1);
5478       }
5479     }
5480   }
5481
5482   // fold selects based on a setcc into other things, such as min/max/abs
5483   if (N0.getOpcode() == ISD::SETCC) {
5484     // select x, y (fcmp lt x, y) -> fminnum x, y
5485     // select x, y (fcmp gt x, y) -> fmaxnum x, y
5486     //
5487     // This is OK if we don't care about what happens if either operand is a
5488     // NaN.
5489     //
5490
5491     // FIXME: Instead of testing for UnsafeFPMath, this should be checking for
5492     // no signed zeros as well as no nans.
5493     const TargetOptions &Options = DAG.getTarget().Options;
5494     if (Options.UnsafeFPMath &&
5495         VT.isFloatingPoint() && N0.hasOneUse() &&
5496         DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) {
5497       ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
5498
5499       if (SDValue FMinMax = combineMinNumMaxNum(SDLoc(N), VT, N0.getOperand(0),
5500                                                 N0.getOperand(1), N1, N2, CC,
5501                                                 TLI, DAG))
5502         return FMinMax;
5503     }
5504
5505     if ((!LegalOperations &&
5506          TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
5507         TLI.isOperationLegal(ISD::SELECT_CC, VT))
5508       return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
5509                          N0.getOperand(0), N0.getOperand(1),
5510                          N1, N2, N0.getOperand(2));
5511     return SimplifySelect(SDLoc(N), N0, N1, N2);
5512   }
5513
5514   return SDValue();
5515 }
5516
5517 static
5518 std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
5519   SDLoc DL(N);
5520   EVT LoVT, HiVT;
5521   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
5522
5523   // Split the inputs.
5524   SDValue Lo, Hi, LL, LH, RL, RH;
5525   std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
5526   std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
5527
5528   Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
5529   Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
5530
5531   return std::make_pair(Lo, Hi);
5532 }
5533
5534 // This function assumes all the vselect's arguments are CONCAT_VECTORS
5535 // nodes and that the condition is a BV of ConstantSDNodes (or undefs).
5536 static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
5537   SDLoc DL(N);
5538   SDValue Cond = N->getOperand(0);
5539   SDValue LHS = N->getOperand(1);
5540   SDValue RHS = N->getOperand(2);
5541   EVT VT = N->getValueType(0);
5542   int NumElems = VT.getVectorNumElements();
5543   assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
5544          RHS.getOpcode() == ISD::CONCAT_VECTORS &&
5545          Cond.getOpcode() == ISD::BUILD_VECTOR);
5546
5547   // CONCAT_VECTORS can take an arbitrary number of arguments. We only care about
5548   // binary ones here.
5549   if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
5550     return SDValue();
5551
5552   // We're sure we have an even number of elements due to the
5553   // concat_vectors we have as arguments to vselect.
5554   // Skip BV elements until we find one that's not an UNDEF.
5555   // After we find a non-UNDEF element, keep looping until we get to half the
5556   // length of the BV and see if all the non-undef nodes are the same.
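  // An illustrative instance with hypothetical v4i32 operands:
  //   vselect <-1,-1,0,0>, (concat A, B), (concat C, D) -> (concat A, D)
  // since the bottom half of the mask uniformly picks the LHS and the top
  // half uniformly picks the RHS.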
5557   ConstantSDNode *BottomHalf = nullptr;
5558   for (int i = 0; i < NumElems / 2; ++i) {
5559     if (Cond->getOperand(i)->isUndef())
5560       continue;
5561
5562     if (BottomHalf == nullptr)
5563       BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
5564     else if (Cond->getOperand(i).getNode() != BottomHalf)
5565       return SDValue();
5566   }
5567
5568   // Do the same for the second half of the BuildVector
5569   ConstantSDNode *TopHalf = nullptr;
5570   for (int i = NumElems / 2; i < NumElems; ++i) {
5571     if (Cond->getOperand(i)->isUndef())
5572       continue;
5573
5574     if (TopHalf == nullptr)
5575       TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
5576     else if (Cond->getOperand(i).getNode() != TopHalf)
5577       return SDValue();
5578   }
5579
5580   assert(TopHalf && BottomHalf &&
5581          "One half of the selector was all UNDEFs and the other was all the "
5582          "same value. This should have been addressed before this function.");
5583   return DAG.getNode(
5584       ISD::CONCAT_VECTORS, DL, VT,
5585       BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
5586       TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
5587 }
5588
5589 SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
5590
5591   if (Level >= AfterLegalizeTypes)
5592     return SDValue();
5593
5594   MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
5595   SDValue Mask = MSC->getMask();
5596   SDValue Data = MSC->getValue();
5597   SDLoc DL(N);
5598
5599   // If the MSCATTER data type requires splitting and the mask is provided by a
5600   // SETCC, then split both nodes and their operands before legalization. This
5601   // prevents the type legalizer from unrolling SETCC into scalar comparisons
5602   // and enables future optimizations (e.g. min/max pattern matching on X86).
5603   if (Mask.getOpcode() != ISD::SETCC)
5604     return SDValue();
5605
5606   // Check if any splitting is required.
5607   if (TLI.getTypeAction(*DAG.getContext(), Data.getValueType()) !=
5608       TargetLowering::TypeSplitVector)
5609     return SDValue();
5610   SDValue MaskLo, MaskHi, Lo, Hi;
5611   std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
5612
5613   EVT LoVT, HiVT;
5614   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MSC->getValueType(0));
5615
5616   SDValue Chain = MSC->getChain();
5617
5618   EVT MemoryVT = MSC->getMemoryVT();
5619   unsigned Alignment = MSC->getOriginalAlignment();
5620
5621   EVT LoMemVT, HiMemVT;
5622   std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
5623
5624   SDValue DataLo, DataHi;
5625   std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
5626
5627   SDValue BasePtr = MSC->getBasePtr();
5628   SDValue IndexLo, IndexHi;
5629   std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);
5630
5631   MachineMemOperand *MMO = DAG.getMachineFunction().
5632     getMachineMemOperand(MSC->getPointerInfo(),
5633                          MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
5634                          Alignment, MSC->getAAInfo(), MSC->getRanges());
5635
5636   SDValue OpsLo[] = { Chain, DataLo, MaskLo, BasePtr, IndexLo };
5637   Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(),
5638                             DL, OpsLo, MMO);
5639
5640   SDValue OpsHi[] = {Chain, DataHi, MaskHi, BasePtr, IndexHi};
5641   Hi = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataHi.getValueType(),
5642                             DL, OpsHi, MMO);
5643
5644   AddToWorklist(Lo.getNode());
5645   AddToWorklist(Hi.getNode());
5646
5647   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
5648 }
5649
5650 SDValue DAGCombiner::visitMSTORE(SDNode *N) {
5651
5652   if (Level >= AfterLegalizeTypes)
5653     return SDValue();
5654
5655   MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
5656   SDValue Mask = MST->getMask();
5657   SDValue Data = MST->getValue();
5658   EVT VT = Data.getValueType();
5659   SDLoc DL(N);
5660
5661   // If the MSTORE data type requires splitting and the mask is provided by a
5662   // SETCC, then split both nodes and their operands before legalization. This
5663   // prevents the type legalizer from unrolling SETCC into scalar comparisons
5664   // and enables future optimizations (e.g. min/max pattern matching on X86).
5665   if (Mask.getOpcode() == ISD::SETCC) {
5666
5667     // Check if any splitting is required.
5668     if (TLI.getTypeAction(*DAG.getContext(), VT) !=
5669         TargetLowering::TypeSplitVector)
5670       return SDValue();
5671
5672     SDValue MaskLo, MaskHi, Lo, Hi;
5673     std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
5674
5675     SDValue Chain = MST->getChain();
5676     SDValue Ptr = MST->getBasePtr();
5677
5678     EVT MemoryVT = MST->getMemoryVT();
5679     unsigned Alignment = MST->getOriginalAlignment();
5680
5681     // If the alignment is equal to the vector size, use half of it for the
5682     // second part.
5683     unsigned SecondHalfAlignment =
5684       (Alignment == VT.getSizeInBits() / 8) ? Alignment / 2 : Alignment;
5685
5686     EVT LoMemVT, HiMemVT;
5687     std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
5688
5689     SDValue DataLo, DataHi;
5690     std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
5691
5692     MachineMemOperand *MMO = DAG.getMachineFunction().
5693       getMachineMemOperand(MST->getPointerInfo(),
5694                            MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
5695                            Alignment, MST->getAAInfo(), MST->getRanges());
5696
5697     Lo = DAG.getMaskedStore(Chain, DL, DataLo, Ptr, MaskLo, LoMemVT, MMO,
5698                             MST->isTruncatingStore(),
5699                             MST->isCompressingStore());
5700
5701     Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
5702                                      MST->isCompressingStore());
5703
5704     MMO = DAG.getMachineFunction().
5705       getMachineMemOperand(MST->getPointerInfo(),
5706                            MachineMemOperand::MOStore, HiMemVT.getStoreSize(),
5707                            SecondHalfAlignment, MST->getAAInfo(),
5708                            MST->getRanges());
5709
5710     Hi = DAG.getMaskedStore(Chain, DL, DataHi, Ptr, MaskHi, HiMemVT, MMO,
5711                             MST->isTruncatingStore(),
5712                             MST->isCompressingStore());
5713
5714     AddToWorklist(Lo.getNode());
5715     AddToWorklist(Hi.getNode());
5716
5717     return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
5718   }
5719   return SDValue();
5720 }
5721
5722 SDValue DAGCombiner::visitMGATHER(SDNode *N) {
5723
5724   if (Level >= AfterLegalizeTypes)
5725     return SDValue();
5726
5727   MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
5728   SDValue Mask = MGT->getMask();
5729   SDLoc DL(N);
5730
5731   // If the MGATHER result requires splitting and the mask is provided by a
5732   // SETCC, then split both nodes and their operands before legalization. This
5733   // prevents the type legalizer from unrolling SETCC into scalar comparisons
5734   // and enables future optimizations (e.g. min/max pattern matching on X86).
5735
5736   if (Mask.getOpcode() != ISD::SETCC)
5737     return SDValue();
5738
5739   EVT VT = N->getValueType(0);
5740
5741   // Check if any splitting is required.
5742   if (TLI.getTypeAction(*DAG.getContext(), VT) !=
5743       TargetLowering::TypeSplitVector)
5744     return SDValue();
5745
5746   SDValue MaskLo, MaskHi, Lo, Hi;
5747   std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
5748
5749   SDValue Src0 = MGT->getValue();
5750   SDValue Src0Lo, Src0Hi;
5751   std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);
5752
5753   EVT LoVT, HiVT;
5754   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
5755
5756   SDValue Chain = MGT->getChain();
5757   EVT MemoryVT = MGT->getMemoryVT();
5758   unsigned Alignment = MGT->getOriginalAlignment();
5759
5760   EVT LoMemVT, HiMemVT;
5761   std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
5762
5763   SDValue BasePtr = MGT->getBasePtr();
5764   SDValue Index = MGT->getIndex();
5765   SDValue IndexLo, IndexHi;
5766   std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
5767
5768   MachineMemOperand *MMO = DAG.getMachineFunction().
5769     getMachineMemOperand(MGT->getPointerInfo(),
5770                          MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
5771                          Alignment, MGT->getAAInfo(), MGT->getRanges());
5772
5773   SDValue OpsLo[] = { Chain, Src0Lo, MaskLo, BasePtr, IndexLo };
5774   Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, DL, OpsLo,
5775                            MMO);
5776
5777   SDValue OpsHi[] = {Chain, Src0Hi, MaskHi, BasePtr, IndexHi};
5778   Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, DL, OpsHi,
5779                            MMO);
5780
5781   AddToWorklist(Lo.getNode());
5782   AddToWorklist(Hi.getNode());
5783
5784   // Build a factor node to remember that this load is independent of the
5785   // other one.
5786   Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
5787                       Hi.getValue(1));
5788
5789   // Legalized the chain result - switch anything that used the old chain to
5790   // use the new one.
5791   DAG.ReplaceAllUsesOfValueWith(SDValue(MGT, 1), Chain);
5792
5793   SDValue GatherRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
5794
5795   SDValue RetOps[] = { GatherRes, Chain };
5796   return DAG.getMergeValues(RetOps, DL);
5797 }
5798
5799 SDValue DAGCombiner::visitMLOAD(SDNode *N) {
5800
5801   if (Level >= AfterLegalizeTypes)
5802     return SDValue();
5803
5804   MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
5805   SDValue Mask = MLD->getMask();
5806   SDLoc DL(N);
5807
5808   // If the MLOAD result requires splitting and the mask is provided by a
5809   // SETCC, then split both nodes and their operands before legalization. This
5810   // prevents the type legalizer from unrolling SETCC into scalar comparisons
5811   // and enables future optimizations (e.g. min/max pattern matching on X86).
5812
5813   if (Mask.getOpcode() == ISD::SETCC) {
5814     EVT VT = N->getValueType(0);
5815
5816     // Check if any splitting is required.
5817     if (TLI.getTypeAction(*DAG.getContext(), VT) !=
5818         TargetLowering::TypeSplitVector)
5819       return SDValue();
5820
5821     SDValue MaskLo, MaskHi, Lo, Hi;
5822     std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
5823
5824     SDValue Src0 = MLD->getSrc0();
5825     SDValue Src0Lo, Src0Hi;
5826     std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);
5827
5828     EVT LoVT, HiVT;
5829     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0));
5830
5831     SDValue Chain = MLD->getChain();
5832     SDValue Ptr = MLD->getBasePtr();
5833     EVT MemoryVT = MLD->getMemoryVT();
5834     unsigned Alignment = MLD->getOriginalAlignment();
5835
5836     // If the alignment is equal to the vector size, use half of it for the
5837     // second part.
5838     unsigned SecondHalfAlignment =
5839       (Alignment == MLD->getValueType(0).getSizeInBits()/8) ?
5840        Alignment/2 : Alignment;
5841
5842     EVT LoMemVT, HiMemVT;
5843     std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
5844
5845     MachineMemOperand *MMO = DAG.getMachineFunction().
5846       getMachineMemOperand(MLD->getPointerInfo(),
5847                            MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
5848                            Alignment, MLD->getAAInfo(), MLD->getRanges());
5849
5850     Lo = DAG.getMaskedLoad(LoVT, DL, Chain, Ptr, MaskLo, Src0Lo, LoMemVT, MMO,
5851                            ISD::NON_EXTLOAD, MLD->isExpandingLoad());
5852
5853     Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
5854                                      MLD->isExpandingLoad());
5855
5856     MMO = DAG.getMachineFunction().
5857       getMachineMemOperand(MLD->getPointerInfo(),
5858                            MachineMemOperand::MOLoad, HiMemVT.getStoreSize(),
5859                            SecondHalfAlignment, MLD->getAAInfo(), MLD->getRanges());
5860
5861     Hi = DAG.getMaskedLoad(HiVT, DL, Chain, Ptr, MaskHi, Src0Hi, HiMemVT, MMO,
5862                            ISD::NON_EXTLOAD, MLD->isExpandingLoad());
5863
5864     AddToWorklist(Lo.getNode());
5865     AddToWorklist(Hi.getNode());
5866
5867     // Build a factor node to remember that this load is independent of the
5868     // other one.
5869     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
5870                         Hi.getValue(1));
5871
5872     // Legalized the chain result - switch anything that used the old chain to
5873     // use the new one.
5874 DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), Chain); 5875 5876 SDValue LoadRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); 5877 5878 SDValue RetOps[] = { LoadRes, Chain }; 5879 return DAG.getMergeValues(RetOps, DL); 5880 } 5881 return SDValue(); 5882 } 5883 5884 SDValue DAGCombiner::visitVSELECT(SDNode *N) { 5885 SDValue N0 = N->getOperand(0); 5886 SDValue N1 = N->getOperand(1); 5887 SDValue N2 = N->getOperand(2); 5888 SDLoc DL(N); 5889 5890 // fold (vselect C, X, X) -> X 5891 if (N1 == N2) 5892 return N1; 5893 5894 // Canonicalize integer abs. 5895 // vselect (setg[te] X, 0), X, -X -> 5896 // vselect (setgt X, -1), X, -X -> 5897 // vselect (setl[te] X, 0), -X, X -> 5898 // Y = sra (X, size(X)-1); xor (add (X, Y), Y) 5899 if (N0.getOpcode() == ISD::SETCC) { 5900 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 5901 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 5902 bool isAbs = false; 5903 bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); 5904 5905 if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) || 5906 (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) && 5907 N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1)) 5908 isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode()); 5909 else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) && 5910 N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1)) 5911 isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode()); 5912 5913 if (isAbs) { 5914 EVT VT = LHS.getValueType(); 5915 SDValue Shift = DAG.getNode( 5916 ISD::SRA, DL, VT, LHS, 5917 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT)); 5918 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift); 5919 AddToWorklist(Shift.getNode()); 5920 AddToWorklist(Add.getNode()); 5921 return DAG.getNode(ISD::XOR, DL, VT, Add, Shift); 5922 } 5923 } 5924 5925 if (SimplifySelectOps(N, N1, N2)) 5926 return SDValue(N, 0); // Don't revisit N. 5927 5928 // If the VSELECT result requires splitting and the mask is provided by a 5929 // SETCC, then split both nodes and its operands before legalization. This 5930 // prevents the type legalizer from unrolling SETCC into scalar comparisons 5931 // and enables future optimizations (e.g. min/max pattern matching on X86). 5932 if (N0.getOpcode() == ISD::SETCC) { 5933 EVT VT = N->getValueType(0); 5934 5935 // Check if any splitting is required. 5936 if (TLI.getTypeAction(*DAG.getContext(), VT) != 5937 TargetLowering::TypeSplitVector) 5938 return SDValue(); 5939 5940 SDValue Lo, Hi, CCLo, CCHi, LL, LH, RL, RH; 5941 std::tie(CCLo, CCHi) = SplitVSETCC(N0.getNode(), DAG); 5942 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 1); 5943 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 2); 5944 5945 Lo = DAG.getNode(N->getOpcode(), DL, LL.getValueType(), CCLo, LL, RL); 5946 Hi = DAG.getNode(N->getOpcode(), DL, LH.getValueType(), CCHi, LH, RH); 5947 5948 // Add the new VSELECT nodes to the work list in case they need to be split 5949 // again. 
5950     AddToWorklist(Lo.getNode());
5951     AddToWorklist(Hi.getNode());
5952
5953     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
5954   }
5955
5956   // Fold (vselect (build_vector all_ones), N1, N2) -> N1
5957   if (ISD::isBuildVectorAllOnes(N0.getNode()))
5958     return N1;
5959   // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
5960   if (ISD::isBuildVectorAllZeros(N0.getNode()))
5961     return N2;
5962
5963   // The ConvertSelectToConcatVector function assumes both the above
5964   // checks for (vselect (build_vector all{ones,zeros}) ...) have been made
5965   // and addressed.
5966   if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
5967       N2.getOpcode() == ISD::CONCAT_VECTORS &&
5968       ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
5969     if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
5970       return CV;
5971   }
5972
5973   return SDValue();
5974 }
5975
5976 SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
5977   SDValue N0 = N->getOperand(0);
5978   SDValue N1 = N->getOperand(1);
5979   SDValue N2 = N->getOperand(2);
5980   SDValue N3 = N->getOperand(3);
5981   SDValue N4 = N->getOperand(4);
5982   ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
5983
5984   // fold select_cc lhs, rhs, x, x, cc -> x
5985   if (N2 == N3)
5986     return N2;
5987
5988   // Determine if the condition we're dealing with is constant
5989   if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
5990                                   CC, SDLoc(N), false)) {
5991     AddToWorklist(SCC.getNode());
5992
5993     if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
5994       if (!SCCC->isNullValue())
5995         return N2;    // cond always true -> true val
5996       else
5997         return N3;    // cond always false -> false val
5998     } else if (SCC->isUndef()) {
5999       // When the condition is UNDEF, just return the first operand. This is
6000       // coherent with the DAG creation; no setcc node is created in this case.
6001       return N2;
6002     } else if (SCC.getOpcode() == ISD::SETCC) {
6003       // Fold to a simpler select_cc
6004       return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
6005                          SCC.getOperand(0), SCC.getOperand(1), N2, N3,
6006                          SCC.getOperand(2));
6007     }
6008   }
6009
6010   // If we can fold this based on the true/false value, do so.
6011   if (SimplifySelectOps(N, N2, N3))
6012     return SDValue(N, 0);  // Don't revisit N.
6013
6014   // fold select_cc into other things, such as min/max/abs
6015   return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
6016 }
6017
6018 SDValue DAGCombiner::visitSETCC(SDNode *N) {
6019   return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
6020                        cast<CondCodeSDNode>(N->getOperand(2))->get(),
6021                        SDLoc(N));
6022 }
6023
6024 SDValue DAGCombiner::visitSETCCE(SDNode *N) {
6025   SDValue LHS = N->getOperand(0);
6026   SDValue RHS = N->getOperand(1);
6027   SDValue Carry = N->getOperand(2);
6028   SDValue Cond = N->getOperand(3);
6029
6030   // If Carry is false, fold to a regular SETCC.
6031   if (Carry.getOpcode() == ISD::CARRY_FALSE)
6032     return DAG.getNode(ISD::SETCC, SDLoc(N), N->getVTList(), LHS, RHS, Cond);
6033
6034   return SDValue();
6035 }
6036
6037 /// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
6038 /// a build_vector of constants.
6039 /// This function is called by the DAGCombiner when visiting sext/zext/aext
6040 /// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
6041 /// Vector extends are not folded if operations are legal; this is to
6042 /// avoid introducing illegal build_vector dag nodes.
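/// For example (an illustrative fold; types hypothetical):
///   (v2i32 (zext (v2i16 build_vector 1, -1)))
///     -> (v2i32 build_vector 1, 0xFFFF)
/// whereas sext would instead produce -1 for the second element.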
6043 static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
6044                                          SelectionDAG &DAG, bool LegalTypes,
6045                                          bool LegalOperations) {
6046   unsigned Opcode = N->getOpcode();
6047   SDValue N0 = N->getOperand(0);
6048   EVT VT = N->getValueType(0);
6049
6050   assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
6051          Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
6052          Opcode == ISD::ZERO_EXTEND_VECTOR_INREG)
6053          && "Expected EXTEND dag node in input!");
6054
6055   // fold (sext c1) -> c1
6056   // fold (zext c1) -> c1
6057   // fold (aext c1) -> c1
6058   if (isa<ConstantSDNode>(N0))
6059     return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode();
6060
6061   // fold (sext (build_vector AllConstants)) -> (build_vector AllConstants)
6062   // fold (zext (build_vector AllConstants)) -> (build_vector AllConstants)
6063   // fold (aext (build_vector AllConstants)) -> (build_vector AllConstants)
6064   EVT SVT = VT.getScalarType();
6065   if (!(VT.isVector() &&
6066       (!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) &&
6067       ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
6068     return nullptr;
6069
6070   // We can fold this node into a build_vector.
6071   unsigned VTBits = SVT.getSizeInBits();
6072   unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits();
6073   SmallVector<SDValue, 8> Elts;
6074   unsigned NumElts = VT.getVectorNumElements();
6075   SDLoc DL(N);
6076
6077   for (unsigned i=0; i != NumElts; ++i) {
6078     SDValue Op = N0->getOperand(i);
6079     if (Op->isUndef()) {
6080       Elts.push_back(DAG.getUNDEF(SVT));
6081       continue;
6082     }
6083
6084     SDLoc DL(Op);
6085     // Get the constant value and truncate it if needed to the size of the type.
6086     // Nodes like build_vector might have constants wider than the scalar type.
6087     APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
6088     if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
6089       Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
6090     else
6091       Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
6092   }
6093
6094   return DAG.getBuildVector(VT, DL, Elts).getNode();
6095 }
6096
6097 // ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
6098 // "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
6099 // transformation. Returns true if extensions are possible and the
6100 // above-mentioned transformation is profitable.
6101 static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
6102                                     unsigned ExtOpc,
6103                                     SmallVectorImpl<SDNode *> &ExtendNodes,
6104                                     const TargetLowering &TLI) {
6105   bool HasCopyToRegUses = false;
6106   bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
6107   for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
6108                             UE = N0.getNode()->use_end();
6109        UI != UE; ++UI) {
6110     SDNode *User = *UI;
6111     if (User == N)
6112       continue;
6113     if (UI.getUse().getResNo() != N0.getResNo())
6114       continue;
6115     // FIXME: Only extend SETCC N, N and SETCC N, c for now.
6116     if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
6117       ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
6118       if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
6119         // Sign bits will be lost after a zext.
6120 return false; 6121 bool Add = false; 6122 for (unsigned i = 0; i != 2; ++i) { 6123 SDValue UseOp = User->getOperand(i); 6124 if (UseOp == N0) 6125 continue; 6126 if (!isa<ConstantSDNode>(UseOp)) 6127 return false; 6128 Add = true; 6129 } 6130 if (Add) 6131 ExtendNodes.push_back(User); 6132 continue; 6133 } 6134 // If truncates aren't free and there are users we can't 6135 // extend, it isn't worthwhile. 6136 if (!isTruncFree) 6137 return false; 6138 // Remember if this value is live-out. 6139 if (User->getOpcode() == ISD::CopyToReg) 6140 HasCopyToRegUses = true; 6141 } 6142 6143 if (HasCopyToRegUses) { 6144 bool BothLiveOut = false; 6145 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 6146 UI != UE; ++UI) { 6147 SDUse &Use = UI.getUse(); 6148 if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) { 6149 BothLiveOut = true; 6150 break; 6151 } 6152 } 6153 if (BothLiveOut) 6154 // Both unextended and extended values are live out. There had better be 6155 // a good reason for the transformation. 6156 return ExtendNodes.size(); 6157 } 6158 return true; 6159 } 6160 6161 void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs, 6162 SDValue Trunc, SDValue ExtLoad, 6163 const SDLoc &DL, ISD::NodeType ExtType) { 6164 // Extend SetCC uses if necessary. 6165 for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) { 6166 SDNode *SetCC = SetCCs[i]; 6167 SmallVector<SDValue, 4> Ops; 6168 6169 for (unsigned j = 0; j != 2; ++j) { 6170 SDValue SOp = SetCC->getOperand(j); 6171 if (SOp == Trunc) 6172 Ops.push_back(ExtLoad); 6173 else 6174 Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp)); 6175 } 6176 6177 Ops.push_back(SetCC->getOperand(2)); 6178 CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops)); 6179 } 6180 } 6181 6182 // FIXME: Bring more similar combines here, common to sext/zext (maybe aext?). 6183 SDValue DAGCombiner::CombineExtLoad(SDNode *N) { 6184 SDValue N0 = N->getOperand(0); 6185 EVT DstVT = N->getValueType(0); 6186 EVT SrcVT = N0.getValueType(); 6187 6188 assert((N->getOpcode() == ISD::SIGN_EXTEND || 6189 N->getOpcode() == ISD::ZERO_EXTEND) && 6190 "Unexpected node type (not an extend)!"); 6191 6192 // fold (sext (load x)) to multiple smaller sextloads; same for zext. 6193 // For example, on a target with legal v4i32, but illegal v8i32, turn: 6194 // (v8i32 (sext (v8i16 (load x)))) 6195 // into: 6196 // (v8i32 (concat_vectors (v4i32 (sextload x)), 6197 // (v4i32 (sextload (x + 16))))) 6198 // Where uses of the original load, i.e.: 6199 // (v8i16 (load x)) 6200 // are replaced with: 6201 // (v8i16 (truncate 6202 // (v8i32 (concat_vectors (v4i32 (sextload x)), 6203 // (v4i32 (sextload (x + 16))))))) 6204 // 6205 // This combine is only applicable to illegal, but splittable, vectors. 6206 // All legal types, and illegal non-vector types, are handled elsewhere. 6207 // This combine is controlled by TargetLowering::isVectorLoadExtDesirable. 
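// As a sketch of the type-splitting step below (illustrative types): with
// v8i32 illegal but v4i32 legal, the destination/source pair is halved,
// v8i32/v8i16 -> v4i32/v4i16, until the corresponding extload is legal or
// custom.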
6208 // 6209 if (N0->getOpcode() != ISD::LOAD) 6210 return SDValue(); 6211 6212 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6213 6214 if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) || 6215 !N0.hasOneUse() || LN0->isVolatile() || !DstVT.isVector() || 6216 !DstVT.isPow2VectorType() || !TLI.isVectorLoadExtDesirable(SDValue(N, 0))) 6217 return SDValue(); 6218 6219 SmallVector<SDNode *, 4> SetCCs; 6220 if (!ExtendUsesToFormExtLoad(N, N0, N->getOpcode(), SetCCs, TLI)) 6221 return SDValue(); 6222 6223 ISD::LoadExtType ExtType = 6224 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; 6225 6226 // Try to split the vector types to get down to legal types. 6227 EVT SplitSrcVT = SrcVT; 6228 EVT SplitDstVT = DstVT; 6229 while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) && 6230 SplitSrcVT.getVectorNumElements() > 1) { 6231 SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first; 6232 SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first; 6233 } 6234 6235 if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT)) 6236 return SDValue(); 6237 6238 SDLoc DL(N); 6239 const unsigned NumSplits = 6240 DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements(); 6241 const unsigned Stride = SplitSrcVT.getStoreSize(); 6242 SmallVector<SDValue, 4> Loads; 6243 SmallVector<SDValue, 4> Chains; 6244 6245 SDValue BasePtr = LN0->getBasePtr(); 6246 for (unsigned Idx = 0; Idx < NumSplits; Idx++) { 6247 const unsigned Offset = Idx * Stride; 6248 const unsigned Align = MinAlign(LN0->getAlignment(), Offset); 6249 6250 SDValue SplitLoad = DAG.getExtLoad( 6251 ExtType, DL, SplitDstVT, LN0->getChain(), BasePtr, 6252 LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align, 6253 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 6254 6255 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 6256 DAG.getConstant(Stride, DL, BasePtr.getValueType())); 6257 6258 Loads.push_back(SplitLoad.getValue(0)); 6259 Chains.push_back(SplitLoad.getValue(1)); 6260 } 6261 6262 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 6263 SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads); 6264 6265 CombineTo(N, NewValue); 6266 6267 // Replace uses of the original load (before extension) 6268 // with a truncate of the concatenated sextloaded vectors. 6269 SDValue Trunc = 6270 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue); 6271 CombineTo(N0.getNode(), Trunc, NewChain); 6272 ExtendSetCCUses(SetCCs, Trunc, NewValue, DL, 6273 (ISD::NodeType)N->getOpcode()); 6274 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
6275 } 6276 6277 SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) { 6278 SDValue N0 = N->getOperand(0); 6279 EVT VT = N->getValueType(0); 6280 6281 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 6282 LegalOperations)) 6283 return SDValue(Res, 0); 6284 6285 // fold (sext (sext x)) -> (sext x) 6286 // fold (sext (aext x)) -> (sext x) 6287 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 6288 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, 6289 N0.getOperand(0)); 6290 6291 if (N0.getOpcode() == ISD::TRUNCATE) { 6292 // fold (sext (truncate (load x))) -> (sext (smaller load x)) 6293 // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n))) 6294 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 6295 SDNode *oye = N0.getOperand(0).getNode(); 6296 if (NarrowLoad.getNode() != N0.getNode()) { 6297 CombineTo(N0.getNode(), NarrowLoad); 6298 // CombineTo deleted the truncate, if needed, but not what's under it. 6299 AddToWorklist(oye); 6300 } 6301 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6302 } 6303 6304 // See if the value being truncated is already sign extended. If so, just 6305 // eliminate the trunc/sext pair. 6306 SDValue Op = N0.getOperand(0); 6307 unsigned OpBits = Op.getScalarValueSizeInBits(); 6308 unsigned MidBits = N0.getScalarValueSizeInBits(); 6309 unsigned DestBits = VT.getScalarSizeInBits(); 6310 unsigned NumSignBits = DAG.ComputeNumSignBits(Op); 6311 6312 if (OpBits == DestBits) { 6313 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign 6314 // bits, it is already ready. 6315 if (NumSignBits > DestBits-MidBits) 6316 return Op; 6317 } else if (OpBits < DestBits) { 6318 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign 6319 // bits, just sext from i32. 6320 if (NumSignBits > OpBits-MidBits) 6321 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, Op); 6322 } else { 6323 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign 6324 // bits, just truncate to i32. 6325 if (NumSignBits > OpBits-MidBits) 6326 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 6327 } 6328 6329 // fold (sext (truncate x)) -> (sextinreg x). 6330 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, 6331 N0.getValueType())) { 6332 if (OpBits < DestBits) 6333 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op); 6334 else if (OpBits > DestBits) 6335 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op); 6336 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, Op, 6337 DAG.getValueType(N0.getValueType())); 6338 } 6339 } 6340 6341 // fold (sext (load x)) -> (sext (truncate (sextload x))) 6342 // Only generate vector extloads when 1) they're legal, and 2) they are 6343 // deemed desirable by the target. 
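// As an illustration (example types only), on a target with legal i32:
//   (i32 (sext (i8 (load p)))) -> (i32 (sextload p))
// and any remaining uses of the original i8 load are rewritten as
//   (i8 (truncate (i32 (sextload p)))).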
6344 if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 6345 ((!LegalOperations && !VT.isVector() && 6346 !cast<LoadSDNode>(N0)->isVolatile()) || 6347 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()))) { 6348 bool DoXform = true; 6349 SmallVector<SDNode*, 4> SetCCs; 6350 if (!N0.hasOneUse()) 6351 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI); 6352 if (VT.isVector()) 6353 DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0)); 6354 if (DoXform) { 6355 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6356 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 6357 LN0->getChain(), 6358 LN0->getBasePtr(), N0.getValueType(), 6359 LN0->getMemOperand()); 6360 CombineTo(N, ExtLoad); 6361 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6362 N0.getValueType(), ExtLoad); 6363 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 6364 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 6365 ISD::SIGN_EXTEND); 6366 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6367 } 6368 } 6369 6370 // fold (sext (load x)) to multiple smaller sextloads. 6371 // Only on illegal but splittable vectors. 6372 if (SDValue ExtLoad = CombineExtLoad(N)) 6373 return ExtLoad; 6374 6375 // fold (sext (sextload x)) -> (sext (truncate (sextload x))) 6376 // fold (sext ( extload x)) -> (sext (truncate (sextload x))) 6377 if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 6378 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 6379 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6380 EVT MemVT = LN0->getMemoryVT(); 6381 if ((!LegalOperations && !LN0->isVolatile()) || 6382 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT)) { 6383 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 6384 LN0->getChain(), 6385 LN0->getBasePtr(), MemVT, 6386 LN0->getMemOperand()); 6387 CombineTo(N, ExtLoad); 6388 CombineTo(N0.getNode(), 6389 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6390 N0.getValueType(), ExtLoad), 6391 ExtLoad.getValue(1)); 6392 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
6393 } 6394 } 6395 6396 // fold (sext (and/or/xor (load x), cst)) -> 6397 // (and/or/xor (sextload x), (sext cst)) 6398 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 6399 N0.getOpcode() == ISD::XOR) && 6400 isa<LoadSDNode>(N0.getOperand(0)) && 6401 N0.getOperand(1).getOpcode() == ISD::Constant && 6402 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()) && 6403 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 6404 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 6405 if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) { 6406 bool DoXform = true; 6407 SmallVector<SDNode*, 4> SetCCs; 6408 if (!N0.hasOneUse()) 6409 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND, 6410 SetCCs, TLI); 6411 if (DoXform) { 6412 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN0), VT, 6413 LN0->getChain(), LN0->getBasePtr(), 6414 LN0->getMemoryVT(), 6415 LN0->getMemOperand()); 6416 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 6417 Mask = Mask.sext(VT.getSizeInBits()); 6418 SDLoc DL(N); 6419 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT, 6420 ExtLoad, DAG.getConstant(Mask, DL, VT)); 6421 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 6422 SDLoc(N0.getOperand(0)), 6423 N0.getOperand(0).getValueType(), ExtLoad); 6424 CombineTo(N, And); 6425 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 6426 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, 6427 ISD::SIGN_EXTEND); 6428 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6429 } 6430 } 6431 } 6432 6433 if (N0.getOpcode() == ISD::SETCC) { 6434 EVT N0VT = N0.getOperand(0).getValueType(); 6435 // sext(setcc) -> sext_in_reg(vsetcc) for vectors. 6436 // Only do this before legalize for now. 6437 if (VT.isVector() && !LegalOperations && 6438 TLI.getBooleanContents(N0VT) == 6439 TargetLowering::ZeroOrNegativeOneBooleanContent) { 6440 // On some architectures (such as SSE/NEON/etc) the SETCC result type is 6441 // of the same size as the compared operands. Only optimize sext(setcc()) 6442 // if this is the case. 6443 EVT SVT = getSetCCResultType(N0VT); 6444 6445 // We know that the # elements of the results is the same as the 6446 // # elements of the compare (and the # elements of the compare result 6447 // for that matter). Check to see that they are the same size. If so, 6448 // we know that the element size of the sext'd result matches the 6449 // element size of the compare operands. 6450 if (VT.getSizeInBits() == SVT.getSizeInBits()) 6451 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0), 6452 N0.getOperand(1), 6453 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 6454 6455 // If the desired elements are smaller or larger than the source 6456 // elements we can use a matching integer vector type and then 6457 // truncate/sign extend 6458 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger(); 6459 if (SVT == MatchingVectorType) { 6460 SDValue VsetCC = DAG.getSetCC(SDLoc(N), MatchingVectorType, 6461 N0.getOperand(0), N0.getOperand(1), 6462 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 6463 return DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT); 6464 } 6465 } 6466 6467 // sext(setcc x, y, cc) -> (select (setcc x, y, cc), T, 0) 6468 // Here, T can be 1 or -1, depending on the type of the setcc and 6469 // getBooleanContents(). 
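// For instance, in the common i1 case (illustrative):
//   (i32 (sext (setcc x, y, cc))) -> (select (setcc x, y, cc), -1, 0)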
6470 unsigned SetCCWidth = N0.getScalarValueSizeInBits(); 6471 6472 SDLoc DL(N); 6473 // To determine the "true" side of the select, we need to know the high bit 6474 // of the value returned by the setcc if it evaluates to true. 6475 // If the type of the setcc is i1, then the true case of the select is just 6476 // sext(i1 1), that is, -1. 6477 // If the type of the setcc is larger (say, i8) then the value of the high 6478 // bit depends on getBooleanContents(). So, ask TLI for a real "true" value 6479 // of the appropriate width. 6480 SDValue ExtTrueVal = 6481 (SetCCWidth == 1) 6482 ? DAG.getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), 6483 DL, VT) 6484 : TLI.getConstTrueVal(DAG, VT, DL); 6485 6486 if (SDValue SCC = SimplifySelectCC( 6487 DL, N0.getOperand(0), N0.getOperand(1), ExtTrueVal, 6488 DAG.getConstant(0, DL, VT), 6489 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true)) 6490 return SCC; 6491 6492 if (!VT.isVector()) { 6493 EVT SetCCVT = getSetCCResultType(N0.getOperand(0).getValueType()); 6494 if (!LegalOperations || 6495 TLI.isOperationLegal(ISD::SETCC, N0.getOperand(0).getValueType())) { 6496 SDLoc DL(N); 6497 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 6498 SDValue SetCC = 6499 DAG.getSetCC(DL, SetCCVT, N0.getOperand(0), N0.getOperand(1), CC); 6500 return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, 6501 DAG.getConstant(0, DL, VT)); 6502 } 6503 } 6504 } 6505 6506 // fold (sext x) -> (zext x) if the sign bit is known zero. 6507 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) && 6508 DAG.SignBitIsZero(N0)) 6509 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0); 6510 6511 return SDValue(); 6512 } 6513 6514 // isTruncateOf - If N is a truncate of some other value, return true, record 6515 // the value being truncated in Op and which of Op's bits are zero in KnownZero. 6516 // This function computes KnownZero to avoid a duplicated call to 6517 // computeKnownBits in the caller. 6518 static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, 6519 APInt &KnownZero) { 6520 APInt KnownOne; 6521 if (N->getOpcode() == ISD::TRUNCATE) { 6522 Op = N->getOperand(0); 6523 DAG.computeKnownBits(Op, KnownZero, KnownOne); 6524 return true; 6525 } 6526 6527 if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 || 6528 cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE) 6529 return false; 6530 6531 SDValue Op0 = N->getOperand(0); 6532 SDValue Op1 = N->getOperand(1); 6533 assert(Op0.getValueType() == Op1.getValueType()); 6534 6535 if (isNullConstant(Op0)) 6536 Op = Op1; 6537 else if (isNullConstant(Op1)) 6538 Op = Op0; 6539 else 6540 return false; 6541 6542 DAG.computeKnownBits(Op, KnownZero, KnownOne); 6543 6544 if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue()) 6545 return false; 6546 6547 return true; 6548 } 6549 6550 SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) { 6551 SDValue N0 = N->getOperand(0); 6552 EVT VT = N->getValueType(0); 6553 6554 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 6555 LegalOperations)) 6556 return SDValue(Res, 0); 6557 6558 // fold (zext (zext x)) -> (zext x) 6559 // fold (zext (aext x)) -> (zext x) 6560 if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 6561 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, 6562 N0.getOperand(0)); 6563 6564 // fold (zext (truncate x)) -> (zext x) or 6565 // (zext (truncate x)) -> (truncate x) 6566 // This is valid when the truncated bits of x are already zero. 
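// E.g. (illustrative widths): if the top 24 bits of the i32 value x are
// known zero, then
//   (i32 (zext (i8 (truncate x)))) -> x
// because the truncate only discarded bits that were already zero.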
6567 // FIXME: We should extend this to work for vectors too. 6568 SDValue Op; 6569 APInt KnownZero; 6570 if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) { 6571 APInt TruncatedBits = 6572 (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ? 6573 APInt(Op.getValueSizeInBits(), 0) : 6574 APInt::getBitsSet(Op.getValueSizeInBits(), 6575 N0.getValueSizeInBits(), 6576 std::min(Op.getValueSizeInBits(), 6577 VT.getSizeInBits())); 6578 if (TruncatedBits == (KnownZero & TruncatedBits)) { 6579 if (VT.bitsGT(Op.getValueType())) 6580 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Op); 6581 if (VT.bitsLT(Op.getValueType())) 6582 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 6583 6584 return Op; 6585 } 6586 } 6587 6588 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 6589 // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n))) 6590 if (N0.getOpcode() == ISD::TRUNCATE) { 6591 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 6592 SDNode *oye = N0.getOperand(0).getNode(); 6593 if (NarrowLoad.getNode() != N0.getNode()) { 6594 CombineTo(N0.getNode(), NarrowLoad); 6595 // CombineTo deleted the truncate, if needed, but not what's under it. 6596 AddToWorklist(oye); 6597 } 6598 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6599 } 6600 } 6601 6602 // fold (zext (truncate x)) -> (and x, mask) 6603 if (N0.getOpcode() == ISD::TRUNCATE) { 6604 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 6605 // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n))) 6606 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 6607 SDNode *oye = N0.getOperand(0).getNode(); 6608 if (NarrowLoad.getNode() != N0.getNode()) { 6609 CombineTo(N0.getNode(), NarrowLoad); 6610 // CombineTo deleted the truncate, if needed, but not what's under it. 6611 AddToWorklist(oye); 6612 } 6613 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6614 } 6615 6616 EVT SrcVT = N0.getOperand(0).getValueType(); 6617 EVT MinVT = N0.getValueType(); 6618 6619 // Try to mask before the extension to avoid having to generate a larger mask, 6620 // possibly over several sub-vectors. 6621 if (SrcVT.bitsLT(VT)) { 6622 if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) && 6623 TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) { 6624 SDValue Op = N0.getOperand(0); 6625 Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType()); 6626 AddToWorklist(Op.getNode()); 6627 return DAG.getZExtOrTrunc(Op, SDLoc(N), VT); 6628 } 6629 } 6630 6631 if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) { 6632 SDValue Op = N0.getOperand(0); 6633 if (SrcVT.bitsLT(VT)) { 6634 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op); 6635 AddToWorklist(Op.getNode()); 6636 } else if (SrcVT.bitsGT(VT)) { 6637 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 6638 AddToWorklist(Op.getNode()); 6639 } 6640 return DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType()); 6641 } 6642 } 6643 6644 // Fold (zext (and (trunc x), cst)) -> (and x, cst), 6645 // if either of the casts is not free. 
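// E.g. with an i64 source x (illustrative types):
//   (i32 (zext (and (i16 (truncate x)), 255)))
//     -> (and (i32 (truncate x)), 255)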
6646 if (N0.getOpcode() == ISD::AND && 6647 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 6648 N0.getOperand(1).getOpcode() == ISD::Constant && 6649 (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 6650 N0.getValueType()) || 6651 !TLI.isZExtFree(N0.getValueType(), VT))) { 6652 SDValue X = N0.getOperand(0).getOperand(0); 6653 if (X.getValueType().bitsLT(VT)) { 6654 X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(X), VT, X); 6655 } else if (X.getValueType().bitsGT(VT)) { 6656 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); 6657 } 6658 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 6659 Mask = Mask.zext(VT.getSizeInBits()); 6660 SDLoc DL(N); 6661 return DAG.getNode(ISD::AND, DL, VT, 6662 X, DAG.getConstant(Mask, DL, VT)); 6663 } 6664 6665 // fold (zext (load x)) -> (zext (truncate (zextload x))) 6666 // Only generate vector extloads when 1) they're legal, and 2) they are 6667 // deemed desirable by the target. 6668 if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 6669 ((!LegalOperations && !VT.isVector() && 6670 !cast<LoadSDNode>(N0)->isVolatile()) || 6671 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()))) { 6672 bool DoXform = true; 6673 SmallVector<SDNode*, 4> SetCCs; 6674 if (!N0.hasOneUse()) 6675 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI); 6676 if (VT.isVector()) 6677 DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0)); 6678 if (DoXform) { 6679 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6680 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 6681 LN0->getChain(), 6682 LN0->getBasePtr(), N0.getValueType(), 6683 LN0->getMemOperand()); 6684 CombineTo(N, ExtLoad); 6685 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6686 N0.getValueType(), ExtLoad); 6687 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 6688 6689 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 6690 ISD::ZERO_EXTEND); 6691 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6692 } 6693 } 6694 6695 // fold (zext (load x)) to multiple smaller zextloads. 6696 // Only on illegal but splittable vectors. 6697 if (SDValue ExtLoad = CombineExtLoad(N)) 6698 return ExtLoad; 6699 6700 // fold (zext (and/or/xor (load x), cst)) -> 6701 // (and/or/xor (zextload x), (zext cst)) 6702 // Unless (and (load x) cst) will match as a zextload already and has 6703 // additional users. 
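// E.g. (illustrative types):
//   (i32 (zext (and (i8 (load p)), 0x7f)))
//     -> (and (i32 (zextload p)), 0x7f)
// with the mask constant zero-extended to the wider type.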
6704 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 6705 N0.getOpcode() == ISD::XOR) && 6706 isa<LoadSDNode>(N0.getOperand(0)) && 6707 N0.getOperand(1).getOpcode() == ISD::Constant && 6708 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()) && 6709 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 6710 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 6711 if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) { 6712 bool DoXform = true; 6713 SmallVector<SDNode*, 4> SetCCs; 6714 if (!N0.hasOneUse()) { 6715 if (N0.getOpcode() == ISD::AND) { 6716 auto *AndC = cast<ConstantSDNode>(N0.getOperand(1)); 6717 auto NarrowLoad = false; 6718 EVT LoadResultTy = AndC->getValueType(0); 6719 EVT ExtVT, LoadedVT; 6720 if (isAndLoadExtLoad(AndC, LN0, LoadResultTy, ExtVT, LoadedVT, 6721 NarrowLoad)) 6722 DoXform = false; 6723 } 6724 if (DoXform) 6725 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), 6726 ISD::ZERO_EXTEND, SetCCs, TLI); 6727 } 6728 if (DoXform) { 6729 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT, 6730 LN0->getChain(), LN0->getBasePtr(), 6731 LN0->getMemoryVT(), 6732 LN0->getMemOperand()); 6733 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 6734 Mask = Mask.zext(VT.getSizeInBits()); 6735 SDLoc DL(N); 6736 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT, 6737 ExtLoad, DAG.getConstant(Mask, DL, VT)); 6738 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 6739 SDLoc(N0.getOperand(0)), 6740 N0.getOperand(0).getValueType(), ExtLoad); 6741 CombineTo(N, And); 6742 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 6743 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, 6744 ISD::ZERO_EXTEND); 6745 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6746 } 6747 } 6748 } 6749 6750 // fold (zext (zextload x)) -> (zext (truncate (zextload x))) 6751 // fold (zext ( extload x)) -> (zext (truncate (zextload x))) 6752 if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 6753 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 6754 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6755 EVT MemVT = LN0->getMemoryVT(); 6756 if ((!LegalOperations && !LN0->isVolatile()) || 6757 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT)) { 6758 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 6759 LN0->getChain(), 6760 LN0->getBasePtr(), MemVT, 6761 LN0->getMemOperand()); 6762 CombineTo(N, ExtLoad); 6763 CombineTo(N0.getNode(), 6764 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), 6765 ExtLoad), 6766 ExtLoad.getValue(1)); 6767 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6768 } 6769 } 6770 6771 if (N0.getOpcode() == ISD::SETCC) { 6772 // Only do this before legalize for now. 6773 if (!LegalOperations && VT.isVector() && 6774 N0.getValueType().getVectorElementType() == MVT::i1) { 6775 EVT N00VT = N0.getOperand(0).getValueType(); 6776 if (getSetCCResultType(N00VT) == N0.getValueType()) 6777 return SDValue(); 6778 6779 // We know that the # elements of the results is the same as the # 6780 // elements of the compare (and the # elements of the compare result for 6781 // that matter). Check to see that they are the same size. If so, we know 6782 // that the element size of the sext'd result matches the element size of 6783 // the compare operands. 6784 SDLoc DL(N); 6785 SDValue VecOnes = DAG.getConstant(1, DL, VT); 6786 if (VT.getSizeInBits() == N00VT.getSizeInBits()) { 6787 // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors. 
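//   e.g. (v4i32 (zext (v4i1 (setcc a, b, cc))))
//          -> (and (v4i32 (setcc a, b, cc)), (build_vector 1, 1, 1, 1))
// (vector types here are illustrative).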
6788 SDValue VSetCC = DAG.getNode(ISD::SETCC, DL, VT, N0.getOperand(0), 6789 N0.getOperand(1), N0.getOperand(2)); 6790 return DAG.getNode(ISD::AND, DL, VT, VSetCC, VecOnes); 6791 } 6792 6793 // If the desired elements are smaller or larger than the source 6794 // elements we can use a matching integer vector type and then 6795 // truncate/sign extend. 6796 EVT MatchingElementType = EVT::getIntegerVT( 6797 *DAG.getContext(), N00VT.getScalarSizeInBits()); 6798 EVT MatchingVectorType = EVT::getVectorVT( 6799 *DAG.getContext(), MatchingElementType, N00VT.getVectorNumElements()); 6800 SDValue VsetCC = 6801 DAG.getNode(ISD::SETCC, DL, MatchingVectorType, N0.getOperand(0), 6802 N0.getOperand(1), N0.getOperand(2)); 6803 return DAG.getNode(ISD::AND, DL, VT, DAG.getSExtOrTrunc(VsetCC, DL, VT), 6804 VecOnes); 6805 } 6806 6807 // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 6808 SDLoc DL(N); 6809 if (SDValue SCC = SimplifySelectCC( 6810 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT), 6811 DAG.getConstant(0, DL, VT), 6812 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true)) 6813 return SCC; 6814 } 6815 6816 // (zext (shl (zext x), cst)) -> (shl (zext x), cst) 6817 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) && 6818 isa<ConstantSDNode>(N0.getOperand(1)) && 6819 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND && 6820 N0.hasOneUse()) { 6821 SDValue ShAmt = N0.getOperand(1); 6822 unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 6823 if (N0.getOpcode() == ISD::SHL) { 6824 SDValue InnerZExt = N0.getOperand(0); 6825 // If the original shl may be shifting out bits, do not perform this 6826 // transformation. 6827 unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() - 6828 InnerZExt.getOperand(0).getValueSizeInBits(); 6829 if (ShAmtVal > KnownZeroBits) 6830 return SDValue(); 6831 } 6832 6833 SDLoc DL(N); 6834 6835 // Ensure that the shift amount is wide enough for the shifted value. 6836 if (VT.getSizeInBits() >= 256) 6837 ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt); 6838 6839 return DAG.getNode(N0.getOpcode(), DL, VT, 6840 DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)), 6841 ShAmt); 6842 } 6843 6844 return SDValue(); 6845 } 6846 6847 SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) { 6848 SDValue N0 = N->getOperand(0); 6849 EVT VT = N->getValueType(0); 6850 6851 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 6852 LegalOperations)) 6853 return SDValue(Res, 0); 6854 6855 // fold (aext (aext x)) -> (aext x) 6856 // fold (aext (zext x)) -> (zext x) 6857 // fold (aext (sext x)) -> (sext x) 6858 if (N0.getOpcode() == ISD::ANY_EXTEND || 6859 N0.getOpcode() == ISD::ZERO_EXTEND || 6860 N0.getOpcode() == ISD::SIGN_EXTEND) 6861 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0)); 6862 6863 // fold (aext (truncate (load x))) -> (aext (smaller load x)) 6864 // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n))) 6865 if (N0.getOpcode() == ISD::TRUNCATE) { 6866 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 6867 SDNode *oye = N0.getOperand(0).getNode(); 6868 if (NarrowLoad.getNode() != N0.getNode()) { 6869 CombineTo(N0.getNode(), NarrowLoad); 6870 // CombineTo deleted the truncate, if needed, but not what's under it. 6871 AddToWorklist(oye); 6872 } 6873 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
6874 } 6875 } 6876 6877 // fold (aext (truncate x)) 6878 if (N0.getOpcode() == ISD::TRUNCATE) { 6879 SDValue TruncOp = N0.getOperand(0); 6880 if (TruncOp.getValueType() == VT) 6881 return TruncOp; // x iff x size == zext size. 6882 if (TruncOp.getValueType().bitsGT(VT)) 6883 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, TruncOp); 6884 return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, TruncOp); 6885 } 6886 6887 // Fold (aext (and (trunc x), cst)) -> (and x, cst) 6888 // if the trunc is not free. 6889 if (N0.getOpcode() == ISD::AND && 6890 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 6891 N0.getOperand(1).getOpcode() == ISD::Constant && 6892 !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 6893 N0.getValueType())) { 6894 SDLoc DL(N); 6895 SDValue X = N0.getOperand(0).getOperand(0); 6896 if (X.getValueType().bitsLT(VT)) { 6897 X = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X); 6898 } else if (X.getValueType().bitsGT(VT)) { 6899 X = DAG.getNode(ISD::TRUNCATE, DL, VT, X); 6900 } 6901 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 6902 Mask = Mask.zext(VT.getSizeInBits()); 6903 return DAG.getNode(ISD::AND, DL, VT, 6904 X, DAG.getConstant(Mask, DL, VT)); 6905 } 6906 6907 // fold (aext (load x)) -> (aext (truncate (extload x))) 6908 // None of the supported targets knows how to perform load and any_ext 6909 // on vectors in one instruction. We only perform this transformation on 6910 // scalars. 6911 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 6912 ISD::isUNINDEXEDLoad(N0.getNode()) && 6913 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) { 6914 bool DoXform = true; 6915 SmallVector<SDNode*, 4> SetCCs; 6916 if (!N0.hasOneUse()) 6917 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI); 6918 if (DoXform) { 6919 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6920 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, 6921 LN0->getChain(), 6922 LN0->getBasePtr(), N0.getValueType(), 6923 LN0->getMemOperand()); 6924 CombineTo(N, ExtLoad); 6925 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6926 N0.getValueType(), ExtLoad); 6927 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 6928 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 6929 ISD::ANY_EXTEND); 6930 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6931 } 6932 } 6933 6934 // fold (aext (zextload x)) -> (aext (truncate (zextload x))) 6935 // fold (aext (sextload x)) -> (aext (truncate (sextload x))) 6936 // fold (aext ( extload x)) -> (aext (truncate (extload x))) 6937 if (N0.getOpcode() == ISD::LOAD && 6938 !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 6939 N0.hasOneUse()) { 6940 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6941 ISD::LoadExtType ExtType = LN0->getExtensionType(); 6942 EVT MemVT = LN0->getMemoryVT(); 6943 if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) { 6944 SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N), 6945 VT, LN0->getChain(), LN0->getBasePtr(), 6946 MemVT, LN0->getMemOperand()); 6947 CombineTo(N, ExtLoad); 6948 CombineTo(N0.getNode(), 6949 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6950 N0.getValueType(), ExtLoad), 6951 ExtLoad.getValue(1)); 6952 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6953 } 6954 } 6955 6956 if (N0.getOpcode() == ISD::SETCC) { 6957 // For vectors: 6958 // aext(setcc) -> vsetcc 6959 // aext(setcc) -> truncate(vsetcc) 6960 // aext(setcc) -> aext(vsetcc) 6961 // Only do this before legalize for now. 
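// E.g. (illustrative): (v4i32 (aext (v4i1 (setcc a, b, cc)))) can become a
// v4i32 setcc directly when the extended result has the same total size as
// the compared operands.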
6962 if (VT.isVector() && !LegalOperations) {
6963 EVT N0VT = N0.getOperand(0).getValueType();
6964 // We know that the # elements of the result is the same as the
6965 // # elements of the compare (and the # elements of the compare result
6966 // for that matter). Check to see that they are the same size. If so,
6967 // we know that the element size of the extended result matches the
6968 // element size of the compare operands.
6969 if (VT.getSizeInBits() == N0VT.getSizeInBits())
6970 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
6971 N0.getOperand(1),
6972 cast<CondCodeSDNode>(N0.getOperand(2))->get());
6973 // If the desired elements are smaller or larger than the source
6974 // elements we can use a matching integer vector type and then
6975 // truncate/any-extend.
6976 else {
6977 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger();
6978 SDValue VsetCC =
6979 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
6980 N0.getOperand(1),
6981 cast<CondCodeSDNode>(N0.getOperand(2))->get());
6982 return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
6983 }
6984 }
6985
6986 // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
6987 SDLoc DL(N);
6988 if (SDValue SCC = SimplifySelectCC(
6989 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
6990 DAG.getConstant(0, DL, VT),
6991 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
6992 return SCC;
6993 }
6994
6995 return SDValue();
6996 }
6997
6998 /// See if the specified operand can be simplified with the knowledge that only
6999 /// the bits specified by Mask are used. If so, return the simpler operand,
7000 /// otherwise return a null SDValue.
7001 SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
7002 switch (V.getOpcode()) {
7003 default: break;
7004 case ISD::Constant: {
7005 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
7006 assert(CV && "Const value should be ConstSDNode.");
7007 const APInt &CVal = CV->getAPIntValue();
7008 APInt NewVal = CVal & Mask;
7009 if (NewVal != CVal)
7010 return DAG.getConstant(NewVal, SDLoc(V), V.getValueType());
7011 break;
7012 }
7013 case ISD::OR:
7014 case ISD::XOR:
7015 // If the LHS or RHS doesn't contribute any demanded bits, drop it.
7016 if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
7017 return V.getOperand(1);
7018 if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
7019 return V.getOperand(0);
7020 break;
7021 case ISD::SRL:
7022 // Only look at single-use SRLs.
7023 if (!V.getNode()->hasOneUse())
7024 break;
7025 if (ConstantSDNode *RHSC = getAsNonOpaqueConstant(V.getOperand(1))) {
7026 // See if we can recursively simplify the LHS.
7027 unsigned Amt = RHSC->getZExtValue();
7028
7029 // Watch out for shift count overflow though.
7030 if (Amt >= Mask.getBitWidth()) break;
7031 APInt NewMask = Mask << Amt;
7032 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask))
7033 return DAG.getNode(ISD::SRL, SDLoc(V), V.getValueType(),
7034 SimplifyLHS, V.getOperand(1));
7035 }
7036 }
7037 return SDValue();
7038 }
7039
7040 /// If the result of a wider load is shifted right by N bits and then
7041 /// truncated to a narrower type, where N is a multiple of the number of bits
7042 /// in the narrower type, transform it into a narrower load from address +
7043 /// N / (number of bits in the new type). If the result is to be extended,
7044 /// also fold the extension to form an extending load.
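///
/// For example (illustrative, little-endian):
///   (i8 (truncate (srl (i32 (load p)), 16))) -> (i8 (load p+2))
/// since the shift amount (16) is a multiple of the narrow width (8), the
/// result is a whole byte of the original load.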
7045 SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) { 7046 unsigned Opc = N->getOpcode(); 7047 7048 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; 7049 SDValue N0 = N->getOperand(0); 7050 EVT VT = N->getValueType(0); 7051 EVT ExtVT = VT; 7052 7053 // This transformation isn't valid for vector loads. 7054 if (VT.isVector()) 7055 return SDValue(); 7056 7057 // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then 7058 // extended to VT. 7059 if (Opc == ISD::SIGN_EXTEND_INREG) { 7060 ExtType = ISD::SEXTLOAD; 7061 ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 7062 } else if (Opc == ISD::SRL) { 7063 // Another special-case: SRL is basically zero-extending a narrower value. 7064 ExtType = ISD::ZEXTLOAD; 7065 N0 = SDValue(N, 0); 7066 ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 7067 if (!N01) return SDValue(); 7068 ExtVT = EVT::getIntegerVT(*DAG.getContext(), 7069 VT.getSizeInBits() - N01->getZExtValue()); 7070 } 7071 if (LegalOperations && !TLI.isLoadExtLegal(ExtType, VT, ExtVT)) 7072 return SDValue(); 7073 7074 unsigned EVTBits = ExtVT.getSizeInBits(); 7075 7076 // Do not generate loads of non-round integer types since these can 7077 // be expensive (and would be wrong if the type is not byte sized). 7078 if (!ExtVT.isRound()) 7079 return SDValue(); 7080 7081 unsigned ShAmt = 0; 7082 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { 7083 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 7084 ShAmt = N01->getZExtValue(); 7085 // Is the shift amount a multiple of size of VT? 7086 if ((ShAmt & (EVTBits-1)) == 0) { 7087 N0 = N0.getOperand(0); 7088 // Is the load width a multiple of size of VT? 7089 if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0) 7090 return SDValue(); 7091 } 7092 7093 // At this point, we must have a load or else we can't do the transform. 7094 if (!isa<LoadSDNode>(N0)) return SDValue(); 7095 7096 // Because a SRL must be assumed to *need* to zero-extend the high bits 7097 // (as opposed to anyext the high bits), we can't combine the zextload 7098 // lowering of SRL and an sextload. 7099 if (cast<LoadSDNode>(N0)->getExtensionType() == ISD::SEXTLOAD) 7100 return SDValue(); 7101 7102 // If the shift amount is larger than the input type then we're not 7103 // accessing any of the loaded bytes. If the load was a zextload/extload 7104 // then the result of the shift+trunc is zero/undef (handled elsewhere). 7105 if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits()) 7106 return SDValue(); 7107 } 7108 } 7109 7110 // If the load is shifted left (and the result isn't shifted back right), 7111 // we can fold the truncate through the shift. 7112 unsigned ShLeftAmt = 0; 7113 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() && 7114 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) { 7115 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 7116 ShLeftAmt = N01->getZExtValue(); 7117 N0 = N0.getOperand(0); 7118 } 7119 } 7120 7121 // If we haven't found a load, we can't narrow it. Don't transform one with 7122 // multiple uses, this would require adding a new load. 7123 if (!isa<LoadSDNode>(N0) || !N0.hasOneUse()) 7124 return SDValue(); 7125 7126 // Don't change the width of a volatile load. 7127 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7128 if (LN0->isVolatile()) 7129 return SDValue(); 7130 7131 // Verify that we are actually reducing a load width here. 
7132 if (LN0->getMemoryVT().getSizeInBits() < EVTBits) 7133 return SDValue(); 7134 7135 // For the transform to be legal, the load must produce only two values 7136 // (the value loaded and the chain). Don't transform a pre-increment 7137 // load, for example, which produces an extra value. Otherwise the 7138 // transformation is not equivalent, and the downstream logic to replace 7139 // uses gets things wrong. 7140 if (LN0->getNumValues() > 2) 7141 return SDValue(); 7142 7143 // If the load that we're shrinking is an extload and we're not just 7144 // discarding the extension we can't simply shrink the load. Bail. 7145 // TODO: It would be possible to merge the extensions in some cases. 7146 if (LN0->getExtensionType() != ISD::NON_EXTLOAD && 7147 LN0->getMemoryVT().getSizeInBits() < ExtVT.getSizeInBits() + ShAmt) 7148 return SDValue(); 7149 7150 if (!TLI.shouldReduceLoadWidth(LN0, ExtType, ExtVT)) 7151 return SDValue(); 7152 7153 EVT PtrType = N0.getOperand(1).getValueType(); 7154 7155 if (PtrType == MVT::Untyped || PtrType.isExtended()) 7156 // It's not possible to generate a constant of extended or untyped type. 7157 return SDValue(); 7158 7159 // For big endian targets, we need to adjust the offset to the pointer to 7160 // load the correct bytes. 7161 if (DAG.getDataLayout().isBigEndian()) { 7162 unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits(); 7163 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits(); 7164 ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; 7165 } 7166 7167 uint64_t PtrOff = ShAmt / 8; 7168 unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff); 7169 SDLoc DL(LN0); 7170 // The original load itself didn't wrap, so an offset within it doesn't. 7171 SDNodeFlags Flags; 7172 Flags.setNoUnsignedWrap(true); 7173 SDValue NewPtr = DAG.getNode(ISD::ADD, DL, 7174 PtrType, LN0->getBasePtr(), 7175 DAG.getConstant(PtrOff, DL, PtrType), 7176 &Flags); 7177 AddToWorklist(NewPtr.getNode()); 7178 7179 SDValue Load; 7180 if (ExtType == ISD::NON_EXTLOAD) 7181 Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr, 7182 LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign, 7183 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 7184 else 7185 Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(), NewPtr, 7186 LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT, 7187 NewAlign, LN0->getMemOperand()->getFlags(), 7188 LN0->getAAInfo()); 7189 7190 // Replace the old load's chain with the new load's chain. 7191 WorklistRemover DeadNodes(*this); 7192 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 7193 7194 // Shift the result left, if we've swallowed a left shift. 7195 SDValue Result = Load; 7196 if (ShLeftAmt != 0) { 7197 EVT ShImmTy = getShiftAmountTy(Result.getValueType()); 7198 if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt)) 7199 ShImmTy = VT; 7200 // If the shift amount is as large as the result size (but, presumably, 7201 // no larger than the source) then the useful bits of the result are 7202 // zero; we can't simply return the shortened shift, because the result 7203 // of that operation is undefined. 7204 SDLoc DL(N0); 7205 if (ShLeftAmt >= VT.getSizeInBits()) 7206 Result = DAG.getConstant(0, DL, VT); 7207 else 7208 Result = DAG.getNode(ISD::SHL, DL, VT, 7209 Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy)); 7210 } 7211 7212 // Return the new loaded value. 
7213 return Result; 7214 } 7215 7216 SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { 7217 SDValue N0 = N->getOperand(0); 7218 SDValue N1 = N->getOperand(1); 7219 EVT VT = N->getValueType(0); 7220 EVT EVT = cast<VTSDNode>(N1)->getVT(); 7221 unsigned VTBits = VT.getScalarSizeInBits(); 7222 unsigned EVTBits = EVT.getScalarSizeInBits(); 7223 7224 if (N0.isUndef()) 7225 return DAG.getUNDEF(VT); 7226 7227 // fold (sext_in_reg c1) -> c1 7228 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 7229 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1); 7230 7231 // If the input is already sign extended, just drop the extension. 7232 if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1) 7233 return N0; 7234 7235 // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2 7236 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 7237 EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) 7238 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 7239 N0.getOperand(0), N1); 7240 7241 // fold (sext_in_reg (sext x)) -> (sext x) 7242 // fold (sext_in_reg (aext x)) -> (sext x) 7243 // if x is small enough. 7244 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) { 7245 SDValue N00 = N0.getOperand(0); 7246 if (N00.getScalarValueSizeInBits() <= EVTBits && 7247 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 7248 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1); 7249 } 7250 7251 // fold (sext_in_reg (zext x)) -> (sext x) 7252 // iff we are extending the source sign bit. 7253 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 7254 SDValue N00 = N0.getOperand(0); 7255 if (N00.getScalarValueSizeInBits() == EVTBits && 7256 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 7257 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1); 7258 } 7259 7260 // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero. 7261 if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits))) 7262 return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT.getScalarType()); 7263 7264 // fold operands of sext_in_reg based on knowledge that the top bits are not 7265 // demanded. 7266 if (SimplifyDemandedBits(SDValue(N, 0))) 7267 return SDValue(N, 0); 7268 7269 // fold (sext_in_reg (load x)) -> (smaller sextload x) 7270 // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits)) 7271 if (SDValue NarrowLoad = ReduceLoadWidth(N)) 7272 return NarrowLoad; 7273 7274 // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24) 7275 // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible. 7276 // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above. 7277 if (N0.getOpcode() == ISD::SRL) { 7278 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) 7279 if (ShAmt->getZExtValue()+EVTBits <= VTBits) { 7280 // We can turn this into an SRA iff the input to the SRL is already sign 7281 // extended enough. 
7282 unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0)); 7283 if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits) 7284 return DAG.getNode(ISD::SRA, SDLoc(N), VT, 7285 N0.getOperand(0), N0.getOperand(1)); 7286 } 7287 } 7288 7289 // fold (sext_inreg (extload x)) -> (sextload x) 7290 if (ISD::isEXTLoad(N0.getNode()) && 7291 ISD::isUNINDEXEDLoad(N0.getNode()) && 7292 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 7293 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 7294 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { 7295 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7296 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 7297 LN0->getChain(), 7298 LN0->getBasePtr(), EVT, 7299 LN0->getMemOperand()); 7300 CombineTo(N, ExtLoad); 7301 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 7302 AddToWorklist(ExtLoad.getNode()); 7303 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7304 } 7305 // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use 7306 if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 7307 N0.hasOneUse() && 7308 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 7309 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 7310 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { 7311 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7312 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 7313 LN0->getChain(), 7314 LN0->getBasePtr(), EVT, 7315 LN0->getMemOperand()); 7316 CombineTo(N, ExtLoad); 7317 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 7318 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7319 } 7320 7321 // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16)) 7322 if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) { 7323 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 7324 N0.getOperand(1), false)) 7325 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 7326 BSwap, N1); 7327 } 7328 7329 return SDValue(); 7330 } 7331 7332 SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) { 7333 SDValue N0 = N->getOperand(0); 7334 EVT VT = N->getValueType(0); 7335 7336 if (N0.isUndef()) 7337 return DAG.getUNDEF(VT); 7338 7339 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7340 LegalOperations)) 7341 return SDValue(Res, 0); 7342 7343 return SDValue(); 7344 } 7345 7346 SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) { 7347 SDValue N0 = N->getOperand(0); 7348 EVT VT = N->getValueType(0); 7349 7350 if (N0.isUndef()) 7351 return DAG.getUNDEF(VT); 7352 7353 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7354 LegalOperations)) 7355 return SDValue(Res, 0); 7356 7357 return SDValue(); 7358 } 7359 7360 SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { 7361 SDValue N0 = N->getOperand(0); 7362 EVT VT = N->getValueType(0); 7363 bool isLE = DAG.getDataLayout().isLittleEndian(); 7364 7365 // noop truncate 7366 if (N0.getValueType() == N->getValueType(0)) 7367 return N0; 7368 // fold (truncate c1) -> c1 7369 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 7370 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0); 7371 // fold (truncate (truncate x)) -> (truncate x) 7372 if (N0.getOpcode() == ISD::TRUNCATE) 7373 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0)); 7374 // fold (truncate (ext x)) -> (ext x) or (truncate x) or x 7375 if (N0.getOpcode() == ISD::ZERO_EXTEND || 7376 N0.getOpcode() == ISD::SIGN_EXTEND || 7377 N0.getOpcode() == ISD::ANY_EXTEND) { 7378 // if the source is 
smaller than the dest, we still need an extend. 7379 if (N0.getOperand(0).getValueType().bitsLT(VT)) 7380 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0)); 7381 // if the source is larger than the dest, than we just need the truncate. 7382 if (N0.getOperand(0).getValueType().bitsGT(VT)) 7383 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0)); 7384 // if the source and dest are the same type, we can drop both the extend 7385 // and the truncate. 7386 return N0.getOperand(0); 7387 } 7388 7389 // If this is anyext(trunc), don't fold it, allow ourselves to be folded. 7390 if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND)) 7391 return SDValue(); 7392 7393 // Fold extract-and-trunc into a narrow extract. For example: 7394 // i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1) 7395 // i32 y = TRUNCATE(i64 x) 7396 // -- becomes -- 7397 // v16i8 b = BITCAST (v2i64 val) 7398 // i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8) 7399 // 7400 // Note: We only run this optimization after type legalization (which often 7401 // creates this pattern) and before operation legalization after which 7402 // we need to be more careful about the vector instructions that we generate. 7403 if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 7404 LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) { 7405 7406 EVT VecTy = N0.getOperand(0).getValueType(); 7407 EVT ExTy = N0.getValueType(); 7408 EVT TrTy = N->getValueType(0); 7409 7410 unsigned NumElem = VecTy.getVectorNumElements(); 7411 unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits(); 7412 7413 EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem); 7414 assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size"); 7415 7416 SDValue EltNo = N0->getOperand(1); 7417 if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) { 7418 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 7419 EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 7420 int Index = isLE ? 
(Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1)); 7421 7422 SDLoc DL(N); 7423 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TrTy, 7424 DAG.getBitcast(NVT, N0.getOperand(0)), 7425 DAG.getConstant(Index, DL, IndexTy)); 7426 } 7427 } 7428 7429 // trunc (select c, a, b) -> select c, (trunc a), (trunc b) 7430 if (N0.getOpcode() == ISD::SELECT && N0.hasOneUse()) { 7431 EVT SrcVT = N0.getValueType(); 7432 if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) && 7433 TLI.isTruncateFree(SrcVT, VT)) { 7434 SDLoc SL(N0); 7435 SDValue Cond = N0.getOperand(0); 7436 SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1)); 7437 SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2)); 7438 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1); 7439 } 7440 } 7441 7442 // trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits() 7443 if (N0.getOpcode() == ISD::SHL && N0.hasOneUse() && 7444 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::SHL, VT)) && 7445 TLI.isTypeDesirableForOp(ISD::SHL, VT)) { 7446 if (const ConstantSDNode *CAmt = isConstOrConstSplat(N0.getOperand(1))) { 7447 uint64_t Amt = CAmt->getZExtValue(); 7448 unsigned Size = VT.getScalarSizeInBits(); 7449 7450 if (Amt < Size) { 7451 SDLoc SL(N); 7452 EVT AmtVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 7453 7454 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0)); 7455 return DAG.getNode(ISD::SHL, SL, VT, Trunc, 7456 DAG.getConstant(Amt, SL, AmtVT)); 7457 } 7458 } 7459 } 7460 7461 // Fold a series of buildvector, bitcast, and truncate if possible. 7462 // For example fold 7463 // (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to 7464 // (2xi32 (buildvector x, y)). 7465 if (Level == AfterLegalizeVectorOps && VT.isVector() && 7466 N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() && 7467 N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR && 7468 N0.getOperand(0).hasOneUse()) { 7469 7470 SDValue BuildVect = N0.getOperand(0); 7471 EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType(); 7472 EVT TruncVecEltTy = VT.getVectorElementType(); 7473 7474 // Check that the element types match. 7475 if (BuildVectEltTy == TruncVecEltTy) { 7476 // Now we only need to compute the offset of the truncated elements. 7477 unsigned BuildVecNumElts = BuildVect.getNumOperands(); 7478 unsigned TruncVecNumElts = VT.getVectorNumElements(); 7479 unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts; 7480 7481 assert((BuildVecNumElts % TruncVecNumElts) == 0 && 7482 "Invalid number of elements"); 7483 7484 SmallVector<SDValue, 8> Opnds; 7485 for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset) 7486 Opnds.push_back(BuildVect.getOperand(i)); 7487 7488 return DAG.getBuildVector(VT, SDLoc(N), Opnds); 7489 } 7490 } 7491 7492 // See if we can simplify the input to this truncate through knowledge that 7493 // only the low bits are being used. 7494 // For example "trunc (or (shl x, 8), y)" // -> trunc y 7495 // Currently we only perform this optimization on scalars because vectors 7496 // may have different active low bits. 
7497 if (!VT.isVector()) { 7498 if (SDValue Shorter = 7499 GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(), 7500 VT.getSizeInBits()))) 7501 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter); 7502 } 7503 // fold (truncate (load x)) -> (smaller load x) 7504 // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits)) 7505 if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) { 7506 if (SDValue Reduced = ReduceLoadWidth(N)) 7507 return Reduced; 7508 7509 // Handle the case where the load remains an extending load even 7510 // after truncation. 7511 if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) { 7512 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7513 if (!LN0->isVolatile() && 7514 LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) { 7515 SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0), 7516 VT, LN0->getChain(), LN0->getBasePtr(), 7517 LN0->getMemoryVT(), 7518 LN0->getMemOperand()); 7519 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1)); 7520 return NewLoad; 7521 } 7522 } 7523 } 7524 // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)), 7525 // where ... are all 'undef'. 7526 if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) { 7527 SmallVector<EVT, 8> VTs; 7528 SDValue V; 7529 unsigned Idx = 0; 7530 unsigned NumDefs = 0; 7531 7532 for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) { 7533 SDValue X = N0.getOperand(i); 7534 if (!X.isUndef()) { 7535 V = X; 7536 Idx = i; 7537 NumDefs++; 7538 } 7539 // Stop if more than one members are non-undef. 7540 if (NumDefs > 1) 7541 break; 7542 VTs.push_back(EVT::getVectorVT(*DAG.getContext(), 7543 VT.getVectorElementType(), 7544 X.getValueType().getVectorNumElements())); 7545 } 7546 7547 if (NumDefs == 0) 7548 return DAG.getUNDEF(VT); 7549 7550 if (NumDefs == 1) { 7551 assert(V.getNode() && "The single defined operand is empty!"); 7552 SmallVector<SDValue, 8> Opnds; 7553 for (unsigned i = 0, e = VTs.size(); i != e; ++i) { 7554 if (i != Idx) { 7555 Opnds.push_back(DAG.getUNDEF(VTs[i])); 7556 continue; 7557 } 7558 SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V); 7559 AddToWorklist(NV.getNode()); 7560 Opnds.push_back(NV); 7561 } 7562 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds); 7563 } 7564 } 7565 7566 // Fold truncate of a bitcast of a vector to an extract of the low vector 7567 // element. 7568 // 7569 // e.g. trunc (i64 (bitcast v2i32:x)) -> extract_vector_elt v2i32:x, 0 7570 if (N0.getOpcode() == ISD::BITCAST && !VT.isVector()) { 7571 SDValue VecSrc = N0.getOperand(0); 7572 EVT SrcVT = VecSrc.getValueType(); 7573 if (SrcVT.isVector() && SrcVT.getScalarType() == VT && 7574 (!LegalOperations || 7575 TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, SrcVT))) { 7576 SDLoc SL(N); 7577 7578 EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout()); 7579 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, VT, 7580 VecSrc, DAG.getConstant(0, SL, IdxVT)); 7581 } 7582 } 7583 7584 // Simplify the operands using demanded-bits information. 7585 if (!VT.isVector() && 7586 SimplifyDemandedBits(SDValue(N, 0))) 7587 return SDValue(N, 0); 7588 7589 return SDValue(); 7590 } 7591 7592 static SDNode *getBuildPairElt(SDNode *N, unsigned i) { 7593 SDValue Elt = N->getOperand(i); 7594 if (Elt.getOpcode() != ISD::MERGE_VALUES) 7595 return Elt.getNode(); 7596 return Elt.getOperand(Elt.getResNo()).getNode(); 7597 } 7598 7599 /// build_pair (load, load) -> load 7600 /// if load locations are consecutive. 
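/// E.g. on a little-endian target (illustrative):
///   (i64 (build_pair (i32 (load p)), (i32 (load p+4)))) -> (i64 (load p))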
7601 SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) { 7602 assert(N->getOpcode() == ISD::BUILD_PAIR); 7603 7604 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0)); 7605 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1)); 7606 if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() || 7607 LD1->getAddressSpace() != LD2->getAddressSpace()) 7608 return SDValue(); 7609 EVT LD1VT = LD1->getValueType(0); 7610 unsigned LD1Bytes = LD1VT.getSizeInBits() / 8; 7611 if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() && 7612 DAG.areNonVolatileConsecutiveLoads(LD2, LD1, LD1Bytes, 1)) { 7613 unsigned Align = LD1->getAlignment(); 7614 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( 7615 VT.getTypeForEVT(*DAG.getContext())); 7616 7617 if (NewAlign <= Align && 7618 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) 7619 return DAG.getLoad(VT, SDLoc(N), LD1->getChain(), LD1->getBasePtr(), 7620 LD1->getPointerInfo(), Align); 7621 } 7622 7623 return SDValue(); 7624 } 7625 7626 static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) { 7627 // On little-endian machines, bitcasting from ppcf128 to i128 does swap the Hi 7628 // and Lo parts; on big-endian machines it doesn't. 7629 return DAG.getDataLayout().isBigEndian() ? 1 : 0; 7630 } 7631 7632 static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG, 7633 const TargetLowering &TLI) { 7634 // If this is not a bitcast to an FP type or if the target doesn't have 7635 // IEEE754-compliant FP logic, we're done. 7636 EVT VT = N->getValueType(0); 7637 if (!VT.isFloatingPoint() || !TLI.hasBitPreservingFPLogic(VT)) 7638 return SDValue(); 7639 7640 // TODO: Use splat values for the constant-checking below and remove this 7641 // restriction. 7642 SDValue N0 = N->getOperand(0); 7643 EVT SourceVT = N0.getValueType(); 7644 if (SourceVT.isVector()) 7645 return SDValue(); 7646 7647 unsigned FPOpcode; 7648 APInt SignMask; 7649 switch (N0.getOpcode()) { 7650 case ISD::AND: 7651 FPOpcode = ISD::FABS; 7652 SignMask = ~APInt::getSignBit(SourceVT.getSizeInBits()); 7653 break; 7654 case ISD::XOR: 7655 FPOpcode = ISD::FNEG; 7656 SignMask = APInt::getSignBit(SourceVT.getSizeInBits()); 7657 break; 7658 // TODO: ISD::OR --> ISD::FNABS? 7659 default: 7660 return SDValue(); 7661 } 7662 7663 // Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X 7664 // Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X 7665 SDValue LogicOp0 = N0.getOperand(0); 7666 ConstantSDNode *LogicOp1 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 7667 if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask && 7668 LogicOp0.getOpcode() == ISD::BITCAST && 7669 LogicOp0->getOperand(0).getValueType() == VT) 7670 return DAG.getNode(FPOpcode, SDLoc(N), VT, LogicOp0->getOperand(0)); 7671 7672 return SDValue(); 7673 } 7674 7675 SDValue DAGCombiner::visitBITCAST(SDNode *N) { 7676 SDValue N0 = N->getOperand(0); 7677 EVT VT = N->getValueType(0); 7678 7679 // If the input is a BUILD_VECTOR with all constant elements, fold this now. 7680 // Only do this before legalize, since afterward the target may be depending 7681 // on the bitconvert. 7682 // First check to see if this is all constant. 
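  // For example, (v2i64 (bitcast (v4i32 build_vector c0, c1, c2, c3))) can be
  // constant-folded into a build_vector of two i64 constants.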
  if (!LegalTypes &&
      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
      VT.isVector()) {
    bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();

    EVT DestEltVT = N->getValueType(0).getVectorElementType();
    assert(!DestEltVT.isVector() &&
           "Element type of vector ValueType must not be vector!");
    if (isSimple)
      return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
  }

  // If the input is a constant, let getNode fold it.
  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
    // If we can't allow illegal operations, we need to check that this is just
    // an fp -> int or int -> fp conversion and that the resulting operation
    // will be legal.
    if (!LegalOperations ||
        (isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
         TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
        (isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
         TLI.isOperationLegal(ISD::Constant, VT)))
      return DAG.getBitcast(VT, N0);
  }

  // (conv (conv x, t1), t2) -> (conv x, t2)
  if (N0.getOpcode() == ISD::BITCAST)
    return DAG.getBitcast(VT, N0.getOperand(0));

  // fold (conv (load x)) -> (load (conv*)x)
  // if the resultant load doesn't need a higher alignment than the original.
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      // Do not change the width of a volatile load.
      !cast<LoadSDNode>(N0)->isVolatile() &&
      // Do not remove the cast if the types differ in endian layout.
      TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
          TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
      (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
      TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    unsigned OrigAlign = LN0->getAlignment();

    bool Fast = false;
    if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                               LN0->getAddressSpace(), OrigAlign, &Fast) &&
        Fast) {
      SDValue Load =
          DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
                      LN0->getPointerInfo(), OrigAlign,
                      LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
      return Load;
    }
  }

  if (SDValue V = foldBitcastedFPLogic(N, DAG, TLI))
    return V;

  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
  //
  // For ppc_fp128:
  // fold (bitcast (fneg x)) ->
  //     flipbit = signbit
  //     (xor (bitcast x) (build_pair flipbit, flipbit))
  //
  // fold (bitcast (fabs x)) ->
  //     flipbit = (and (extract_element (bitcast x), 0), signbit)
  //     (xor (bitcast x) (build_pair flipbit, flipbit))
  // This often reduces constant pool loads.
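  // For example, for f64: (i64 (bitcast (fneg x))) becomes
  // (xor (i64 (bitcast x)), 0x8000000000000000).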
7753 if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) || 7754 (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) && 7755 N0.getNode()->hasOneUse() && VT.isInteger() && 7756 !VT.isVector() && !N0.getValueType().isVector()) { 7757 SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0)); 7758 AddToWorklist(NewConv.getNode()); 7759 7760 SDLoc DL(N); 7761 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { 7762 assert(VT.getSizeInBits() == 128); 7763 SDValue SignBit = DAG.getConstant( 7764 APInt::getSignBit(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64); 7765 SDValue FlipBit; 7766 if (N0.getOpcode() == ISD::FNEG) { 7767 FlipBit = SignBit; 7768 AddToWorklist(FlipBit.getNode()); 7769 } else { 7770 assert(N0.getOpcode() == ISD::FABS); 7771 SDValue Hi = 7772 DAG.getNode(ISD::EXTRACT_ELEMENT, SDLoc(NewConv), MVT::i64, NewConv, 7773 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG), 7774 SDLoc(NewConv))); 7775 AddToWorklist(Hi.getNode()); 7776 FlipBit = DAG.getNode(ISD::AND, SDLoc(N0), MVT::i64, Hi, SignBit); 7777 AddToWorklist(FlipBit.getNode()); 7778 } 7779 SDValue FlipBits = 7780 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit); 7781 AddToWorklist(FlipBits.getNode()); 7782 return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits); 7783 } 7784 APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); 7785 if (N0.getOpcode() == ISD::FNEG) 7786 return DAG.getNode(ISD::XOR, DL, VT, 7787 NewConv, DAG.getConstant(SignBit, DL, VT)); 7788 assert(N0.getOpcode() == ISD::FABS); 7789 return DAG.getNode(ISD::AND, DL, VT, 7790 NewConv, DAG.getConstant(~SignBit, DL, VT)); 7791 } 7792 7793 // fold (bitconvert (fcopysign cst, x)) -> 7794 // (or (and (bitconvert x), sign), (and cst, (not sign))) 7795 // Note that we don't handle (copysign x, cst) because this can always be 7796 // folded to an fneg or fabs. 7797 // 7798 // For ppc_fp128: 7799 // fold (bitcast (fcopysign cst, x)) -> 7800 // flipbit = (and (extract_element 7801 // (xor (bitcast cst), (bitcast x)), 0), 7802 // signbit) 7803 // (xor (bitcast cst) (build_pair flipbit, flipbit)) 7804 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() && 7805 isa<ConstantFPSDNode>(N0.getOperand(0)) && 7806 VT.isInteger() && !VT.isVector()) { 7807 unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits(); 7808 EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth); 7809 if (isTypeLegal(IntXVT)) { 7810 SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1)); 7811 AddToWorklist(X.getNode()); 7812 7813 // If X has a different width than the result/lhs, sext it or truncate it. 7814 unsigned VTWidth = VT.getSizeInBits(); 7815 if (OrigXWidth < VTWidth) { 7816 X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X); 7817 AddToWorklist(X.getNode()); 7818 } else if (OrigXWidth > VTWidth) { 7819 // To get the sign bit in the right place, we have to shift it right 7820 // before truncating. 
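        // E.g. if X is i64 and VT is i32, the sign bit must move from bit 63
        // down to bit 31, so we shift right by OrigXWidth - VTWidth = 32
        // before the truncate.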
7821 SDLoc DL(X); 7822 X = DAG.getNode(ISD::SRL, DL, 7823 X.getValueType(), X, 7824 DAG.getConstant(OrigXWidth-VTWidth, DL, 7825 X.getValueType())); 7826 AddToWorklist(X.getNode()); 7827 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); 7828 AddToWorklist(X.getNode()); 7829 } 7830 7831 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { 7832 APInt SignBit = APInt::getSignBit(VT.getSizeInBits() / 2); 7833 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0)); 7834 AddToWorklist(Cst.getNode()); 7835 SDValue X = DAG.getBitcast(VT, N0.getOperand(1)); 7836 AddToWorklist(X.getNode()); 7837 SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X); 7838 AddToWorklist(XorResult.getNode()); 7839 SDValue XorResult64 = DAG.getNode( 7840 ISD::EXTRACT_ELEMENT, SDLoc(XorResult), MVT::i64, XorResult, 7841 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG), 7842 SDLoc(XorResult))); 7843 AddToWorklist(XorResult64.getNode()); 7844 SDValue FlipBit = 7845 DAG.getNode(ISD::AND, SDLoc(XorResult64), MVT::i64, XorResult64, 7846 DAG.getConstant(SignBit, SDLoc(XorResult64), MVT::i64)); 7847 AddToWorklist(FlipBit.getNode()); 7848 SDValue FlipBits = 7849 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit); 7850 AddToWorklist(FlipBits.getNode()); 7851 return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits); 7852 } 7853 APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); 7854 X = DAG.getNode(ISD::AND, SDLoc(X), VT, 7855 X, DAG.getConstant(SignBit, SDLoc(X), VT)); 7856 AddToWorklist(X.getNode()); 7857 7858 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0)); 7859 Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT, 7860 Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT)); 7861 AddToWorklist(Cst.getNode()); 7862 7863 return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst); 7864 } 7865 } 7866 7867 // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive. 7868 if (N0.getOpcode() == ISD::BUILD_PAIR) 7869 if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT)) 7870 return CombineLD; 7871 7872 // Remove double bitcasts from shuffles - this is often a legacy of 7873 // XformToShuffleWithZero being used to combine bitmaskings (of 7874 // float vectors bitcast to integer vectors) into shuffles. 7875 // bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1) 7876 if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() && 7877 N0->getOpcode() == ISD::VECTOR_SHUFFLE && 7878 VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() && 7879 !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) { 7880 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0); 7881 7882 // If operands are a bitcast, peek through if it casts the original VT. 7883 // If operands are a constant, just bitcast back to original VT. 
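    // The shuffle mask is also rescaled below to the new element count; e.g.
    // a v2i64 mask <1,0> viewed as v4i32 becomes the mask <2,3,0,1>.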
7884 auto PeekThroughBitcast = [&](SDValue Op) { 7885 if (Op.getOpcode() == ISD::BITCAST && 7886 Op.getOperand(0).getValueType() == VT) 7887 return SDValue(Op.getOperand(0)); 7888 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) || 7889 ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) 7890 return DAG.getBitcast(VT, Op); 7891 return SDValue(); 7892 }; 7893 7894 SDValue SV0 = PeekThroughBitcast(N0->getOperand(0)); 7895 SDValue SV1 = PeekThroughBitcast(N0->getOperand(1)); 7896 if (!(SV0 && SV1)) 7897 return SDValue(); 7898 7899 int MaskScale = 7900 VT.getVectorNumElements() / N0.getValueType().getVectorNumElements(); 7901 SmallVector<int, 8> NewMask; 7902 for (int M : SVN->getMask()) 7903 for (int i = 0; i != MaskScale; ++i) 7904 NewMask.push_back(M < 0 ? -1 : M * MaskScale + i); 7905 7906 bool LegalMask = TLI.isShuffleMaskLegal(NewMask, VT); 7907 if (!LegalMask) { 7908 std::swap(SV0, SV1); 7909 ShuffleVectorSDNode::commuteMask(NewMask); 7910 LegalMask = TLI.isShuffleMaskLegal(NewMask, VT); 7911 } 7912 7913 if (LegalMask) 7914 return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask); 7915 } 7916 7917 return SDValue(); 7918 } 7919 7920 SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) { 7921 EVT VT = N->getValueType(0); 7922 return CombineConsecutiveLoads(N, VT); 7923 } 7924 7925 /// We know that BV is a build_vector node with Constant, ConstantFP or Undef 7926 /// operands. DstEltVT indicates the destination element value type. 7927 SDValue DAGCombiner:: 7928 ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) { 7929 EVT SrcEltVT = BV->getValueType(0).getVectorElementType(); 7930 7931 // If this is already the right type, we're done. 7932 if (SrcEltVT == DstEltVT) return SDValue(BV, 0); 7933 7934 unsigned SrcBitSize = SrcEltVT.getSizeInBits(); 7935 unsigned DstBitSize = DstEltVT.getSizeInBits(); 7936 7937 // If this is a conversion of N elements of one type to N elements of another 7938 // type, convert each element. This handles FP<->INT cases. 7939 if (SrcBitSize == DstBitSize) { 7940 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, 7941 BV->getValueType(0).getVectorNumElements()); 7942 7943 // Due to the FP element handling below calling this routine recursively, 7944 // we can end up with a scalar-to-vector node here. 7945 if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR) 7946 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT, 7947 DAG.getBitcast(DstEltVT, BV->getOperand(0))); 7948 7949 SmallVector<SDValue, 8> Ops; 7950 for (SDValue Op : BV->op_values()) { 7951 // If the vector element type is not legal, the BUILD_VECTOR operands 7952 // are promoted and implicitly truncated. Make that explicit here. 7953 if (Op.getValueType() != SrcEltVT) 7954 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op); 7955 Ops.push_back(DAG.getBitcast(DstEltVT, Op)); 7956 AddToWorklist(Ops.back().getNode()); 7957 } 7958 return DAG.getBuildVector(VT, SDLoc(BV), Ops); 7959 } 7960 7961 // Otherwise, we're growing or shrinking the elements. To avoid having to 7962 // handle annoying details of growing/shrinking FP values, we convert them to 7963 // int first. 7964 if (SrcEltVT.isFloatingPoint()) { 7965 // Convert the input float vector to a int vector where the elements are the 7966 // same sizes. 7967 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits()); 7968 BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode(); 7969 SrcEltVT = IntVT; 7970 } 7971 7972 // Now we know the input is an integer vector. 
If the output is a FP type, 7973 // convert to integer first, then to FP of the right size. 7974 if (DstEltVT.isFloatingPoint()) { 7975 EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits()); 7976 SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode(); 7977 7978 // Next, convert to FP elements of the same size. 7979 return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT); 7980 } 7981 7982 SDLoc DL(BV); 7983 7984 // Okay, we know the src/dst types are both integers of differing types. 7985 // Handling growing first. 7986 assert(SrcEltVT.isInteger() && DstEltVT.isInteger()); 7987 if (SrcBitSize < DstBitSize) { 7988 unsigned NumInputsPerOutput = DstBitSize/SrcBitSize; 7989 7990 SmallVector<SDValue, 8> Ops; 7991 for (unsigned i = 0, e = BV->getNumOperands(); i != e; 7992 i += NumInputsPerOutput) { 7993 bool isLE = DAG.getDataLayout().isLittleEndian(); 7994 APInt NewBits = APInt(DstBitSize, 0); 7995 bool EltIsUndef = true; 7996 for (unsigned j = 0; j != NumInputsPerOutput; ++j) { 7997 // Shift the previously computed bits over. 7998 NewBits <<= SrcBitSize; 7999 SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j)); 8000 if (Op.isUndef()) continue; 8001 EltIsUndef = false; 8002 8003 NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue(). 8004 zextOrTrunc(SrcBitSize).zext(DstBitSize); 8005 } 8006 8007 if (EltIsUndef) 8008 Ops.push_back(DAG.getUNDEF(DstEltVT)); 8009 else 8010 Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT)); 8011 } 8012 8013 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size()); 8014 return DAG.getBuildVector(VT, DL, Ops); 8015 } 8016 8017 // Finally, this must be the case where we are shrinking elements: each input 8018 // turns into multiple outputs. 8019 unsigned NumOutputsPerInput = SrcBitSize/DstBitSize; 8020 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, 8021 NumOutputsPerInput*BV->getNumOperands()); 8022 SmallVector<SDValue, 8> Ops; 8023 8024 for (const SDValue &Op : BV->op_values()) { 8025 if (Op.isUndef()) { 8026 Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT)); 8027 continue; 8028 } 8029 8030 APInt OpVal = cast<ConstantSDNode>(Op)-> 8031 getAPIntValue().zextOrTrunc(SrcBitSize); 8032 8033 for (unsigned j = 0; j != NumOutputsPerInput; ++j) { 8034 APInt ThisVal = OpVal.trunc(DstBitSize); 8035 Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT)); 8036 OpVal = OpVal.lshr(DstBitSize); 8037 } 8038 8039 // For big endian targets, swap the order of the pieces of each element. 8040 if (DAG.getDataLayout().isBigEndian()) 8041 std::reverse(Ops.end()-NumOutputsPerInput, Ops.end()); 8042 } 8043 8044 return DAG.getBuildVector(VT, DL, Ops); 8045 } 8046 8047 /// Try to perform FMA combining on a given FADD node. 8048 SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) { 8049 SDValue N0 = N->getOperand(0); 8050 SDValue N1 = N->getOperand(1); 8051 EVT VT = N->getValueType(0); 8052 SDLoc SL(N); 8053 8054 const TargetOptions &Options = DAG.getTarget().Options; 8055 bool AllowFusion = 8056 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath); 8057 8058 // Floating-point multiply-add with intermediate rounding. 8059 bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 8060 8061 // Floating-point multiply-add without intermediate rounding. 8062 bool HasFMA = 8063 AllowFusion && TLI.isFMAFasterThanFMulAndFAdd(VT) && 8064 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 8065 8066 // No valid opcode, do not combine. 
  if (!HasFMAD && !HasFMA)
    return SDValue();

  const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
  if (AllowFusion && STI && STI->generateFMAsInMachineCombiner(OptLevel))
    return SDValue();

  // Always prefer FMAD to FMA for precision.
  unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
  bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
  bool LookThroughFPExt = TLI.isFPExtFree(VT);

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && N0.getOpcode() == ISD::FMUL &&
      N1.getOpcode() == ISD::FMUL) {
    if (N0.getNode()->use_size() > N1.getNode()->use_size())
      std::swap(N0, N1);
  }

  // fold (fadd (fmul x, y), z) -> (fma x, y, z)
  if (N0.getOpcode() == ISD::FMUL &&
      (Aggressive || N0->hasOneUse())) {
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       N0.getOperand(0), N0.getOperand(1), N1);
  }

  // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
  // Note: Commutes FADD operands.
  if (N1.getOpcode() == ISD::FMUL &&
      (Aggressive || N1->hasOneUse())) {
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       N1.getOperand(0), N1.getOperand(1), N0);
  }

  // Look through FP_EXTEND nodes to do more combining.
  if (AllowFusion && LookThroughFPExt) {
    // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
    if (N0.getOpcode() == ISD::FP_EXTEND) {
      SDValue N00 = N0.getOperand(0);
      if (N00.getOpcode() == ISD::FMUL)
        return DAG.getNode(PreferredFusedOpcode, SL, VT,
                           DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                       N00.getOperand(0)),
                           DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                       N00.getOperand(1)), N1);
    }

    // fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
    // Note: Commutes FADD operands.
    if (N1.getOpcode() == ISD::FP_EXTEND) {
      SDValue N10 = N1.getOperand(0);
      if (N10.getOpcode() == ISD::FMUL)
        return DAG.getNode(PreferredFusedOpcode, SL, VT,
                           DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                       N10.getOperand(0)),
                           DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                       N10.getOperand(1)), N0);
    }
  }

  // More folding opportunities when target permits.
  if ((AllowFusion || HasFMAD) && Aggressive) {
    // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
    if (N0.getOpcode() == PreferredFusedOpcode &&
        N0.getOperand(2).getOpcode() == ISD::FMUL &&
        N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         N0.getOperand(0), N0.getOperand(1),
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     N0.getOperand(2).getOperand(0),
                                     N0.getOperand(2).getOperand(1),
                                     N1));
    }

    // fold (fadd x, (fma y, z, (fmul u, v))) -> (fma y, z, (fma u, v, x))
    if (N1.getOpcode() == PreferredFusedOpcode &&
        N1.getOperand(2).getOpcode() == ISD::FMUL &&
        N1->hasOneUse() && N1.getOperand(2)->hasOneUse()) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         N1.getOperand(0), N1.getOperand(1),
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     N1.getOperand(2).getOperand(0),
                                     N1.getOperand(2).getOperand(1),
                                     N0));
    }

    if (AllowFusion && LookThroughFPExt) {
      // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
      //   -> (fma x, y, (fma (fpext u), (fpext v), z))
      auto FoldFAddFMAFPExtFMul = [&] (
          SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) {
        return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
                           DAG.getNode(PreferredFusedOpcode, SL, VT,
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
                                       Z));
      };
      if (N0.getOpcode() == PreferredFusedOpcode) {
        SDValue N02 = N0.getOperand(2);
        if (N02.getOpcode() == ISD::FP_EXTEND) {
          SDValue N020 = N02.getOperand(0);
          if (N020.getOpcode() == ISD::FMUL)
            return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
                                        N020.getOperand(0), N020.getOperand(1),
                                        N1);
        }
      }

      // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
      //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
      // FIXME: This turns two single-precision and one double-precision
      // operation into two double-precision operations, which might not be
      // interesting for all targets, especially GPUs.
8182 auto FoldFAddFPExtFMAFMul = [&] ( 8183 SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) { 8184 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8185 DAG.getNode(ISD::FP_EXTEND, SL, VT, X), 8186 DAG.getNode(ISD::FP_EXTEND, SL, VT, Y), 8187 DAG.getNode(PreferredFusedOpcode, SL, VT, 8188 DAG.getNode(ISD::FP_EXTEND, SL, VT, U), 8189 DAG.getNode(ISD::FP_EXTEND, SL, VT, V), 8190 Z)); 8191 }; 8192 if (N0.getOpcode() == ISD::FP_EXTEND) { 8193 SDValue N00 = N0.getOperand(0); 8194 if (N00.getOpcode() == PreferredFusedOpcode) { 8195 SDValue N002 = N00.getOperand(2); 8196 if (N002.getOpcode() == ISD::FMUL) 8197 return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1), 8198 N002.getOperand(0), N002.getOperand(1), 8199 N1); 8200 } 8201 } 8202 8203 // fold (fadd x, (fma y, z, (fpext (fmul u, v))) 8204 // -> (fma y, z, (fma (fpext u), (fpext v), x)) 8205 if (N1.getOpcode() == PreferredFusedOpcode) { 8206 SDValue N12 = N1.getOperand(2); 8207 if (N12.getOpcode() == ISD::FP_EXTEND) { 8208 SDValue N120 = N12.getOperand(0); 8209 if (N120.getOpcode() == ISD::FMUL) 8210 return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1), 8211 N120.getOperand(0), N120.getOperand(1), 8212 N0); 8213 } 8214 } 8215 8216 // fold (fadd x, (fpext (fma y, z, (fmul u, v))) 8217 // -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x)) 8218 // FIXME: This turns two single-precision and one double-precision 8219 // operation into two double-precision operations, which might not be 8220 // interesting for all targets, especially GPUs. 8221 if (N1.getOpcode() == ISD::FP_EXTEND) { 8222 SDValue N10 = N1.getOperand(0); 8223 if (N10.getOpcode() == PreferredFusedOpcode) { 8224 SDValue N102 = N10.getOperand(2); 8225 if (N102.getOpcode() == ISD::FMUL) 8226 return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1), 8227 N102.getOperand(0), N102.getOperand(1), 8228 N0); 8229 } 8230 } 8231 } 8232 } 8233 8234 return SDValue(); 8235 } 8236 8237 /// Try to perform FMA combining on a given FSUB node. 8238 SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) { 8239 SDValue N0 = N->getOperand(0); 8240 SDValue N1 = N->getOperand(1); 8241 EVT VT = N->getValueType(0); 8242 SDLoc SL(N); 8243 8244 const TargetOptions &Options = DAG.getTarget().Options; 8245 bool AllowFusion = 8246 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath); 8247 8248 // Floating-point multiply-add with intermediate rounding. 8249 bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 8250 8251 // Floating-point multiply-add without intermediate rounding. 8252 bool HasFMA = 8253 AllowFusion && TLI.isFMAFasterThanFMulAndFAdd(VT) && 8254 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 8255 8256 // No valid opcode, do not combine. 8257 if (!HasFMAD && !HasFMA) 8258 return SDValue(); 8259 8260 const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo(); 8261 if (AllowFusion && STI && STI->generateFMAsInMachineCombiner(OptLevel)) 8262 return SDValue(); 8263 8264 // Always prefer FMAD to FMA for precision. 8265 unsigned PreferredFusedOpcode = HasFMAD ? 
ISD::FMAD : ISD::FMA;
  bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
  bool LookThroughFPExt = TLI.isFPExtFree(VT);

  // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
  if (N0.getOpcode() == ISD::FMUL &&
      (Aggressive || N0->hasOneUse())) {
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       N0.getOperand(0), N0.getOperand(1),
                       DAG.getNode(ISD::FNEG, SL, VT, N1));
  }

  // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
  // Note: Commutes FSUB operands.
  if (N1.getOpcode() == ISD::FMUL &&
      (Aggressive || N1->hasOneUse()))
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       DAG.getNode(ISD::FNEG, SL, VT,
                                   N1.getOperand(0)),
                       N1.getOperand(1), N0);

  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  if (N0.getOpcode() == ISD::FNEG &&
      N0.getOperand(0).getOpcode() == ISD::FMUL &&
      (Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) {
    SDValue N00 = N0.getOperand(0).getOperand(0);
    SDValue N01 = N0.getOperand(0).getOperand(1);
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
                       DAG.getNode(ISD::FNEG, SL, VT, N1));
  }

  // Look through FP_EXTEND nodes to do more combining.
  if (AllowFusion && LookThroughFPExt) {
    // fold (fsub (fpext (fmul x, y)), z)
    //   -> (fma (fpext x), (fpext y), (fneg z))
    if (N0.getOpcode() == ISD::FP_EXTEND) {
      SDValue N00 = N0.getOperand(0);
      if (N00.getOpcode() == ISD::FMUL)
        return DAG.getNode(PreferredFusedOpcode, SL, VT,
                           DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                       N00.getOperand(0)),
                           DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                       N00.getOperand(1)),
                           DAG.getNode(ISD::FNEG, SL, VT, N1));
    }

    // fold (fsub x, (fpext (fmul y, z)))
    //   -> (fma (fneg (fpext y)), (fpext z), x)
    // Note: Commutes FSUB operands.
    if (N1.getOpcode() == ISD::FP_EXTEND) {
      SDValue N10 = N1.getOperand(0);
      if (N10.getOpcode() == ISD::FMUL)
        return DAG.getNode(PreferredFusedOpcode, SL, VT,
                           DAG.getNode(ISD::FNEG, SL, VT,
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N10.getOperand(0))),
                           DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                       N10.getOperand(1)),
                           N0);
    }

    // fold (fsub (fpext (fneg (fmul x, y))), z)
    //   -> (fneg (fma (fpext x), (fpext y), z))
    // Note: This could be removed with appropriate canonicalization of the
    // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
    // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
    // us from implementing the canonicalization in visitFSUB.
    if (N0.getOpcode() == ISD::FP_EXTEND) {
      SDValue N00 = N0.getOperand(0);
      if (N00.getOpcode() == ISD::FNEG) {
        SDValue N000 = N00.getOperand(0);
        if (N000.getOpcode() == ISD::FMUL) {
          return DAG.getNode(ISD::FNEG, SL, VT,
                             DAG.getNode(PreferredFusedOpcode, SL, VT,
                                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                     N000.getOperand(0)),
                                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                     N000.getOperand(1)),
                                         N1));
        }
      }
    }

    // fold (fsub (fneg (fpext (fmul x, y))), z)
    //   -> (fneg (fma (fpext x), (fpext y), z))
    // Note: This could be removed with appropriate canonicalization of the
    // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
    // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
    // us from implementing the canonicalization in visitFSUB.
8355 if (N0.getOpcode() == ISD::FNEG) { 8356 SDValue N00 = N0.getOperand(0); 8357 if (N00.getOpcode() == ISD::FP_EXTEND) { 8358 SDValue N000 = N00.getOperand(0); 8359 if (N000.getOpcode() == ISD::FMUL) { 8360 return DAG.getNode(ISD::FNEG, SL, VT, 8361 DAG.getNode(PreferredFusedOpcode, SL, VT, 8362 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8363 N000.getOperand(0)), 8364 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8365 N000.getOperand(1)), 8366 N1)); 8367 } 8368 } 8369 } 8370 8371 } 8372 8373 // More folding opportunities when target permits. 8374 if ((AllowFusion || HasFMAD) && Aggressive) { 8375 // fold (fsub (fma x, y, (fmul u, v)), z) 8376 // -> (fma x, y (fma u, v, (fneg z))) 8377 if (N0.getOpcode() == PreferredFusedOpcode && 8378 N0.getOperand(2).getOpcode() == ISD::FMUL && 8379 N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) { 8380 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8381 N0.getOperand(0), N0.getOperand(1), 8382 DAG.getNode(PreferredFusedOpcode, SL, VT, 8383 N0.getOperand(2).getOperand(0), 8384 N0.getOperand(2).getOperand(1), 8385 DAG.getNode(ISD::FNEG, SL, VT, 8386 N1))); 8387 } 8388 8389 // fold (fsub x, (fma y, z, (fmul u, v))) 8390 // -> (fma (fneg y), z, (fma (fneg u), v, x)) 8391 if (N1.getOpcode() == PreferredFusedOpcode && 8392 N1.getOperand(2).getOpcode() == ISD::FMUL) { 8393 SDValue N20 = N1.getOperand(2).getOperand(0); 8394 SDValue N21 = N1.getOperand(2).getOperand(1); 8395 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8396 DAG.getNode(ISD::FNEG, SL, VT, 8397 N1.getOperand(0)), 8398 N1.getOperand(1), 8399 DAG.getNode(PreferredFusedOpcode, SL, VT, 8400 DAG.getNode(ISD::FNEG, SL, VT, N20), 8401 8402 N21, N0)); 8403 } 8404 8405 if (AllowFusion && LookThroughFPExt) { 8406 // fold (fsub (fma x, y, (fpext (fmul u, v))), z) 8407 // -> (fma x, y (fma (fpext u), (fpext v), (fneg z))) 8408 if (N0.getOpcode() == PreferredFusedOpcode) { 8409 SDValue N02 = N0.getOperand(2); 8410 if (N02.getOpcode() == ISD::FP_EXTEND) { 8411 SDValue N020 = N02.getOperand(0); 8412 if (N020.getOpcode() == ISD::FMUL) 8413 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8414 N0.getOperand(0), N0.getOperand(1), 8415 DAG.getNode(PreferredFusedOpcode, SL, VT, 8416 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8417 N020.getOperand(0)), 8418 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8419 N020.getOperand(1)), 8420 DAG.getNode(ISD::FNEG, SL, VT, 8421 N1))); 8422 } 8423 } 8424 8425 // fold (fsub (fpext (fma x, y, (fmul u, v))), z) 8426 // -> (fma (fpext x), (fpext y), 8427 // (fma (fpext u), (fpext v), (fneg z))) 8428 // FIXME: This turns two single-precision and one double-precision 8429 // operation into two double-precision operations, which might not be 8430 // interesting for all targets, especially GPUs. 
8431 if (N0.getOpcode() == ISD::FP_EXTEND) { 8432 SDValue N00 = N0.getOperand(0); 8433 if (N00.getOpcode() == PreferredFusedOpcode) { 8434 SDValue N002 = N00.getOperand(2); 8435 if (N002.getOpcode() == ISD::FMUL) 8436 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8437 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8438 N00.getOperand(0)), 8439 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8440 N00.getOperand(1)), 8441 DAG.getNode(PreferredFusedOpcode, SL, VT, 8442 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8443 N002.getOperand(0)), 8444 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8445 N002.getOperand(1)), 8446 DAG.getNode(ISD::FNEG, SL, VT, 8447 N1))); 8448 } 8449 } 8450 8451 // fold (fsub x, (fma y, z, (fpext (fmul u, v)))) 8452 // -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x)) 8453 if (N1.getOpcode() == PreferredFusedOpcode && 8454 N1.getOperand(2).getOpcode() == ISD::FP_EXTEND) { 8455 SDValue N120 = N1.getOperand(2).getOperand(0); 8456 if (N120.getOpcode() == ISD::FMUL) { 8457 SDValue N1200 = N120.getOperand(0); 8458 SDValue N1201 = N120.getOperand(1); 8459 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8460 DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), 8461 N1.getOperand(1), 8462 DAG.getNode(PreferredFusedOpcode, SL, VT, 8463 DAG.getNode(ISD::FNEG, SL, VT, 8464 DAG.getNode(ISD::FP_EXTEND, SL, 8465 VT, N1200)), 8466 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8467 N1201), 8468 N0)); 8469 } 8470 } 8471 8472 // fold (fsub x, (fpext (fma y, z, (fmul u, v)))) 8473 // -> (fma (fneg (fpext y)), (fpext z), 8474 // (fma (fneg (fpext u)), (fpext v), x)) 8475 // FIXME: This turns two single-precision and one double-precision 8476 // operation into two double-precision operations, which might not be 8477 // interesting for all targets, especially GPUs. 8478 if (N1.getOpcode() == ISD::FP_EXTEND && 8479 N1.getOperand(0).getOpcode() == PreferredFusedOpcode) { 8480 SDValue N100 = N1.getOperand(0).getOperand(0); 8481 SDValue N101 = N1.getOperand(0).getOperand(1); 8482 SDValue N102 = N1.getOperand(0).getOperand(2); 8483 if (N102.getOpcode() == ISD::FMUL) { 8484 SDValue N1020 = N102.getOperand(0); 8485 SDValue N1021 = N102.getOperand(1); 8486 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8487 DAG.getNode(ISD::FNEG, SL, VT, 8488 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8489 N100)), 8490 DAG.getNode(ISD::FP_EXTEND, SL, VT, N101), 8491 DAG.getNode(PreferredFusedOpcode, SL, VT, 8492 DAG.getNode(ISD::FNEG, SL, VT, 8493 DAG.getNode(ISD::FP_EXTEND, SL, 8494 VT, N1020)), 8495 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8496 N1021), 8497 N0)); 8498 } 8499 } 8500 } 8501 } 8502 8503 return SDValue(); 8504 } 8505 8506 /// Try to perform FMA combining on a given FMUL node based on the distributive 8507 /// law x * (y + 1) = x * y + x and variants thereof (commuted versions, 8508 /// subtraction instead of addition). 8509 SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) { 8510 SDValue N0 = N->getOperand(0); 8511 SDValue N1 = N->getOperand(1); 8512 EVT VT = N->getValueType(0); 8513 SDLoc SL(N); 8514 8515 assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation"); 8516 8517 const TargetOptions &Options = DAG.getTarget().Options; 8518 8519 // The transforms below are incorrect when x == 0 and y == inf, because the 8520 // intermediate multiplication produces a nan. 8521 if (!Options.NoInfsFPMath) 8522 return SDValue(); 8523 8524 // Floating-point multiply-add without intermediate rounding. 
8525 bool HasFMA = 8526 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) && 8527 TLI.isFMAFasterThanFMulAndFAdd(VT) && 8528 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 8529 8530 // Floating-point multiply-add with intermediate rounding. This can result 8531 // in a less precise result due to the changed rounding order. 8532 bool HasFMAD = Options.UnsafeFPMath && 8533 (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 8534 8535 // No valid opcode, do not combine. 8536 if (!HasFMAD && !HasFMA) 8537 return SDValue(); 8538 8539 // Always prefer FMAD to FMA for precision. 8540 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA; 8541 bool Aggressive = TLI.enableAggressiveFMAFusion(VT); 8542 8543 // fold (fmul (fadd x, +1.0), y) -> (fma x, y, y) 8544 // fold (fmul (fadd x, -1.0), y) -> (fma x, y, (fneg y)) 8545 auto FuseFADD = [&](SDValue X, SDValue Y) { 8546 if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) { 8547 auto XC1 = isConstOrConstSplatFP(X.getOperand(1)); 8548 if (XC1 && XC1->isExactlyValue(+1.0)) 8549 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, Y); 8550 if (XC1 && XC1->isExactlyValue(-1.0)) 8551 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, 8552 DAG.getNode(ISD::FNEG, SL, VT, Y)); 8553 } 8554 return SDValue(); 8555 }; 8556 8557 if (SDValue FMA = FuseFADD(N0, N1)) 8558 return FMA; 8559 if (SDValue FMA = FuseFADD(N1, N0)) 8560 return FMA; 8561 8562 // fold (fmul (fsub +1.0, x), y) -> (fma (fneg x), y, y) 8563 // fold (fmul (fsub -1.0, x), y) -> (fma (fneg x), y, (fneg y)) 8564 // fold (fmul (fsub x, +1.0), y) -> (fma x, y, (fneg y)) 8565 // fold (fmul (fsub x, -1.0), y) -> (fma x, y, y) 8566 auto FuseFSUB = [&](SDValue X, SDValue Y) { 8567 if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) { 8568 auto XC0 = isConstOrConstSplatFP(X.getOperand(0)); 8569 if (XC0 && XC0->isExactlyValue(+1.0)) 8570 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8571 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y, 8572 Y); 8573 if (XC0 && XC0->isExactlyValue(-1.0)) 8574 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8575 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y, 8576 DAG.getNode(ISD::FNEG, SL, VT, Y)); 8577 8578 auto XC1 = isConstOrConstSplatFP(X.getOperand(1)); 8579 if (XC1 && XC1->isExactlyValue(+1.0)) 8580 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, 8581 DAG.getNode(ISD::FNEG, SL, VT, Y)); 8582 if (XC1 && XC1->isExactlyValue(-1.0)) 8583 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, Y); 8584 } 8585 return SDValue(); 8586 }; 8587 8588 if (SDValue FMA = FuseFSUB(N0, N1)) 8589 return FMA; 8590 if (SDValue FMA = FuseFSUB(N1, N0)) 8591 return FMA; 8592 8593 return SDValue(); 8594 } 8595 8596 SDValue DAGCombiner::visitFADD(SDNode *N) { 8597 SDValue N0 = N->getOperand(0); 8598 SDValue N1 = N->getOperand(1); 8599 bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0); 8600 bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1); 8601 EVT VT = N->getValueType(0); 8602 SDLoc DL(N); 8603 const TargetOptions &Options = DAG.getTarget().Options; 8604 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 8605 8606 // fold vector ops 8607 if (VT.isVector()) 8608 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 8609 return FoldedVOp; 8610 8611 // fold (fadd c1, c2) -> c1 + c2 8612 if (N0CFP && N1CFP) 8613 return DAG.getNode(ISD::FADD, DL, VT, N0, N1, Flags); 8614 8615 // canonicalize constant to RHS 8616 if (N0CFP && 
!N1CFP) 8617 return DAG.getNode(ISD::FADD, DL, VT, N1, N0, Flags); 8618 8619 // fold (fadd A, (fneg B)) -> (fsub A, B) 8620 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 8621 isNegatibleForFree(N1, LegalOperations, TLI, &Options) == 2) 8622 return DAG.getNode(ISD::FSUB, DL, VT, N0, 8623 GetNegatedExpression(N1, DAG, LegalOperations), Flags); 8624 8625 // fold (fadd (fneg A), B) -> (fsub B, A) 8626 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 8627 isNegatibleForFree(N0, LegalOperations, TLI, &Options) == 2) 8628 return DAG.getNode(ISD::FSUB, DL, VT, N1, 8629 GetNegatedExpression(N0, DAG, LegalOperations), Flags); 8630 8631 // FIXME: Auto-upgrade the target/function-level option. 8632 if (Options.UnsafeFPMath || N->getFlags()->hasNoSignedZeros()) { 8633 // fold (fadd A, 0) -> A 8634 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1)) 8635 if (N1C->isZero()) 8636 return N0; 8637 } 8638 8639 // If 'unsafe math' is enabled, fold lots of things. 8640 if (Options.UnsafeFPMath) { 8641 // No FP constant should be created after legalization as Instruction 8642 // Selection pass has a hard time dealing with FP constants. 8643 bool AllowNewConst = (Level < AfterLegalizeDAG); 8644 8645 // fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2)) 8646 if (N1CFP && N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() && 8647 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) 8648 return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), 8649 DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1, 8650 Flags), 8651 Flags); 8652 8653 // If allowed, fold (fadd (fneg x), x) -> 0.0 8654 if (AllowNewConst && N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) 8655 return DAG.getConstantFP(0.0, DL, VT); 8656 8657 // If allowed, fold (fadd x, (fneg x)) -> 0.0 8658 if (AllowNewConst && N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) 8659 return DAG.getConstantFP(0.0, DL, VT); 8660 8661 // We can fold chains of FADD's of the same value into multiplications. 8662 // This transform is not safe in general because we are reducing the number 8663 // of rounding steps. 
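    // E.g. (fadd (fadd x, x), x) rounds after each of the two additions,
    // whereas the equivalent (fmul x, 3.0) rounds only once.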
8664 if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) { 8665 if (N0.getOpcode() == ISD::FMUL) { 8666 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0)); 8667 bool CFP01 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(1)); 8668 8669 // (fadd (fmul x, c), x) -> (fmul x, c+1) 8670 if (CFP01 && !CFP00 && N0.getOperand(0) == N1) { 8671 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), 8672 DAG.getConstantFP(1.0, DL, VT), Flags); 8673 return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP, Flags); 8674 } 8675 8676 // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2) 8677 if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD && 8678 N1.getOperand(0) == N1.getOperand(1) && 8679 N0.getOperand(0) == N1.getOperand(0)) { 8680 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), 8681 DAG.getConstantFP(2.0, DL, VT), Flags); 8682 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP, Flags); 8683 } 8684 } 8685 8686 if (N1.getOpcode() == ISD::FMUL) { 8687 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0)); 8688 bool CFP11 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(1)); 8689 8690 // (fadd x, (fmul x, c)) -> (fmul x, c+1) 8691 if (CFP11 && !CFP10 && N1.getOperand(0) == N0) { 8692 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1), 8693 DAG.getConstantFP(1.0, DL, VT), Flags); 8694 return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP, Flags); 8695 } 8696 8697 // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2) 8698 if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD && 8699 N0.getOperand(0) == N0.getOperand(1) && 8700 N1.getOperand(0) == N0.getOperand(0)) { 8701 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1), 8702 DAG.getConstantFP(2.0, DL, VT), Flags); 8703 return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP, Flags); 8704 } 8705 } 8706 8707 if (N0.getOpcode() == ISD::FADD && AllowNewConst) { 8708 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0)); 8709 // (fadd (fadd x, x), x) -> (fmul x, 3.0) 8710 if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) && 8711 (N0.getOperand(0) == N1)) { 8712 return DAG.getNode(ISD::FMUL, DL, VT, 8713 N1, DAG.getConstantFP(3.0, DL, VT), Flags); 8714 } 8715 } 8716 8717 if (N1.getOpcode() == ISD::FADD && AllowNewConst) { 8718 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0)); 8719 // (fadd x, (fadd x, x)) -> (fmul x, 3.0) 8720 if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) && 8721 N1.getOperand(0) == N0) { 8722 return DAG.getNode(ISD::FMUL, DL, VT, 8723 N0, DAG.getConstantFP(3.0, DL, VT), Flags); 8724 } 8725 } 8726 8727 // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0) 8728 if (AllowNewConst && 8729 N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD && 8730 N0.getOperand(0) == N0.getOperand(1) && 8731 N1.getOperand(0) == N1.getOperand(1) && 8732 N0.getOperand(0) == N1.getOperand(0)) { 8733 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), 8734 DAG.getConstantFP(4.0, DL, VT), Flags); 8735 } 8736 } 8737 } // enable-unsafe-fp-math 8738 8739 // FADD -> FMA combines: 8740 if (SDValue Fused = visitFADDForFMACombine(N)) { 8741 AddToWorklist(Fused.getNode()); 8742 return Fused; 8743 } 8744 return SDValue(); 8745 } 8746 8747 SDValue DAGCombiner::visitFSUB(SDNode *N) { 8748 SDValue N0 = N->getOperand(0); 8749 SDValue N1 = N->getOperand(1); 8750 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 8751 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 8752 EVT VT = N->getValueType(0); 8753 SDLoc DL(N); 8754 
const TargetOptions &Options = DAG.getTarget().Options; 8755 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 8756 8757 // fold vector ops 8758 if (VT.isVector()) 8759 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 8760 return FoldedVOp; 8761 8762 // fold (fsub c1, c2) -> c1-c2 8763 if (N0CFP && N1CFP) 8764 return DAG.getNode(ISD::FSUB, DL, VT, N0, N1, Flags); 8765 8766 // fold (fsub A, (fneg B)) -> (fadd A, B) 8767 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options)) 8768 return DAG.getNode(ISD::FADD, DL, VT, N0, 8769 GetNegatedExpression(N1, DAG, LegalOperations), Flags); 8770 8771 // FIXME: Auto-upgrade the target/function-level option. 8772 if (Options.UnsafeFPMath || N->getFlags()->hasNoSignedZeros()) { 8773 // (fsub 0, B) -> -B 8774 if (N0CFP && N0CFP->isZero()) { 8775 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options)) 8776 return GetNegatedExpression(N1, DAG, LegalOperations); 8777 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 8778 return DAG.getNode(ISD::FNEG, DL, VT, N1, Flags); 8779 } 8780 } 8781 8782 // If 'unsafe math' is enabled, fold lots of things. 8783 if (Options.UnsafeFPMath) { 8784 // (fsub A, 0) -> A 8785 if (N1CFP && N1CFP->isZero()) 8786 return N0; 8787 8788 // (fsub x, x) -> 0.0 8789 if (N0 == N1) 8790 return DAG.getConstantFP(0.0f, DL, VT); 8791 8792 // (fsub x, (fadd x, y)) -> (fneg y) 8793 // (fsub x, (fadd y, x)) -> (fneg y) 8794 if (N1.getOpcode() == ISD::FADD) { 8795 SDValue N10 = N1->getOperand(0); 8796 SDValue N11 = N1->getOperand(1); 8797 8798 if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, &Options)) 8799 return GetNegatedExpression(N11, DAG, LegalOperations); 8800 8801 if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, &Options)) 8802 return GetNegatedExpression(N10, DAG, LegalOperations); 8803 } 8804 } 8805 8806 // FSUB -> FMA combines: 8807 if (SDValue Fused = visitFSUBForFMACombine(N)) { 8808 AddToWorklist(Fused.getNode()); 8809 return Fused; 8810 } 8811 8812 return SDValue(); 8813 } 8814 8815 SDValue DAGCombiner::visitFMUL(SDNode *N) { 8816 SDValue N0 = N->getOperand(0); 8817 SDValue N1 = N->getOperand(1); 8818 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 8819 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 8820 EVT VT = N->getValueType(0); 8821 SDLoc DL(N); 8822 const TargetOptions &Options = DAG.getTarget().Options; 8823 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 8824 8825 // fold vector ops 8826 if (VT.isVector()) { 8827 // This just handles C1 * C2 for vectors. Other vector folds are below. 8828 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 8829 return FoldedVOp; 8830 } 8831 8832 // fold (fmul c1, c2) -> c1*c2 8833 if (N0CFP && N1CFP) 8834 return DAG.getNode(ISD::FMUL, DL, VT, N0, N1, Flags); 8835 8836 // canonicalize constant to RHS 8837 if (isConstantFPBuildVectorOrConstantFP(N0) && 8838 !isConstantFPBuildVectorOrConstantFP(N1)) 8839 return DAG.getNode(ISD::FMUL, DL, VT, N1, N0, Flags); 8840 8841 // fold (fmul A, 1.0) -> A 8842 if (N1CFP && N1CFP->isExactlyValue(1.0)) 8843 return N0; 8844 8845 if (Options.UnsafeFPMath) { 8846 // fold (fmul A, 0) -> 0 8847 if (N1CFP && N1CFP->isZero()) 8848 return N1; 8849 8850 // fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2)) 8851 if (N0.getOpcode() == ISD::FMUL) { 8852 // Fold scalars or any vector constants (not just splats). 8853 // This fold is done in general by InstCombine, but extra fmul insts 8854 // may have been generated during lowering. 
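      // E.g. (fmul (fmul x, 5.0), 3.0) becomes (fmul x, 15.0) here.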
8855 SDValue N00 = N0.getOperand(0); 8856 SDValue N01 = N0.getOperand(1); 8857 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); 8858 auto *BV00 = dyn_cast<BuildVectorSDNode>(N00); 8859 auto *BV01 = dyn_cast<BuildVectorSDNode>(N01); 8860 8861 // Check 1: Make sure that the first operand of the inner multiply is NOT 8862 // a constant. Otherwise, we may induce infinite looping. 8863 if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) { 8864 // Check 2: Make sure that the second operand of the inner multiply and 8865 // the second operand of the outer multiply are constants. 8866 if ((N1CFP && isConstOrConstSplatFP(N01)) || 8867 (BV1 && BV01 && BV1->isConstant() && BV01->isConstant())) { 8868 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1, Flags); 8869 return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts, Flags); 8870 } 8871 } 8872 } 8873 8874 // fold (fmul (fadd x, x), c) -> (fmul x, (fmul 2.0, c)) 8875 // Undo the fmul 2.0, x -> fadd x, x transformation, since if it occurs 8876 // during an early run of DAGCombiner can prevent folding with fmuls 8877 // inserted during lowering. 8878 if (N0.getOpcode() == ISD::FADD && 8879 (N0.getOperand(0) == N0.getOperand(1)) && 8880 N0.hasOneUse()) { 8881 const SDValue Two = DAG.getConstantFP(2.0, DL, VT); 8882 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1, Flags); 8883 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts, Flags); 8884 } 8885 } 8886 8887 // fold (fmul X, 2.0) -> (fadd X, X) 8888 if (N1CFP && N1CFP->isExactlyValue(+2.0)) 8889 return DAG.getNode(ISD::FADD, DL, VT, N0, N0, Flags); 8890 8891 // fold (fmul X, -1.0) -> (fneg X) 8892 if (N1CFP && N1CFP->isExactlyValue(-1.0)) 8893 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 8894 return DAG.getNode(ISD::FNEG, DL, VT, N0); 8895 8896 // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y) 8897 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) { 8898 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) { 8899 // Both can be negated for free, check to see if at least one is cheaper 8900 // negated. 8901 if (LHSNeg == 2 || RHSNeg == 2) 8902 return DAG.getNode(ISD::FMUL, DL, VT, 8903 GetNegatedExpression(N0, DAG, LegalOperations), 8904 GetNegatedExpression(N1, DAG, LegalOperations), 8905 Flags); 8906 } 8907 } 8908 8909 // FMUL -> FMA combines: 8910 if (SDValue Fused = visitFMULForFMADistributiveCombine(N)) { 8911 AddToWorklist(Fused.getNode()); 8912 return Fused; 8913 } 8914 8915 return SDValue(); 8916 } 8917 8918 SDValue DAGCombiner::visitFMA(SDNode *N) { 8919 SDValue N0 = N->getOperand(0); 8920 SDValue N1 = N->getOperand(1); 8921 SDValue N2 = N->getOperand(2); 8922 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 8923 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 8924 EVT VT = N->getValueType(0); 8925 SDLoc DL(N); 8926 const TargetOptions &Options = DAG.getTarget().Options; 8927 8928 // Constant fold FMA. 8929 if (isa<ConstantFPSDNode>(N0) && 8930 isa<ConstantFPSDNode>(N1) && 8931 isa<ConstantFPSDNode>(N2)) { 8932 return DAG.getNode(ISD::FMA, DL, VT, N0, N1, N2); 8933 } 8934 8935 if (Options.UnsafeFPMath) { 8936 if (N0CFP && N0CFP->isZero()) 8937 return N2; 8938 if (N1CFP && N1CFP->isZero()) 8939 return N2; 8940 } 8941 // TODO: The FMA node should have flags that propagate to these nodes. 
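  // fold (fma 1.0, x, y) -> (fadd x, y)
  // fold (fma x, 1.0, y) -> (fadd x, y)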
8942 if (N0CFP && N0CFP->isExactlyValue(1.0)) 8943 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2); 8944 if (N1CFP && N1CFP->isExactlyValue(1.0)) 8945 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2); 8946 8947 // Canonicalize (fma c, x, y) -> (fma x, c, y) 8948 if (isConstantFPBuildVectorOrConstantFP(N0) && 8949 !isConstantFPBuildVectorOrConstantFP(N1)) 8950 return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2); 8951 8952 // TODO: FMA nodes should have flags that propagate to the created nodes. 8953 // For now, create a Flags object for use with all unsafe math transforms. 8954 SDNodeFlags Flags; 8955 Flags.setUnsafeAlgebra(true); 8956 8957 if (Options.UnsafeFPMath) { 8958 // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2) 8959 if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) && 8960 isConstantFPBuildVectorOrConstantFP(N1) && 8961 isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) { 8962 return DAG.getNode(ISD::FMUL, DL, VT, N0, 8963 DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1), 8964 &Flags), &Flags); 8965 } 8966 8967 // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y) 8968 if (N0.getOpcode() == ISD::FMUL && 8969 isConstantFPBuildVectorOrConstantFP(N1) && 8970 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) { 8971 return DAG.getNode(ISD::FMA, DL, VT, 8972 N0.getOperand(0), 8973 DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1), 8974 &Flags), 8975 N2); 8976 } 8977 } 8978 8979 // (fma x, 1, y) -> (fadd x, y) 8980 // (fma x, -1, y) -> (fadd (fneg x), y) 8981 if (N1CFP) { 8982 if (N1CFP->isExactlyValue(1.0)) 8983 // TODO: The FMA node should have flags that propagate to this node. 8984 return DAG.getNode(ISD::FADD, DL, VT, N0, N2); 8985 8986 if (N1CFP->isExactlyValue(-1.0) && 8987 (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) { 8988 SDValue RHSNeg = DAG.getNode(ISD::FNEG, DL, VT, N0); 8989 AddToWorklist(RHSNeg.getNode()); 8990 // TODO: The FMA node should have flags that propagate to this node. 8991 return DAG.getNode(ISD::FADD, DL, VT, N2, RHSNeg); 8992 } 8993 } 8994 8995 if (Options.UnsafeFPMath) { 8996 // (fma x, c, x) -> (fmul x, (c+1)) 8997 if (N1CFP && N0 == N2) { 8998 return DAG.getNode(ISD::FMUL, DL, VT, N0, 8999 DAG.getNode(ISD::FADD, DL, VT, N1, 9000 DAG.getConstantFP(1.0, DL, VT), &Flags), 9001 &Flags); 9002 } 9003 9004 // (fma x, c, (fneg x)) -> (fmul x, (c-1)) 9005 if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) { 9006 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9007 DAG.getNode(ISD::FADD, DL, VT, N1, 9008 DAG.getConstantFP(-1.0, DL, VT), &Flags), 9009 &Flags); 9010 } 9011 } 9012 9013 return SDValue(); 9014 } 9015 9016 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 9017 // reciprocal. 9018 // E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip) 9019 // Notice that this is not always beneficial. One reason is different targets 9020 // may have different costs for FDIV and FMUL, so sometimes the cost of two 9021 // FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason 9022 // is the critical path is increased from "one FDIV" to "one FDIV + one FMUL". 9023 SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) { 9024 bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath; 9025 const SDNodeFlags *Flags = N->getFlags(); 9026 if (!UnsafeMath && !Flags->hasAllowReciprocal()) 9027 return SDValue(); 9028 9029 // Skip if current node is a reciprocal. 
9030 SDValue N0 = N->getOperand(0); 9031 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9032 if (N0CFP && N0CFP->isExactlyValue(1.0)) 9033 return SDValue(); 9034 9035 // Exit early if the target does not want this transform or if there can't 9036 // possibly be enough uses of the divisor to make the transform worthwhile. 9037 SDValue N1 = N->getOperand(1); 9038 unsigned MinUses = TLI.combineRepeatedFPDivisors(); 9039 if (!MinUses || N1->use_size() < MinUses) 9040 return SDValue(); 9041 9042 // Find all FDIV users of the same divisor. 9043 // Use a set because duplicates may be present in the user list. 9044 SetVector<SDNode *> Users; 9045 for (auto *U : N1->uses()) { 9046 if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) { 9047 // This division is eligible for optimization only if global unsafe math 9048 // is enabled or if this division allows reciprocal formation. 9049 if (UnsafeMath || U->getFlags()->hasAllowReciprocal()) 9050 Users.insert(U); 9051 } 9052 } 9053 9054 // Now that we have the actual number of divisor uses, make sure it meets 9055 // the minimum threshold specified by the target. 9056 if (Users.size() < MinUses) 9057 return SDValue(); 9058 9059 EVT VT = N->getValueType(0); 9060 SDLoc DL(N); 9061 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT); 9062 SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1, Flags); 9063 9064 // Dividend / Divisor -> Dividend * Reciprocal 9065 for (auto *U : Users) { 9066 SDValue Dividend = U->getOperand(0); 9067 if (Dividend != FPOne) { 9068 SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend, 9069 Reciprocal, Flags); 9070 CombineTo(U, NewNode); 9071 } else if (U != Reciprocal.getNode()) { 9072 // In the absence of fast-math-flags, this user node is always the 9073 // same node as Reciprocal, but with FMF they may be different nodes. 9074 CombineTo(U, Reciprocal); 9075 } 9076 } 9077 return SDValue(N, 0); // N was replaced. 9078 } 9079 9080 SDValue DAGCombiner::visitFDIV(SDNode *N) { 9081 SDValue N0 = N->getOperand(0); 9082 SDValue N1 = N->getOperand(1); 9083 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9084 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9085 EVT VT = N->getValueType(0); 9086 SDLoc DL(N); 9087 const TargetOptions &Options = DAG.getTarget().Options; 9088 SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 9089 9090 // fold vector ops 9091 if (VT.isVector()) 9092 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 9093 return FoldedVOp; 9094 9095 // fold (fdiv c1, c2) -> c1/c2 9096 if (N0CFP && N1CFP) 9097 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1, Flags); 9098 9099 if (Options.UnsafeFPMath) { 9100 // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable. 9101 if (N1CFP) { 9102 // Compute the reciprocal 1.0 / c2. 9103 const APFloat &N1APF = N1CFP->getValueAPF(); 9104 APFloat Recip(N1APF.getSemantics(), 1); // 1.0 9105 APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven); 9106 // Only do the transform if the reciprocal is a legal fp immediate that 9107 // isn't too nasty (eg NaN, denormal, ...). 9108 if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty 9109 (!LegalOperations || 9110 // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM 9111 // backend)... we should handle this gracefully after Legalize. 
9112 // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) || 9113 TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) || 9114 TLI.isFPImmLegal(Recip, VT))) 9115 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9116 DAG.getConstantFP(Recip, DL, VT), Flags); 9117 } 9118 9119 // If this FDIV is part of a reciprocal square root, it may be folded 9120 // into a target-specific square root estimate instruction. 9121 if (N1.getOpcode() == ISD::FSQRT) { 9122 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags)) { 9123 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9124 } 9125 } else if (N1.getOpcode() == ISD::FP_EXTEND && 9126 N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9127 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0), 9128 Flags)) { 9129 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV); 9130 AddToWorklist(RV.getNode()); 9131 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9132 } 9133 } else if (N1.getOpcode() == ISD::FP_ROUND && 9134 N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9135 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0), 9136 Flags)) { 9137 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1)); 9138 AddToWorklist(RV.getNode()); 9139 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9140 } 9141 } else if (N1.getOpcode() == ISD::FMUL) { 9142 // Look through an FMUL. Even though this won't remove the FDIV directly, 9143 // it's still worthwhile to get rid of the FSQRT if possible. 9144 SDValue SqrtOp; 9145 SDValue OtherOp; 9146 if (N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9147 SqrtOp = N1.getOperand(0); 9148 OtherOp = N1.getOperand(1); 9149 } else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) { 9150 SqrtOp = N1.getOperand(1); 9151 OtherOp = N1.getOperand(0); 9152 } 9153 if (SqrtOp.getNode()) { 9154 // We found a FSQRT, so try to make this fold: 9155 // x / (y * sqrt(z)) -> x * (rsqrt(z) / y) 9156 if (SDValue RV = buildRsqrtEstimate(SqrtOp.getOperand(0), Flags)) { 9157 RV = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, RV, OtherOp, Flags); 9158 AddToWorklist(RV.getNode()); 9159 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9160 } 9161 } 9162 } 9163 9164 // Fold into a reciprocal estimate and multiply instead of a real divide. 9165 if (SDValue RV = BuildReciprocalEstimate(N1, Flags)) { 9166 AddToWorklist(RV.getNode()); 9167 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9168 } 9169 } 9170 9171 // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y) 9172 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) { 9173 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) { 9174 // Both can be negated for free, check to see if at least one is cheaper 9175 // negated. 
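      // A return value of 2 from isNegatibleForFree means the negated form is
      // expected to be strictly cheaper, not merely free.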
9176 if (LHSNeg == 2 || RHSNeg == 2) 9177 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, 9178 GetNegatedExpression(N0, DAG, LegalOperations), 9179 GetNegatedExpression(N1, DAG, LegalOperations), 9180 Flags); 9181 } 9182 } 9183 9184 if (SDValue CombineRepeatedDivisors = combineRepeatedFPDivisors(N)) 9185 return CombineRepeatedDivisors; 9186 9187 return SDValue(); 9188 } 9189 9190 SDValue DAGCombiner::visitFREM(SDNode *N) { 9191 SDValue N0 = N->getOperand(0); 9192 SDValue N1 = N->getOperand(1); 9193 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9194 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9195 EVT VT = N->getValueType(0); 9196 9197 // fold (frem c1, c2) -> fmod(c1,c2) 9198 if (N0CFP && N1CFP) 9199 return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1, 9200 &cast<BinaryWithFlagsSDNode>(N)->Flags); 9201 9202 return SDValue(); 9203 } 9204 9205 SDValue DAGCombiner::visitFSQRT(SDNode *N) { 9206 if (!DAG.getTarget().Options.UnsafeFPMath) 9207 return SDValue(); 9208 9209 SDValue N0 = N->getOperand(0); 9210 if (TLI.isFsqrtCheap(N0, DAG)) 9211 return SDValue(); 9212 9213 // TODO: FSQRT nodes should have flags that propagate to the created nodes. 9214 // For now, create a Flags object for use with all unsafe math transforms. 9215 SDNodeFlags Flags; 9216 Flags.setUnsafeAlgebra(true); 9217 return buildSqrtEstimate(N0, &Flags); 9218 } 9219 9220 /// copysign(x, fp_extend(y)) -> copysign(x, y) 9221 /// copysign(x, fp_round(y)) -> copysign(x, y) 9222 static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) { 9223 SDValue N1 = N->getOperand(1); 9224 if ((N1.getOpcode() == ISD::FP_EXTEND || 9225 N1.getOpcode() == ISD::FP_ROUND)) { 9226 // Do not optimize out type conversion of f128 type yet. 9227 // For some targets like x86_64, configuration is changed to keep one f128 9228 // value in one SSE register, but instruction selection cannot handle 9229 // FCOPYSIGN on SSE registers yet. 
9230 EVT N1VT = N1->getValueType(0); 9231 EVT N1Op0VT = N1->getOperand(0)->getValueType(0); 9232 return (N1VT == N1Op0VT || N1Op0VT != MVT::f128); 9233 } 9234 return false; 9235 } 9236 9237 SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { 9238 SDValue N0 = N->getOperand(0); 9239 SDValue N1 = N->getOperand(1); 9240 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9241 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9242 EVT VT = N->getValueType(0); 9243 9244 if (N0CFP && N1CFP) // Constant fold 9245 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1); 9246 9247 if (N1CFP) { 9248 const APFloat &V = N1CFP->getValueAPF(); 9249 // copysign(x, c1) -> fabs(x) iff ispos(c1) 9250 // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1) 9251 if (!V.isNegative()) { 9252 if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT)) 9253 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9254 } else { 9255 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 9256 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, 9257 DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0)); 9258 } 9259 } 9260 9261 // copysign(fabs(x), y) -> copysign(x, y) 9262 // copysign(fneg(x), y) -> copysign(x, y) 9263 // copysign(copysign(x,z), y) -> copysign(x, y) 9264 if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG || 9265 N0.getOpcode() == ISD::FCOPYSIGN) 9266 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0.getOperand(0), N1); 9267 9268 // copysign(x, abs(y)) -> abs(x) 9269 if (N1.getOpcode() == ISD::FABS) 9270 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9271 9272 // copysign(x, copysign(y,z)) -> copysign(x, z) 9273 if (N1.getOpcode() == ISD::FCOPYSIGN) 9274 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(1)); 9275 9276 // copysign(x, fp_extend(y)) -> copysign(x, y) 9277 // copysign(x, fp_round(y)) -> copysign(x, y) 9278 if (CanCombineFCOPYSIGN_EXTEND_ROUND(N)) 9279 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(0)); 9280 9281 return SDValue(); 9282 } 9283 9284 SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) { 9285 SDValue N0 = N->getOperand(0); 9286 EVT VT = N->getValueType(0); 9287 EVT OpVT = N0.getValueType(); 9288 9289 // fold (sint_to_fp c1) -> c1fp 9290 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 9291 // ...but only if the target supports immediate floating-point values 9292 (!LegalOperations || 9293 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) 9294 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0); 9295 9296 // If the input is a legal type, and SINT_TO_FP is not legal on this target, 9297 // but UINT_TO_FP is legal on this target, try to convert. 9298 if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) && 9299 TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) { 9300 // If the sign bit is known to be zero, we can change this to UINT_TO_FP. 9301 if (DAG.SignBitIsZero(N0)) 9302 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0); 9303 } 9304 9305 // The next optimizations are desirable only if SELECT_CC can be lowered. 
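  // Note that sint_to_fp of an i1 true value (all ones, i.e. -1) yields -1.0,
  // which is why the first fold below selects -1.0 rather than 1.0.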
9306   if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
9307     // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
9308     if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
9309         !VT.isVector() &&
9310         (!LegalOperations ||
9311          TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
9312       SDLoc DL(N);
9313       SDValue Ops[] =
9314         { N0.getOperand(0), N0.getOperand(1),
9315           DAG.getConstantFP(-1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
9316           N0.getOperand(2) };
9317       return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
9318     }
9319
9320     // fold (sint_to_fp (zext (setcc x, y, cc))) ->
9321     //      (select_cc x, y, 1.0, 0.0, cc)
9322     if (N0.getOpcode() == ISD::ZERO_EXTEND &&
9323         N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
9324         (!LegalOperations ||
9325          TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
9326       SDLoc DL(N);
9327       SDValue Ops[] =
9328         { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
9329           DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
9330           N0.getOperand(0).getOperand(2) };
9331       return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
9332     }
9333   }
9334
9335   return SDValue();
9336 }
9337
9338 SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
9339   SDValue N0 = N->getOperand(0);
9340   EVT VT = N->getValueType(0);
9341   EVT OpVT = N0.getValueType();
9342
9343   // fold (uint_to_fp c1) -> c1fp
9344   if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
9345       // ...but only if the target supports immediate floating-point values
9346       (!LegalOperations ||
9347        TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
9348     return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
9349
9350   // If the input is a legal type, and UINT_TO_FP is not legal on this target,
9351   // but SINT_TO_FP is legal on this target, try to convert.
9352   if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
9353       TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
9354     // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
9355     if (DAG.SignBitIsZero(N0))
9356       return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
9357   }
9358
9359   // The next optimizations are desirable only if SELECT_CC can be lowered.
9360   if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
9361     // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
9362
9363     if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
9364         (!LegalOperations ||
9365          TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
9366       SDLoc DL(N);
9367       SDValue Ops[] =
9368         { N0.getOperand(0), N0.getOperand(1),
9369           DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
9370           N0.getOperand(2) };
9371       return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
9372     }
9373   }
9374
9375   return SDValue();
9376 }
9377
9378 // Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
9379 static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
9380   SDValue N0 = N->getOperand(0);
9381   EVT VT = N->getValueType(0);
9382
9383   if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
9384     return SDValue();
9385
9386   SDValue Src = N0.getOperand(0);
9387   EVT SrcVT = Src.getValueType();
9388   bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
9389   bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;
9390
9391   // We can safely assume the conversion won't overflow the output range,
9392   // because (for example) (uint8_t)18293.f is undefined behavior.
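  // For example, (fp_to_sint (sint_to_fp i16 x)) through f32 folds to x:
  // an i16 carries 15 value bits, which fits in f32's 24-bit significand.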
9393 9394 // Since we can assume the conversion won't overflow, our decision as to 9395 // whether the input will fit in the float should depend on the minimum 9396 // of the input range and output range. 9397 9398 // This means this is also safe for a signed input and unsigned output, since 9399 // a negative input would lead to undefined behavior. 9400 unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned; 9401 unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned; 9402 unsigned ActualSize = std::min(InputSize, OutputSize); 9403 const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType()); 9404 9405 // We can only fold away the float conversion if the input range can be 9406 // represented exactly in the float range. 9407 if (APFloat::semanticsPrecision(sem) >= ActualSize) { 9408 if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) { 9409 unsigned ExtOp = IsInputSigned && IsOutputSigned ? ISD::SIGN_EXTEND 9410 : ISD::ZERO_EXTEND; 9411 return DAG.getNode(ExtOp, SDLoc(N), VT, Src); 9412 } 9413 if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits()) 9414 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src); 9415 return DAG.getBitcast(VT, Src); 9416 } 9417 return SDValue(); 9418 } 9419 9420 SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) { 9421 SDValue N0 = N->getOperand(0); 9422 EVT VT = N->getValueType(0); 9423 9424 // fold (fp_to_sint c1fp) -> c1 9425 if (isConstantFPBuildVectorOrConstantFP(N0)) 9426 return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0); 9427 9428 return FoldIntToFPToInt(N, DAG); 9429 } 9430 9431 SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) { 9432 SDValue N0 = N->getOperand(0); 9433 EVT VT = N->getValueType(0); 9434 9435 // fold (fp_to_uint c1fp) -> c1 9436 if (isConstantFPBuildVectorOrConstantFP(N0)) 9437 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0); 9438 9439 return FoldIntToFPToInt(N, DAG); 9440 } 9441 9442 SDValue DAGCombiner::visitFP_ROUND(SDNode *N) { 9443 SDValue N0 = N->getOperand(0); 9444 SDValue N1 = N->getOperand(1); 9445 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9446 EVT VT = N->getValueType(0); 9447 9448 // fold (fp_round c1fp) -> c1fp 9449 if (N0CFP) 9450 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1); 9451 9452 // fold (fp_round (fp_extend x)) -> x 9453 if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType()) 9454 return N0.getOperand(0); 9455 9456 // fold (fp_round (fp_round x)) -> (fp_round x) 9457 if (N0.getOpcode() == ISD::FP_ROUND) { 9458 const bool NIsTrunc = N->getConstantOperandVal(1) == 1; 9459 const bool N0IsTrunc = N0.getConstantOperandVal(1) == 1; 9460 9461 // Skip this folding if it results in an fp_round from f80 to f16. 9462 // 9463 // f80 to f16 always generates an expensive (and as yet, unimplemented) 9464 // libcall to __truncxfhf2 instead of selecting native f16 conversion 9465 // instructions from f32 or f64. Moreover, the first (value-preserving) 9466 // fp_round from f80 to either f32 or f64 may become a NOP in platforms like 9467 // x86. 9468 if (N0.getOperand(0).getValueType() == MVT::f80 && VT == MVT::f16) 9469 return SDValue(); 9470 9471 // If the first fp_round isn't a value preserving truncation, it might 9472 // introduce a tie in the second fp_round, that wouldn't occur in the 9473 // single-step fp_round we want to fold to. 9474 // In other words, double rounding isn't the same as rounding. 9475 // Also, this is a value preserving truncation iff both fp_round's are. 
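  // For example, rounding f64 -> f32 -> f16 can differ from rounding
  // f64 -> f16 directly when the first, wider rounding creates a new tie.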
9476 if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) { 9477 SDLoc DL(N); 9478 return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0), 9479 DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL)); 9480 } 9481 } 9482 9483 // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y) 9484 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) { 9485 SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT, 9486 N0.getOperand(0), N1); 9487 AddToWorklist(Tmp.getNode()); 9488 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, 9489 Tmp, N0.getOperand(1)); 9490 } 9491 9492 return SDValue(); 9493 } 9494 9495 SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { 9496 SDValue N0 = N->getOperand(0); 9497 EVT VT = N->getValueType(0); 9498 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 9499 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9500 9501 // fold (fp_round_inreg c1fp) -> c1fp 9502 if (N0CFP && isTypeLegal(EVT)) { 9503 SDLoc DL(N); 9504 SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), DL, EVT); 9505 return DAG.getNode(ISD::FP_EXTEND, DL, VT, Round); 9506 } 9507 9508 return SDValue(); 9509 } 9510 9511 SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) { 9512 SDValue N0 = N->getOperand(0); 9513 EVT VT = N->getValueType(0); 9514 9515 // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. 9516 if (N->hasOneUse() && 9517 N->use_begin()->getOpcode() == ISD::FP_ROUND) 9518 return SDValue(); 9519 9520 // fold (fp_extend c1fp) -> c1fp 9521 if (isConstantFPBuildVectorOrConstantFP(N0)) 9522 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0); 9523 9524 // fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op) 9525 if (N0.getOpcode() == ISD::FP16_TO_FP && 9526 TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal) 9527 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0)); 9528 9529 // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the 9530 // value of X. 9531 if (N0.getOpcode() == ISD::FP_ROUND 9532 && N0.getConstantOperandVal(1) == 1) { 9533 SDValue In = N0.getOperand(0); 9534 if (In.getValueType() == VT) return In; 9535 if (VT.bitsLT(In.getValueType())) 9536 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, 9537 In, N0.getOperand(1)); 9538 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In); 9539 } 9540 9541 // fold (fpext (load x)) -> (fpext (fptrunc (extload x))) 9542 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && 9543 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) { 9544 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 9545 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, 9546 LN0->getChain(), 9547 LN0->getBasePtr(), N0.getValueType(), 9548 LN0->getMemOperand()); 9549 CombineTo(N, ExtLoad); 9550 CombineTo(N0.getNode(), 9551 DAG.getNode(ISD::FP_ROUND, SDLoc(N0), 9552 N0.getValueType(), ExtLoad, 9553 DAG.getIntPtrConstant(1, SDLoc(N0))), 9554 ExtLoad.getValue(1)); 9555 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
9556 } 9557 9558 return SDValue(); 9559 } 9560 9561 SDValue DAGCombiner::visitFCEIL(SDNode *N) { 9562 SDValue N0 = N->getOperand(0); 9563 EVT VT = N->getValueType(0); 9564 9565 // fold (fceil c1) -> fceil(c1) 9566 if (isConstantFPBuildVectorOrConstantFP(N0)) 9567 return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0); 9568 9569 return SDValue(); 9570 } 9571 9572 SDValue DAGCombiner::visitFTRUNC(SDNode *N) { 9573 SDValue N0 = N->getOperand(0); 9574 EVT VT = N->getValueType(0); 9575 9576 // fold (ftrunc c1) -> ftrunc(c1) 9577 if (isConstantFPBuildVectorOrConstantFP(N0)) 9578 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0); 9579 9580 return SDValue(); 9581 } 9582 9583 SDValue DAGCombiner::visitFFLOOR(SDNode *N) { 9584 SDValue N0 = N->getOperand(0); 9585 EVT VT = N->getValueType(0); 9586 9587 // fold (ffloor c1) -> ffloor(c1) 9588 if (isConstantFPBuildVectorOrConstantFP(N0)) 9589 return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0); 9590 9591 return SDValue(); 9592 } 9593 9594 // FIXME: FNEG and FABS have a lot in common; refactor. 9595 SDValue DAGCombiner::visitFNEG(SDNode *N) { 9596 SDValue N0 = N->getOperand(0); 9597 EVT VT = N->getValueType(0); 9598 9599 // Constant fold FNEG. 9600 if (isConstantFPBuildVectorOrConstantFP(N0)) 9601 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0); 9602 9603 if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(), 9604 &DAG.getTarget().Options)) 9605 return GetNegatedExpression(N0, DAG, LegalOperations); 9606 9607 // Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading 9608 // constant pool values. 9609 if (!TLI.isFNegFree(VT) && 9610 N0.getOpcode() == ISD::BITCAST && 9611 N0.getNode()->hasOneUse()) { 9612 SDValue Int = N0.getOperand(0); 9613 EVT IntVT = Int.getValueType(); 9614 if (IntVT.isInteger() && !IntVT.isVector()) { 9615 APInt SignMask; 9616 if (N0.getValueType().isVector()) { 9617 // For a vector, get a mask such as 0x80... per scalar element 9618 // and splat it. 9619 SignMask = APInt::getSignBit(N0.getScalarValueSizeInBits()); 9620 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask); 9621 } else { 9622 // For a scalar, just generate 0x80... 
9623 SignMask = APInt::getSignBit(IntVT.getSizeInBits()); 9624 } 9625 SDLoc DL0(N0); 9626 Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int, 9627 DAG.getConstant(SignMask, DL0, IntVT)); 9628 AddToWorklist(Int.getNode()); 9629 return DAG.getBitcast(VT, Int); 9630 } 9631 } 9632 9633 // (fneg (fmul c, x)) -> (fmul -c, x) 9634 if (N0.getOpcode() == ISD::FMUL && 9635 (N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) { 9636 ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 9637 if (CFP1) { 9638 APFloat CVal = CFP1->getValueAPF(); 9639 CVal.changeSign(); 9640 if (Level >= AfterLegalizeDAG && 9641 (TLI.isFPImmLegal(CVal, VT) || 9642 TLI.isOperationLegal(ISD::ConstantFP, VT))) 9643 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0.getOperand(0), 9644 DAG.getNode(ISD::FNEG, SDLoc(N), VT, 9645 N0.getOperand(1)), 9646 &cast<BinaryWithFlagsSDNode>(N0)->Flags); 9647 } 9648 } 9649 9650 return SDValue(); 9651 } 9652 9653 SDValue DAGCombiner::visitFMINNUM(SDNode *N) { 9654 SDValue N0 = N->getOperand(0); 9655 SDValue N1 = N->getOperand(1); 9656 EVT VT = N->getValueType(0); 9657 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 9658 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 9659 9660 if (N0CFP && N1CFP) { 9661 const APFloat &C0 = N0CFP->getValueAPF(); 9662 const APFloat &C1 = N1CFP->getValueAPF(); 9663 return DAG.getConstantFP(minnum(C0, C1), SDLoc(N), VT); 9664 } 9665 9666 // Canonicalize to constant on RHS. 9667 if (isConstantFPBuildVectorOrConstantFP(N0) && 9668 !isConstantFPBuildVectorOrConstantFP(N1)) 9669 return DAG.getNode(ISD::FMINNUM, SDLoc(N), VT, N1, N0); 9670 9671 return SDValue(); 9672 } 9673 9674 SDValue DAGCombiner::visitFMAXNUM(SDNode *N) { 9675 SDValue N0 = N->getOperand(0); 9676 SDValue N1 = N->getOperand(1); 9677 EVT VT = N->getValueType(0); 9678 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 9679 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 9680 9681 if (N0CFP && N1CFP) { 9682 const APFloat &C0 = N0CFP->getValueAPF(); 9683 const APFloat &C1 = N1CFP->getValueAPF(); 9684 return DAG.getConstantFP(maxnum(C0, C1), SDLoc(N), VT); 9685 } 9686 9687 // Canonicalize to constant on RHS. 9688 if (isConstantFPBuildVectorOrConstantFP(N0) && 9689 !isConstantFPBuildVectorOrConstantFP(N1)) 9690 return DAG.getNode(ISD::FMAXNUM, SDLoc(N), VT, N1, N0); 9691 9692 return SDValue(); 9693 } 9694 9695 SDValue DAGCombiner::visitFABS(SDNode *N) { 9696 SDValue N0 = N->getOperand(0); 9697 EVT VT = N->getValueType(0); 9698 9699 // fold (fabs c1) -> fabs(c1) 9700 if (isConstantFPBuildVectorOrConstantFP(N0)) 9701 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9702 9703 // fold (fabs (fabs x)) -> (fabs x) 9704 if (N0.getOpcode() == ISD::FABS) 9705 return N->getOperand(0); 9706 9707 // fold (fabs (fneg x)) -> (fabs x) 9708 // fold (fabs (fcopysign x, y)) -> (fabs x) 9709 if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN) 9710 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0)); 9711 9712 // Transform fabs(bitconvert(x)) -> bitconvert(x & ~sign) to avoid loading 9713 // constant pool values. 9714 if (!TLI.isFAbsFree(VT) && 9715 N0.getOpcode() == ISD::BITCAST && 9716 N0.getNode()->hasOneUse()) { 9717 SDValue Int = N0.getOperand(0); 9718 EVT IntVT = Int.getValueType(); 9719 if (IntVT.isInteger() && !IntVT.isVector()) { 9720 APInt SignMask; 9721 if (N0.getValueType().isVector()) { 9722 // For a vector, get a mask such as 0x7f... per scalar element 9723 // and splat it. 
9724         SignMask = ~APInt::getSignBit(N0.getScalarValueSizeInBits());
9725         SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
9726       } else {
9727         // For a scalar, just generate 0x7f...
9728         SignMask = ~APInt::getSignBit(IntVT.getSizeInBits());
9729       }
9730       SDLoc DL(N0);
9731       Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
9732                         DAG.getConstant(SignMask, DL, IntVT));
9733       AddToWorklist(Int.getNode());
9734       return DAG.getBitcast(N->getValueType(0), Int);
9735     }
9736   }
9737
9738   return SDValue();
9739 }
9740
9741 SDValue DAGCombiner::visitBRCOND(SDNode *N) {
9742   SDValue Chain = N->getOperand(0);
9743   SDValue N1 = N->getOperand(1);
9744   SDValue N2 = N->getOperand(2);
9745
9746   // If N is a constant we could fold this into a fallthrough or unconditional
9747   // branch. However that doesn't happen very often in normal code, because
9748   // Instcombine/SimplifyCFG should have handled the available opportunities.
9749   // If we did this folding here, it would be necessary to update the
9750   // MachineBasicBlock CFG, which is awkward.
9751
9752   // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
9753   // on the target.
9754   if (N1.getOpcode() == ISD::SETCC &&
9755       TLI.isOperationLegalOrCustom(ISD::BR_CC,
9756                                    N1.getOperand(0).getValueType())) {
9757     return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
9758                        Chain, N1.getOperand(2),
9759                        N1.getOperand(0), N1.getOperand(1), N2);
9760   }
9761
9762   if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
9763       ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
9764        (N1.getOperand(0).hasOneUse() &&
9765         N1.getOperand(0).getOpcode() == ISD::SRL))) {
9766     SDNode *Trunc = nullptr;
9767     if (N1.getOpcode() == ISD::TRUNCATE) {
9768       // Look past the truncate.
9769       Trunc = N1.getNode();
9770       N1 = N1.getOperand(0);
9771     }
9772
9773     // Match this pattern so that we can generate simpler code:
9774     //
9775     //   %a = ...
9776     //   %b = and i32 %a, 2
9777     //   %c = srl i32 %b, 1
9778     //   brcond i32 %c ...
9779     //
9780     // into
9781     //
9782     //   %a = ...
9783     //   %b = and i32 %a, 2
9784     //   %c = setcc eq %b, 0
9785     //   brcond %c ...
9786     //
9787     // This applies only when the AND constant value has one bit set and the
9788     // SRL constant is equal to the log2 of the AND constant. The back-end is
9789     // smart enough to convert the result into a TEST/JMP sequence.
9790     SDValue Op0 = N1.getOperand(0);
9791     SDValue Op1 = N1.getOperand(1);
9792
9793     if (Op0.getOpcode() == ISD::AND &&
9794         Op1.getOpcode() == ISD::Constant) {
9795       SDValue AndOp1 = Op0.getOperand(1);
9796
9797       if (AndOp1.getOpcode() == ISD::Constant) {
9798         const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
9799
9800         if (AndConst.isPowerOf2() &&
9801             cast<ConstantSDNode>(Op1)->getAPIntValue() == AndConst.logBase2()) {
9802           SDLoc DL(N);
9803           SDValue SetCC =
9804             DAG.getSetCC(DL,
9805                          getSetCCResultType(Op0.getValueType()),
9806                          Op0, DAG.getConstant(0, DL, Op0.getValueType()),
9807                          ISD::SETNE);
9808
9809           SDValue NewBRCond = DAG.getNode(ISD::BRCOND, DL,
9810                                           MVT::Other, Chain, SetCC, N2);
9811           // Don't add the new BRCond into the worklist or else SimplifySelectCC
9812           // will convert it back to (X & C1) >> C2.
9813           CombineTo(N, NewBRCond, false);
9814           // Truncate is dead.
9815           if (Trunc)
9816             deleteAndRecombine(Trunc);
9817           // Replace the uses of SRL with SETCC
9818           WorklistRemover DeadNodes(*this);
9819           DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
9820           deleteAndRecombine(N1.getNode());
9821           return SDValue(N, 0); // Return N so it doesn't get rechecked!
9822 } 9823 } 9824 } 9825 9826 if (Trunc) 9827 // Restore N1 if the above transformation doesn't match. 9828 N1 = N->getOperand(1); 9829 } 9830 9831 // Transform br(xor(x, y)) -> br(x != y) 9832 // Transform br(xor(xor(x,y), 1)) -> br (x == y) 9833 if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) { 9834 SDNode *TheXor = N1.getNode(); 9835 SDValue Op0 = TheXor->getOperand(0); 9836 SDValue Op1 = TheXor->getOperand(1); 9837 if (Op0.getOpcode() == Op1.getOpcode()) { 9838 // Avoid missing important xor optimizations. 9839 if (SDValue Tmp = visitXOR(TheXor)) { 9840 if (Tmp.getNode() != TheXor) { 9841 DEBUG(dbgs() << "\nReplacing.8 "; 9842 TheXor->dump(&DAG); 9843 dbgs() << "\nWith: "; 9844 Tmp.getNode()->dump(&DAG); 9845 dbgs() << '\n'); 9846 WorklistRemover DeadNodes(*this); 9847 DAG.ReplaceAllUsesOfValueWith(N1, Tmp); 9848 deleteAndRecombine(TheXor); 9849 return DAG.getNode(ISD::BRCOND, SDLoc(N), 9850 MVT::Other, Chain, Tmp, N2); 9851 } 9852 9853 // visitXOR has changed XOR's operands or replaced the XOR completely, 9854 // bail out. 9855 return SDValue(N, 0); 9856 } 9857 } 9858 9859 if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) { 9860 bool Equal = false; 9861 if (isOneConstant(Op0) && Op0.hasOneUse() && 9862 Op0.getOpcode() == ISD::XOR) { 9863 TheXor = Op0.getNode(); 9864 Equal = true; 9865 } 9866 9867 EVT SetCCVT = N1.getValueType(); 9868 if (LegalTypes) 9869 SetCCVT = getSetCCResultType(SetCCVT); 9870 SDValue SetCC = DAG.getSetCC(SDLoc(TheXor), 9871 SetCCVT, 9872 Op0, Op1, 9873 Equal ? ISD::SETEQ : ISD::SETNE); 9874 // Replace the uses of XOR with SETCC 9875 WorklistRemover DeadNodes(*this); 9876 DAG.ReplaceAllUsesOfValueWith(N1, SetCC); 9877 deleteAndRecombine(N1.getNode()); 9878 return DAG.getNode(ISD::BRCOND, SDLoc(N), 9879 MVT::Other, Chain, SetCC, N2); 9880 } 9881 } 9882 9883 return SDValue(); 9884 } 9885 9886 // Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB. 9887 // 9888 SDValue DAGCombiner::visitBR_CC(SDNode *N) { 9889 CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1)); 9890 SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3); 9891 9892 // If N is a constant we could fold this into a fallthrough or unconditional 9893 // branch. However that doesn't happen very often in normal code, because 9894 // Instcombine/SimplifyCFG should have handled the available opportunities. 9895 // If we did this folding here, it would be necessary to update the 9896 // MachineBasicBlock CFG, which is awkward. 9897 9898 // Use SimplifySetCC to simplify SETCC's. 9899 SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()), 9900 CondLHS, CondRHS, CC->get(), SDLoc(N), 9901 false); 9902 if (Simp.getNode()) AddToWorklist(Simp.getNode()); 9903 9904 // fold to a simpler setcc 9905 if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC) 9906 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other, 9907 N->getOperand(0), Simp.getOperand(2), 9908 Simp.getOperand(0), Simp.getOperand(1), 9909 N->getOperand(4)); 9910 9911 return SDValue(); 9912 } 9913 9914 /// Return true if 'Use' is a load or a store that uses N as its base pointer 9915 /// and that N may be folded in the load / store addressing mode. 
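/// For example, if N is (add x, 8) and the target supports [reg + imm]
/// addressing for the memory type, the add can be folded into the access.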
9916 static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
9917                                     SelectionDAG &DAG,
9918                                     const TargetLowering &TLI) {
9919   EVT VT;
9920   unsigned AS;
9921
9922   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
9923     if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
9924       return false;
9925     VT = LD->getMemoryVT();
9926     AS = LD->getAddressSpace();
9927   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
9928     if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
9929       return false;
9930     VT = ST->getMemoryVT();
9931     AS = ST->getAddressSpace();
9932   } else
9933     return false;
9934
9935   TargetLowering::AddrMode AM;
9936   if (N->getOpcode() == ISD::ADD) {
9937     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
9938     if (Offset)
9939       // [reg +/- imm]
9940       AM.BaseOffs = Offset->getSExtValue();
9941     else
9942       // [reg +/- reg]
9943       AM.Scale = 1;
9944   } else if (N->getOpcode() == ISD::SUB) {
9945     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
9946     if (Offset)
9947       // [reg +/- imm]
9948       AM.BaseOffs = -Offset->getSExtValue();
9949     else
9950       // [reg +/- reg]
9951       AM.Scale = 1;
9952   } else
9953     return false;
9954
9955   return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
9956                                    VT.getTypeForEVT(*DAG.getContext()), AS);
9957 }
9958
9959 /// Try turning a load/store into a pre-indexed load/store when the base
9960 /// pointer is an add or subtract and it has other uses besides the load/store.
9961 /// After the transformation, the new indexed load/store has effectively folded
9962 /// the add/subtract in and all of its other uses are redirected to the
9963 /// new load/store.
9964 bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
9965   if (Level < AfterLegalizeDAG)
9966     return false;
9967
9968   bool isLoad = true;
9969   SDValue Ptr;
9970   EVT VT;
9971   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
9972     if (LD->isIndexed())
9973       return false;
9974     VT = LD->getMemoryVT();
9975     if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
9976         !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
9977       return false;
9978     Ptr = LD->getBasePtr();
9979   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
9980     if (ST->isIndexed())
9981       return false;
9982     VT = ST->getMemoryVT();
9983     if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
9984         !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
9985       return false;
9986     Ptr = ST->getBasePtr();
9987     isLoad = false;
9988   } else {
9989     return false;
9990   }
9991
9992   // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
9993   // out. There is no reason to make this a preinc/predec.
9994   if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
9995       Ptr.getNode()->hasOneUse())
9996     return false;
9997
9998   // Ask the target to do addressing mode selection.
9999   SDValue BasePtr;
10000  SDValue Offset;
10001  ISD::MemIndexedMode AM = ISD::UNINDEXED;
10002  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
10003    return false;
10004
10005  // Backends without true r+i pre-indexed forms may need to pass a
10006  // constant base with a variable offset so that constant coercion
10007  // will work with the patterns in canonical form.
10008  bool Swapped = false;
10009  if (isa<ConstantSDNode>(BasePtr)) {
10010    std::swap(BasePtr, Offset);
10011    Swapped = true;
10012  }
10013
10014  // Don't create an indexed load / store with zero offset.
10015  if (isNullConstant(Offset))
10016    return false;
10017
10018  // Try turning it into a pre-indexed load / store except when:
10019  // 1) The new base ptr is a frame index.
10020 // 2) If N is a store and the new base ptr is either the same as or is a 10021 // predecessor of the value being stored. 10022 // 3) Another use of old base ptr is a predecessor of N. If ptr is folded 10023 // that would create a cycle. 10024 // 4) All uses are load / store ops that use it as old base ptr. 10025 10026 // Check #1. Preinc'ing a frame index would require copying the stack pointer 10027 // (plus the implicit offset) to a register to preinc anyway. 10028 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr)) 10029 return false; 10030 10031 // Check #2. 10032 if (!isLoad) { 10033 SDValue Val = cast<StoreSDNode>(N)->getValue(); 10034 if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode())) 10035 return false; 10036 } 10037 10038 // Caches for hasPredecessorHelper. 10039 SmallPtrSet<const SDNode *, 32> Visited; 10040 SmallVector<const SDNode *, 16> Worklist; 10041 Worklist.push_back(N); 10042 10043 // If the offset is a constant, there may be other adds of constants that 10044 // can be folded with this one. We should do this to avoid having to keep 10045 // a copy of the original base pointer. 10046 SmallVector<SDNode *, 16> OtherUses; 10047 if (isa<ConstantSDNode>(Offset)) 10048 for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(), 10049 UE = BasePtr.getNode()->use_end(); 10050 UI != UE; ++UI) { 10051 SDUse &Use = UI.getUse(); 10052 // Skip the use that is Ptr and uses of other results from BasePtr's 10053 // node (important for nodes that return multiple results). 10054 if (Use.getUser() == Ptr.getNode() || Use != BasePtr) 10055 continue; 10056 10057 if (SDNode::hasPredecessorHelper(Use.getUser(), Visited, Worklist)) 10058 continue; 10059 10060 if (Use.getUser()->getOpcode() != ISD::ADD && 10061 Use.getUser()->getOpcode() != ISD::SUB) { 10062 OtherUses.clear(); 10063 break; 10064 } 10065 10066 SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1); 10067 if (!isa<ConstantSDNode>(Op1)) { 10068 OtherUses.clear(); 10069 break; 10070 } 10071 10072 // FIXME: In some cases, we can be smarter about this. 10073 if (Op1.getValueType() != Offset.getValueType()) { 10074 OtherUses.clear(); 10075 break; 10076 } 10077 10078 OtherUses.push_back(Use.getUser()); 10079 } 10080 10081 if (Swapped) 10082 std::swap(BasePtr, Offset); 10083 10084 // Now check for #3 and #4. 10085 bool RealUse = false; 10086 10087 for (SDNode *Use : Ptr.getNode()->uses()) { 10088 if (Use == N) 10089 continue; 10090 if (SDNode::hasPredecessorHelper(Use, Visited, Worklist)) 10091 return false; 10092 10093 // If Ptr may be folded in addressing mode of other use, then it's 10094 // not profitable to do this transformation. 
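    // E.g., if every other use of Ptr can fold the add/sub into its own
    // addressing mode, the arithmetic is already free and pre-indexing
    // would save nothing.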
10095    if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
10096      RealUse = true;
10097  }
10098
10099  if (!RealUse)
10100    return false;
10101
10102  SDValue Result;
10103  if (isLoad)
10104    Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
10105                                BasePtr, Offset, AM);
10106  else
10107    Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
10108                                 BasePtr, Offset, AM);
10109  ++PreIndexedNodes;
10110  ++NodesCombined;
10111  DEBUG(dbgs() << "\nReplacing.4 ";
10112        N->dump(&DAG);
10113        dbgs() << "\nWith: ";
10114        Result.getNode()->dump(&DAG);
10115        dbgs() << '\n');
10116  WorklistRemover DeadNodes(*this);
10117  if (isLoad) {
10118    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
10119    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
10120  } else {
10121    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
10122  }
10123
10124  // Finally, since the node is now dead, remove it from the graph.
10125  deleteAndRecombine(N);
10126
10127  if (Swapped)
10128    std::swap(BasePtr, Offset);
10129
10130  // Replace other uses of BasePtr that can be updated to use Ptr
10131  for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
10132    unsigned OffsetIdx = 1;
10133    if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
10134      OffsetIdx = 0;
10135    assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
10136           BasePtr.getNode() && "Expected BasePtr operand");
10137
10138    // We need to replace ptr0 in the following expression:
10139    //   x0 * offset0 + y0 * ptr0 = t0
10140    // knowing that
10141    //   x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
10142    //
10143    // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
10144    // indexed load/store and the expression that needs to be rewritten.
10145    //
10146    // Therefore, we have:
10147    //   t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
10148
10149    ConstantSDNode *CN =
10150      cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
10151    int X0, X1, Y0, Y1;
10152    const APInt &Offset0 = CN->getAPIntValue();
10153    APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
10154
10155    X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
10156    Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
10157    X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
10158    Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
10159
10160    unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
10161
10162    APInt CNV = Offset0;
10163    if (X0 < 0) CNV = -CNV;
10164    if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
10165    else CNV = CNV - Offset1;
10166
10167    SDLoc DL(OtherUses[i]);
10168
10169    // We can now generate the new expression.
10170    SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0));
10171    SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);
10172
10173    SDValue NewUse = DAG.getNode(Opcode,
10174                                 DL,
10175                                 OtherUses[i]->getValueType(0), NewOp1, NewOp2);
10176    DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
10177    deleteAndRecombine(OtherUses[i]);
10178  }
10179
10180  // Replace the uses of Ptr with uses of the updated base value.
10181  DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
10182  deleteAndRecombine(Ptr.getNode());
10183
10184  return true;
10185 }
10186
10187 /// Try to combine a load/store with an add/sub of the base pointer node into
10188 /// a post-indexed load/store. The transformation effectively folds the
10189 /// add/subtract into the new indexed load/store and all other uses of the
10190 /// add/subtract are redirected to the new load/store.
10191 bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
10192   if (Level < AfterLegalizeDAG)
10193     return false;
10194
10195   bool isLoad = true;
10196   SDValue Ptr;
10197   EVT VT;
10198   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
10199     if (LD->isIndexed())
10200       return false;
10201     VT = LD->getMemoryVT();
10202     if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
10203         !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
10204       return false;
10205     Ptr = LD->getBasePtr();
10206   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
10207     if (ST->isIndexed())
10208       return false;
10209     VT = ST->getMemoryVT();
10210     if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
10211         !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
10212       return false;
10213     Ptr = ST->getBasePtr();
10214     isLoad = false;
10215   } else {
10216     return false;
10217   }
10218
10219   if (Ptr.getNode()->hasOneUse())
10220     return false;
10221
10222   for (SDNode *Op : Ptr.getNode()->uses()) {
10223     if (Op == N ||
10224         (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
10225       continue;
10226
10227     SDValue BasePtr;
10228     SDValue Offset;
10229     ISD::MemIndexedMode AM = ISD::UNINDEXED;
10230     if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
10231       // Don't create an indexed load / store with zero offset.
10232       if (isNullConstant(Offset))
10233         continue;
10234
10235       // Try turning it into a post-indexed load / store except when
10236       // 1) All uses are load / store ops that use it as base ptr (and
10237       //    it may be folded as addressing mode).
10238       // 2) Op must be independent of N, i.e. Op is neither a predecessor
10239       //    nor a successor of N. Otherwise, if Op is folded that would
10240       //    create a cycle.
10241
10242       if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
10243         continue;
10244
10245       // Check for #1.
10246       bool TryNext = false;
10247       for (SDNode *Use : BasePtr.getNode()->uses()) {
10248         if (Use == Ptr.getNode())
10249           continue;
10250
10251         // If all the uses are load / store addresses, then don't do the
10252         // transformation.
10253         if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) {
10254           bool RealUse = false;
10255           for (SDNode *UseUse : Use->uses()) {
10256             if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
10257               RealUse = true;
10258           }
10259
10260           if (!RealUse) {
10261             TryNext = true;
10262             break;
10263           }
10264         }
10265       }
10266
10267       if (TryNext)
10268         continue;
10269
10270       // Check for #2.
10271       if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
10272         SDValue Result = isLoad
10273           ? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
10274                                BasePtr, Offset, AM)
10275           : DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
10276                                 BasePtr, Offset, AM);
10277         ++PostIndexedNodes;
10278         ++NodesCombined;
10279         DEBUG(dbgs() << "\nReplacing.5 ";
10280               N->dump(&DAG);
10281               dbgs() << "\nWith: ";
10282               Result.getNode()->dump(&DAG);
10283               dbgs() << '\n');
10284         WorklistRemover DeadNodes(*this);
10285         if (isLoad) {
10286           DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
10287           DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
10288         } else {
10289           DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
10290         }
10291
10292         // Finally, since the node is now dead, remove it from the graph.
10293         deleteAndRecombine(N);
10294
10295         // Replace the uses of Op with uses of the updated base value.
10296 DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0), 10297 Result.getValue(isLoad ? 1 : 0)); 10298 deleteAndRecombine(Op); 10299 return true; 10300 } 10301 } 10302 } 10303 10304 return false; 10305 } 10306 10307 /// \brief Return the base-pointer arithmetic from an indexed \p LD. 10308 SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) { 10309 ISD::MemIndexedMode AM = LD->getAddressingMode(); 10310 assert(AM != ISD::UNINDEXED); 10311 SDValue BP = LD->getOperand(1); 10312 SDValue Inc = LD->getOperand(2); 10313 10314 // Some backends use TargetConstants for load offsets, but don't expect 10315 // TargetConstants in general ADD nodes. We can convert these constants into 10316 // regular Constants (if the constant is not opaque). 10317 assert((Inc.getOpcode() != ISD::TargetConstant || 10318 !cast<ConstantSDNode>(Inc)->isOpaque()) && 10319 "Cannot split out indexing using opaque target constants"); 10320 if (Inc.getOpcode() == ISD::TargetConstant) { 10321 ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc); 10322 Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc), 10323 ConstInc->getValueType(0)); 10324 } 10325 10326 unsigned Opc = 10327 (AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB); 10328 return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc); 10329 } 10330 10331 SDValue DAGCombiner::visitLOAD(SDNode *N) { 10332 LoadSDNode *LD = cast<LoadSDNode>(N); 10333 SDValue Chain = LD->getChain(); 10334 SDValue Ptr = LD->getBasePtr(); 10335 10336 // If load is not volatile and there are no uses of the loaded value (and 10337 // the updated indexed value in case of indexed loads), change uses of the 10338 // chain value into uses of the chain input (i.e. delete the dead load). 10339 if (!LD->isVolatile()) { 10340 if (N->getValueType(1) == MVT::Other) { 10341 // Unindexed loads. 10342 if (!N->hasAnyUseOfValue(0)) { 10343 // It's not safe to use the two value CombineTo variant here. e.g. 10344 // v1, chain2 = load chain1, loc 10345 // v2, chain3 = load chain2, loc 10346 // v3 = add v2, c 10347 // Now we replace use of chain2 with chain1. This makes the second load 10348 // isomorphic to the one we are deleting, and thus makes this load live. 10349 DEBUG(dbgs() << "\nReplacing.6 "; 10350 N->dump(&DAG); 10351 dbgs() << "\nWith chain: "; 10352 Chain.getNode()->dump(&DAG); 10353 dbgs() << "\n"); 10354 WorklistRemover DeadNodes(*this); 10355 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain); 10356 10357 if (N->use_empty()) 10358 deleteAndRecombine(N); 10359 10360 return SDValue(N, 0); // Return N so it doesn't get rechecked! 10361 } 10362 } else { 10363 // Indexed loads. 10364 assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?"); 10365 10366 // If this load has an opaque TargetConstant offset, then we cannot split 10367 // the indexing into an add/sub directly (that TargetConstant may not be 10368 // valid for a different type of node, and we cannot convert an opaque 10369 // target constant into a regular constant). 
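  // SplitIndexingFromLoad (above) performs exactly that conversion and
  // asserts on opaque constants, so we must not call it in that case.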
10370   bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
10371                    cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();
10372
10373   if (!N->hasAnyUseOfValue(0) &&
10374       ((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
10375     SDValue Undef = DAG.getUNDEF(N->getValueType(0));
10376     SDValue Index;
10377     if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
10378       Index = SplitIndexingFromLoad(LD);
10379       // Try to fold the base pointer arithmetic into subsequent loads and
10380       // stores.
10381       AddUsersToWorklist(N);
10382     } else
10383       Index = DAG.getUNDEF(N->getValueType(1));
10384     DEBUG(dbgs() << "\nReplacing.7 ";
10385           N->dump(&DAG);
10386           dbgs() << "\nWith: ";
10387           Undef.getNode()->dump(&DAG);
10388           dbgs() << " and 2 other values\n");
10389     WorklistRemover DeadNodes(*this);
10390     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
10391     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
10392     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
10393     deleteAndRecombine(N);
10394     return SDValue(N, 0); // Return N so it doesn't get rechecked!
10395       }
10396     }
10397   }
10398
10399   // If this load is directly stored, replace the load value with the stored
10400   // value.
10401   // TODO: Handle store large -> read small portion.
10402   // TODO: Handle TRUNCSTORE/LOADEXT
10403   if (OptLevel != CodeGenOpt::None &&
10404       ISD::isNormalLoad(N) && !LD->isVolatile()) {
10405     if (ISD::isNON_TRUNCStore(Chain.getNode())) {
10406       StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
10407       if (PrevST->getBasePtr() == Ptr &&
10408           PrevST->getValue().getValueType() == N->getValueType(0))
10409         return CombineTo(N, Chain.getOperand(1), Chain);
10410     }
10411   }
10412
10413   // Try to infer better alignment information than the load already has.
10414   if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
10415     if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
10416       if (Align > LD->getMemOperand()->getBaseAlignment()) {
10417         SDValue NewLoad = DAG.getExtLoad(
10418             LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr,
10419             LD->getPointerInfo(), LD->getMemoryVT(), Align,
10420             LD->getMemOperand()->getFlags(), LD->getAAInfo());
10421         if (NewLoad.getNode() != N)
10422           return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true);
10423       }
10424     }
10425   }
10426
10427   bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
10428                                                   : DAG.getSubtarget().useAA();
10429 #ifndef NDEBUG
10430   if (CombinerAAOnlyFunc.getNumOccurrences() &&
10431       CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
10432     UseAA = false;
10433 #endif
10434   if (UseAA && LD->isUnindexed()) {
10435     // Walk up chain skipping non-aliasing memory nodes.
10436     SDValue BetterChain = FindBetterChain(N, Chain);
10437
10438     // If there is a better chain.
10439     if (Chain != BetterChain) {
10440       SDValue ReplLoad;
10441
10442       // Replace the chain to avoid the unnecessary dependency.
10443       if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
10444         ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
10445                                BetterChain, Ptr, LD->getMemOperand());
10446       } else {
10447         ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
10448                                   LD->getValueType(0),
10449                                   BetterChain, Ptr, LD->getMemoryVT(),
10450                                   LD->getMemOperand());
10451       }
10452
10453       // Create token factor to keep old chain connected.
10454       SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
10455                                   MVT::Other, Chain, ReplLoad.getValue(1));
10456
10457       // Make sure the new and old chains are cleaned up.
10458       AddToWorklist(Token.getNode());
10459
10460       // Replace uses with load result and token factor. Don't add users
10461       // to work list.
10462       return CombineTo(N, ReplLoad.getValue(0), Token, false);
10463     }
10464   }
10465
10466   // Try transforming N to an indexed load.
10467   if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
10468     return SDValue(N, 0);
10469
10470   // Try to slice up N to more direct loads if the slices are mapped to
10471   // different register banks or pairing can take place.
10472   if (SliceUpLoad(N))
10473     return SDValue(N, 0);
10474
10475   return SDValue();
10476 }
10477
10478 namespace {
10479 /// \brief Helper structure used to slice a load in smaller loads.
10480 /// Basically a slice is obtained from the following sequence:
10481 /// Origin = load Ty1, Base
10482 /// Shift = srl Ty1 Origin, CstTy Amount
10483 /// Inst = trunc Shift to Ty2
10484 ///
10485 /// Then, it will be rewritten into:
10486 /// Slice = load SliceTy, Base + SliceOffset
10487 /// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
10488 ///
10489 /// SliceTy is deduced from the number of bits that are actually used to
10490 /// build Inst.
10491 struct LoadedSlice {
10492   /// \brief Helper structure used to compute the cost of a slice.
10493   struct Cost {
10494     /// Whether we are optimizing for code size.
10495     bool ForCodeSize;
10496     /// Various costs.
10497     unsigned Loads;
10498     unsigned Truncates;
10499     unsigned CrossRegisterBanksCopies;
10500     unsigned ZExts;
10501     unsigned Shift;
10502
10503     Cost(bool ForCodeSize = false)
10504         : ForCodeSize(ForCodeSize), Loads(0), Truncates(0),
10505           CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {}
10506
10507     /// \brief Get the cost of one isolated slice.
10508     Cost(const LoadedSlice &LS, bool ForCodeSize = false)
10509         : ForCodeSize(ForCodeSize), Loads(1), Truncates(0),
10510           CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {
10511       EVT TruncType = LS.Inst->getValueType(0);
10512       EVT LoadedType = LS.getLoadedType();
10513       if (TruncType != LoadedType &&
10514           !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
10515         ZExts = 1;
10516     }
10517
10518     /// \brief Account for slicing gain in the current cost.
10519     /// Slicing provides a few gains, like removing a shift or a
10520     /// truncate. This method allows the cost of the original
10521     /// load to grow by the gain from this slice.
10522     void addSliceGain(const LoadedSlice &LS) {
10523       // Each slice saves a truncate.
10524       const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
10525       if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(),
10526                               LS.Inst->getValueType(0)))
10527         ++Truncates;
10528       // If there is a shift amount, this slice gets rid of it.
10529       if (LS.Shift)
10530         ++Shift;
10531       // If this slice can merge a cross register bank copy, account for it.
10532       if (LS.canMergeExpensiveCrossRegisterBankCopy())
10533         ++CrossRegisterBanksCopies;
10534     }
10535
10536     Cost &operator+=(const Cost &RHS) {
10537       Loads += RHS.Loads;
10538       Truncates += RHS.Truncates;
10539       CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
10540       ZExts += RHS.ZExts;
10541       Shift += RHS.Shift;
10542       return *this;
10543     }
10544
10545     bool operator==(const Cost &RHS) const {
10546       return Loads == RHS.Loads && Truncates == RHS.Truncates &&
10547              CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
10548              ZExts == RHS.ZExts && Shift == RHS.Shift;
10549     }
10550
10551     bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
10552
10553     bool operator<(const Cost &RHS) const {
10554       // Assume cross register bank copies are as expensive as loads.
10555       // FIXME: Do we want some more target hooks?
10556       unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
10557       unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
10558       // Unless we are optimizing for code size, consider the
10559       // expensive operation first.
10560       if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
10561         return ExpensiveOpsLHS < ExpensiveOpsRHS;
10562       return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
10563              (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
10564     }
10565
10566     bool operator>(const Cost &RHS) const { return RHS < *this; }
10567
10568     bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
10569
10570     bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
10571   };
10572   // The last instruction that represents the slice. This should be a
10573   // truncate instruction.
10574   SDNode *Inst;
10575   // The original load instruction.
10576   LoadSDNode *Origin;
10577   // The right shift amount in bits from the original load.
10578   unsigned Shift;
10579   // The DAG from which Origin came.
10580   // This is used to get some contextual information about legal types, etc.
10581   SelectionDAG *DAG;
10582
10583   LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
10584               unsigned Shift = 0, SelectionDAG *DAG = nullptr)
10585       : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
10586
10587   /// \brief Get the bits used in a chunk of bits \p BitWidth large.
10588   /// \return Result is \p BitWidth bits wide, with used bits set to 1 and
10589   /// unused bits set to 0.
10590   APInt getUsedBits() const {
10591     // Reproduce the trunc(lshr) sequence:
10592     // - Start from the truncated value.
10593     // - Zero extend to the desired bit width.
10594     // - Shift left.
10595     assert(Origin && "No original load to compare against.");
10596     unsigned BitWidth = Origin->getValueSizeInBits(0);
10597     assert(Inst && "This slice is not bound to an instruction");
10598     assert(Inst->getValueSizeInBits(0) <= BitWidth &&
10599            "Extracted slice is bigger than the whole type!");
10600     APInt UsedBits(Inst->getValueSizeInBits(0), 0);
10601     UsedBits.setAllBits();
10602     UsedBits = UsedBits.zext(BitWidth);
10603     UsedBits <<= Shift;
10604     return UsedBits;
10605   }
10606
10607   /// \brief Get the size of the slice to be loaded in bytes.
10608   unsigned getLoadedSize() const {
10609     unsigned SliceSize = getUsedBits().countPopulation();
10610     assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
10611     return SliceSize / 8;
10612   }
10613
10614   /// \brief Get the type that will be loaded for this slice.
10615   /// Note: This may not be the final type for the slice.
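  /// For example, a 2-byte slice loads an i16, which loadSlice may then
  /// zero-extend to the wider type produced by the original truncate.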
10616 EVT getLoadedType() const { 10617 assert(DAG && "Missing context"); 10618 LLVMContext &Ctxt = *DAG->getContext(); 10619 return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8); 10620 } 10621 10622 /// \brief Get the alignment of the load used for this slice. 10623 unsigned getAlignment() const { 10624 unsigned Alignment = Origin->getAlignment(); 10625 unsigned Offset = getOffsetFromBase(); 10626 if (Offset != 0) 10627 Alignment = MinAlign(Alignment, Alignment + Offset); 10628 return Alignment; 10629 } 10630 10631 /// \brief Check if this slice can be rewritten with legal operations. 10632 bool isLegal() const { 10633 // An invalid slice is not legal. 10634 if (!Origin || !Inst || !DAG) 10635 return false; 10636 10637 // Offsets are for indexed load only, we do not handle that. 10638 if (!Origin->getOffset().isUndef()) 10639 return false; 10640 10641 const TargetLowering &TLI = DAG->getTargetLoweringInfo(); 10642 10643 // Check that the type is legal. 10644 EVT SliceType = getLoadedType(); 10645 if (!TLI.isTypeLegal(SliceType)) 10646 return false; 10647 10648 // Check that the load is legal for this type. 10649 if (!TLI.isOperationLegal(ISD::LOAD, SliceType)) 10650 return false; 10651 10652 // Check that the offset can be computed. 10653 // 1. Check its type. 10654 EVT PtrType = Origin->getBasePtr().getValueType(); 10655 if (PtrType == MVT::Untyped || PtrType.isExtended()) 10656 return false; 10657 10658 // 2. Check that it fits in the immediate. 10659 if (!TLI.isLegalAddImmediate(getOffsetFromBase())) 10660 return false; 10661 10662 // 3. Check that the computation is legal. 10663 if (!TLI.isOperationLegal(ISD::ADD, PtrType)) 10664 return false; 10665 10666 // Check that the zext is legal if it needs one. 10667 EVT TruncateType = Inst->getValueType(0); 10668 if (TruncateType != SliceType && 10669 !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType)) 10670 return false; 10671 10672 return true; 10673 } 10674 10675 /// \brief Get the offset in bytes of this slice in the original chunk of 10676 /// bits. 10677 /// \pre DAG != nullptr. 10678 uint64_t getOffsetFromBase() const { 10679 assert(DAG && "Missing context."); 10680 bool IsBigEndian = DAG->getDataLayout().isBigEndian(); 10681 assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported."); 10682 uint64_t Offset = Shift / 8; 10683 unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8; 10684 assert(!(Origin->getValueSizeInBits(0) & 0x7) && 10685 "The size of the original loaded type is not a multiple of a" 10686 " byte."); 10687 // If Offset is bigger than TySizeInBytes, it means we are loading all 10688 // zeros. This should have been optimized before in the process. 10689 assert(TySizeInBytes > Offset && 10690 "Invalid shift amount for given loaded size"); 10691 if (IsBigEndian) 10692 Offset = TySizeInBytes - Offset - getLoadedSize(); 10693 return Offset; 10694 } 10695 10696 /// \brief Generate the sequence of instructions to load the slice 10697 /// represented by this object and redirect the uses of this slice to 10698 /// this new sequence of instructions. 10699 /// \pre this->Inst && this->Origin are valid Instructions and this 10700 /// object passed the legal check: LoadedSlice::isLegal returned true. 10701 /// \return The last instruction of the sequence used to load the slice. 
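  /// For a little-endian layout with Shift == 16 on an i32 origin, this
  /// loads the slice from BaseAddr + 2 (see getOffsetFromBase).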
  SDValue loadSlice() const {
    assert(Inst && Origin && "Unable to replace a non-existing slice.");
    const SDValue &OldBaseAddr = Origin->getBasePtr();
    SDValue BaseAddr = OldBaseAddr;
    // Get the offset in that chunk of bytes w.r.t. the endianness.
    int64_t Offset = static_cast<int64_t>(getOffsetFromBase());
    assert(Offset >= 0 && "Offset too big to fit in int64_t!");
    if (Offset) {
      // BaseAddr = BaseAddr + Offset.
      EVT ArithType = BaseAddr.getValueType();
      SDLoc DL(Origin);
      BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr,
                              DAG->getConstant(Offset, DL, ArithType));
    }

    // Create the type of the loaded slice according to its size.
    EVT SliceType = getLoadedType();

    // Create the load for the slice.
    SDValue LastInst =
        DAG->getLoad(SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr,
                     Origin->getPointerInfo().getWithOffset(Offset),
                     getAlignment(), Origin->getMemOperand()->getFlags());
    // If the final type is not the same as the loaded type, this means that
    // we have to pad with zero. Create a zero extend for that.
    EVT FinalType = Inst->getValueType(0);
    if (SliceType != FinalType)
      LastInst =
          DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst);
    return LastInst;
  }

  /// \brief Check if this slice can be merged with an expensive cross register
  /// bank copy. E.g.,
  /// i = load i32
  /// f = bitcast i32 i to float
  bool canMergeExpensiveCrossRegisterBankCopy() const {
    if (!Inst || !Inst->hasOneUse())
      return false;
    SDNode *Use = *Inst->use_begin();
    if (Use->getOpcode() != ISD::BITCAST)
      return false;
    assert(DAG && "Missing context");
    const TargetLowering &TLI = DAG->getTargetLoweringInfo();
    EVT ResVT = Use->getValueType(0);
    const TargetRegisterClass *ResRC = TLI.getRegClassFor(ResVT.getSimpleVT());
    const TargetRegisterClass *ArgRC =
        TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT());
    if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
      return false;

    // At this point, we know that we perform a cross-register-bank copy.
    // Check if it is expensive.
    const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo();
    // Assume bitcasts are cheap, unless the register classes do not
    // explicitly share a common subclass.
    if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
      return false;

    // Check if it will be merged with the load.
    // 1. Check the alignment constraint.
    unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
        ResVT.getTypeForEVT(*DAG->getContext()));

    if (RequiredAlignment > getAlignment())
      return false;

    // 2. Check that the load is a legal operation for that type.
    if (!TLI.isOperationLegal(ISD::LOAD, ResVT))
      return false;

    // 3. Check that we do not have a zext in the way.
    if (Inst->getValueType(0) != getLoadedType())
      return false;

    return true;
  }
};
}

/// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
static bool areUsedBitsDense(const APInt &UsedBits) {
  // If all the bits are one, this is dense!
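  // For example, 0x00FF0000 is dense (one contiguous run of ones), while
  // 0x00FF00FF is not.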
  if (UsedBits.isAllOnesValue())
    return true;

  // Get rid of the unused bits on the right.
  APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
  // Get rid of the unused bits on the left.
  if (NarrowedUsedBits.countLeadingZeros())
    NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
  // Check that the chunk of bits is completely used.
  return NarrowedUsedBits.isAllOnesValue();
}

/// \brief Check whether or not \p First and \p Second are next to each other
/// in memory. This means that there is no hole between the bits loaded
/// by \p First and the bits loaded by \p Second.
static bool areSlicesNextToEachOther(const LoadedSlice &First,
                                     const LoadedSlice &Second) {
  assert(First.Origin == Second.Origin && First.Origin &&
         "Unable to match different memory origins.");
  APInt UsedBits = First.getUsedBits();
  assert((UsedBits & Second.getUsedBits()) == 0 &&
         "Slices are not supposed to overlap.");
  UsedBits |= Second.getUsedBits();
  return areUsedBitsDense(UsedBits);
}

/// \brief Adjust the \p GlobalLSCost according to the target
/// pairing capabilities and the layout of the slices.
/// \pre \p GlobalLSCost should account for at least as many loads as
/// there are slices in \p LoadedSlices.
static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
                                 LoadedSlice::Cost &GlobalLSCost) {
  unsigned NumberOfSlices = LoadedSlices.size();
  // If there are fewer than 2 elements, no pairing is possible.
  if (NumberOfSlices < 2)
    return;

  // Sort the slices so that elements that are likely to be next to each
  // other in memory are next to each other in the list.
  std::sort(LoadedSlices.begin(), LoadedSlices.end(),
            [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
    assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
    return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
  });
  const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
  // First (resp. Second) is the first (resp. second) candidate to be
  // placed in a paired load.
  const LoadedSlice *First = nullptr;
  const LoadedSlice *Second = nullptr;
  for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
                // Set the beginning of the pair.
                First = Second) {

    Second = &LoadedSlices[CurrSlice];

    // If First is NULL, it means we start a new pair.
    // Get to the next slice.
    if (!First)
      continue;

    EVT LoadedType = First->getLoadedType();

    // If the types of the slices are different, we cannot pair them.
    if (LoadedType != Second->getLoadedType())
      continue;

    // Check if the target supplies paired loads for this type.
    unsigned RequiredAlignment = 0;
    if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
      // Move to the next pair; this type is hopeless.
      Second = nullptr;
      continue;
    }
    // Check if we meet the alignment requirement.
    if (RequiredAlignment > First->getAlignment())
      continue;

    // Check that both loads are next to each other in memory.
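    // For example, i16 slices at byte offsets 0 and 2 of the same i32 load
    // are adjacent and may form a paired load.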
    if (!areSlicesNextToEachOther(*First, *Second))
      continue;

    assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
    --GlobalLSCost.Loads;
    // Move to the next pair.
    Second = nullptr;
  }
}

/// \brief Check the profitability of all involved LoadedSlice.
/// Currently, it is considered profitable if there are exactly two
/// involved slices (1), which are (2) next to each other in memory, and
/// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
///
/// Note: The order of the elements in \p LoadedSlices may be modified, but not
/// the elements themselves.
///
/// FIXME: When the cost model is mature enough, we can relax
/// constraints (1) and (2).
static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
                                const APInt &UsedBits, bool ForCodeSize) {
  unsigned NumberOfSlices = LoadedSlices.size();
  if (StressLoadSlicing)
    return NumberOfSlices > 1;

  // Check (1).
  if (NumberOfSlices != 2)
    return false;

  // Check (2).
  if (!areUsedBitsDense(UsedBits))
    return false;

  // Check (3).
  LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
  // The original code has one big load.
  OrigCost.Loads = 1;
  for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
    const LoadedSlice &LS = LoadedSlices[CurrSlice];
    // Accumulate the cost of all the slices.
    LoadedSlice::Cost SliceCost(LS, ForCodeSize);
    GlobalSlicingCost += SliceCost;

    // Account the gain obtained with the current slices as a cost in the
    // original configuration.
    OrigCost.addSliceGain(LS);
  }

  // If the target supports paired loads, adjust the cost accordingly.
  adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
  return OrigCost > GlobalSlicingCost;
}

/// \brief If the given load, \p LI, is used only by trunc or trunc(lshr)
/// operations, split it into the various pieces being extracted.
///
/// This sort of thing is introduced by SROA.
/// This slicing takes care not to insert overlapping loads.
/// \pre LI is a simple load (i.e., not an atomic or volatile load).
bool DAGCombiner::SliceUpLoad(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isVolatile() || !ISD::isNormalLoad(LD) ||
      !LD->getValueType(0).isInteger())
    return false;

  // Keep track of already used bits to detect overlapping values.
  // In that case, we will just abort the transformation.
  APInt UsedBits(LD->getValueSizeInBits(0), 0);

  SmallVector<LoadedSlice, 4> LoadedSlices;

  // Check if this load is used as several smaller chunks of bits.
  // Basically, look for uses in trunc or trunc(lshr) and record a new chain
  // of computation for each trunc.
  for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
       UI != UIEnd; ++UI) {
    // Skip the uses of the chain.
    if (UI.getUse().getResNo() != 0)
      continue;

    SDNode *User = *UI;
    unsigned Shift = 0;

    // Check if this is a trunc(lshr).
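    // For example, (i16 (trunc (srl (i32 (load @p)), 16))) extracts the high
    // half of the load, whereas a plain (i16 (trunc ...)) has Shift == 0.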
    if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
        isa<ConstantSDNode>(User->getOperand(1))) {
      Shift = cast<ConstantSDNode>(User->getOperand(1))->getZExtValue();
      User = *User->use_begin();
    }

    // At this point, User is a TRUNCATE iff we encountered trunc or
    // trunc(lshr).
    if (User->getOpcode() != ISD::TRUNCATE)
      return false;

    // The width of the type must be a power of 2 and at least 8 bits.
    // Otherwise the load cannot be represented in LLVM IR.
    // Moreover, if we shifted with a non-8-bits multiple, the slice
    // will be across several bytes. We do not support that.
    unsigned Width = User->getValueSizeInBits(0);
    if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
      return false;

    // Build the slice for this chain of computations.
    LoadedSlice LS(User, LD, Shift, &DAG);
    APInt CurrentUsedBits = LS.getUsedBits();

    // Check if this slice overlaps with another.
    if ((CurrentUsedBits & UsedBits) != 0)
      return false;
    // Update the bits used globally.
    UsedBits |= CurrentUsedBits;

    // Check if the new slice would be legal.
    if (!LS.isLegal())
      return false;

    // Record the slice.
    LoadedSlices.push_back(LS);
  }

  // Abort slicing if it does not seem to be profitable.
  if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
    return false;

  ++SlicedLoads;

  // Rewrite each chain to use an independent load.
  // By construction, each chain can be represented by a unique load.

  // Prepare the argument for the new token factor for all the slices.
  SmallVector<SDValue, 8> ArgChains;
  for (SmallVectorImpl<LoadedSlice>::const_iterator
           LSIt = LoadedSlices.begin(),
           LSItEnd = LoadedSlices.end();
       LSIt != LSItEnd; ++LSIt) {
    SDValue SliceInst = LSIt->loadSlice();
    CombineTo(LSIt->Inst, SliceInst, true);
    if (SliceInst.getOpcode() != ISD::LOAD)
      SliceInst = SliceInst.getOperand(0);
    assert(SliceInst->getOpcode() == ISD::LOAD &&
           "It takes more than a zext to get to the loaded slice!!");
    ArgChains.push_back(SliceInst.getValue(1));
  }

  SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
                              ArgChains);
  DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
  return true;
}

/// Check to see if V is (and load (ptr), imm), where the load has
/// specific bytes cleared out. If so, return the byte size being masked out
/// and the shift amount.
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
  std::pair<unsigned, unsigned> Result(0, 0);

  // Check for the structure we're looking for.
  if (V->getOpcode() != ISD::AND ||
      !isa<ConstantSDNode>(V->getOperand(1)) ||
      !ISD::isNormalLoad(V->getOperand(0).getNode()))
    return Result;

  // Check the chain and pointer.
  LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
  if (LD->getBasePtr() != Ptr) return Result;  // Not from same pointer.

  // The store should be chained directly to the load or be an operand of a
  // TokenFactor.
  if (LD == Chain.getNode())
    ; // ok.
  else if (Chain->getOpcode() != ISD::TokenFactor)
    return Result; // Fail.
  else {
    bool isOk = false;
    for (const SDValue &ChainOp : Chain->op_values())
      if (ChainOp.getNode() == LD) {
        isOk = true;
        break;
      }
    if (!isOk) return Result;
  }

  // This only handles simple types.
  if (V.getValueType() != MVT::i16 &&
      V.getValueType() != MVT::i32 &&
      V.getValueType() != MVT::i64)
    return Result;

  // Check the constant mask. Invert it so that the bits being masked out are
  // 0 and the bits being kept are 1. Use getSExtValue so that leading bits
  // follow the sign bit for uniformity.
  uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
  unsigned NotMaskLZ = countLeadingZeros(NotMask);
  if (NotMaskLZ & 7) return Result;  // Must be multiple of a byte.
  unsigned NotMaskTZ = countTrailingZeros(NotMask);
  if (NotMaskTZ & 7) return Result;  // Must be multiple of a byte.
  if (NotMaskLZ == 64) return Result;  // All zero mask.

  // See if we have a continuous run of bits. If so, we have 0*1+0*
  if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64)
    return Result;

  // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
  if (V.getValueType() != MVT::i64 && NotMaskLZ)
    NotMaskLZ -= 64 - V.getValueSizeInBits();

  unsigned MaskedBytes = (V.getValueSizeInBits() - NotMaskLZ - NotMaskTZ) / 8;
  switch (MaskedBytes) {
  case 1:
  case 2:
  case 4: break;
  default: return Result;  // All one mask, or 5-byte mask.
  }

  // Verify that the masked bytes start at a multiple of the mask width, so
  // that the narrowed access is aligned the same as the access width.
  if (NotMaskTZ && NotMaskTZ / 8 % MaskedBytes) return Result;

  Result.first = MaskedBytes;
  Result.second = NotMaskTZ / 8;
  return Result;
}


/// Check to see if IVal is something that provides a value as specified by
/// MaskInfo. If so, replace the specified store with a narrower store of
/// truncated IVal.
static SDNode *
ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
                                SDValue IVal, StoreSDNode *St,
                                DAGCombiner *DC) {
  unsigned NumBytes = MaskInfo.first;
  unsigned ByteShift = MaskInfo.second;
  SelectionDAG &DAG = DC->getDAG();

  // Check to see if IVal is all zeros in the part being masked in by the 'or'
  // that uses this. If not, this is not a replacement.
  APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
                                  ByteShift * 8, (ByteShift + NumBytes) * 8);
  if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr;

  // Check that it is legal on the target to do this. It is legal if the new
  // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
  // legalization.
  MVT VT = MVT::getIntegerVT(NumBytes * 8);
  if (!DC->isTypeLegal(VT))
    return nullptr;

  // Okay, we can do this! Replace the 'St' store with a store of IVal that is
  // shifted by ByteShift and truncated down to NumBytes.
  if (ByteShift) {
    SDLoc DL(IVal);
    IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal,
                       DAG.getConstant(ByteShift * 8, DL,
                                       DC->getShiftAmountTy(IVal.getValueType())));
  }

  // Figure out the offset for the store and the alignment of the access.
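  // For example, with NumBytes == 1 and ByteShift == 1 (a mask clearing bits
  // [8, 16) of an i32), the new store writes one byte at offset 1 on a
  // little-endian target.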
  unsigned StOffset;
  unsigned NewAlign = St->getAlignment();

  if (DAG.getDataLayout().isLittleEndian())
    StOffset = ByteShift;
  else
    StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;

  SDValue Ptr = St->getBasePtr();
  if (StOffset) {
    SDLoc DL(IVal);
    Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(),
                      Ptr, DAG.getConstant(StOffset, DL, Ptr.getValueType()));
    NewAlign = MinAlign(NewAlign, StOffset);
  }

  // Truncate down to the new size.
  IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);

  ++OpsNarrowed;
  return DAG
      .getStore(St->getChain(), SDLoc(St), IVal, Ptr,
                St->getPointerInfo().getWithOffset(StOffset), NewAlign)
      .getNode();
}


/// Look for a sequence of load / op / store where op is one of 'or', 'xor',
/// and 'and' of immediates. If 'op' is only touching some of the loaded bits,
/// try narrowing the load and store if it would end up being a win for
/// performance or code size.
SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  if (ST->isVolatile())
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr = ST->getBasePtr();
  EVT VT = Value.getValueType();

  if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
    return SDValue();

  unsigned Opc = Value.getOpcode();

  // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
  // is a byte mask indicating a consecutive number of bytes, check to see if
  // Y is known to provide just those bytes. If so, we try to replace the
  // load / op / store sequence with a single (narrower) store, which makes
  // the load dead.
  if (Opc == ISD::OR) {
    std::pair<unsigned, unsigned> MaskedLoad;
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
                                                Value.getOperand(1), ST, this))
        return SDValue(NewST, 0);

    // Or is commutative, so try swapping X and Y.
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
                                                Value.getOperand(0), ST, this))
        return SDValue(NewST, 0);
  }

  if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
      Value.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N0 = Value.getOperand(0);
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      Chain == SDValue(N0.getNode(), 1)) {
    LoadSDNode *LD = cast<LoadSDNode>(N0);
    if (LD->getBasePtr() != Ptr ||
        LD->getPointerInfo().getAddrSpace() !=
        ST->getPointerInfo().getAddrSpace())
      return SDValue();

    // Find the type to which to narrow the load / op / store.
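    // For example, (store (or (load @p), 0xFF0000), @p) only changes byte 2,
    // so on a little-endian target it can be narrowed to an i8-wide
    // load / or / store at offset 2.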
    SDValue N1 = Value.getOperand(1);
    unsigned BitWidth = N1.getValueSizeInBits();
    APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
    if (Opc == ISD::AND)
      Imm ^= APInt::getAllOnesValue(BitWidth);
    if (Imm == 0 || Imm.isAllOnesValue())
      return SDValue();
    unsigned ShAmt = Imm.countTrailingZeros();
    unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
    unsigned NewBW = NextPowerOf2(MSB - ShAmt);
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    // The narrowing should be profitable, the load/store operation should be
    // legal (or custom) and the store size should be equal to the NewVT width.
    while (NewBW < BitWidth &&
           (NewVT.getStoreSizeInBits() != NewBW ||
            !TLI.isOperationLegalOrCustom(Opc, NewVT) ||
            !TLI.isNarrowingProfitable(VT, NewVT))) {
      NewBW = NextPowerOf2(NewBW);
      NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    }
    if (NewBW >= BitWidth)
      return SDValue();

    // If the changed lsb does not start at a type-bitwidth boundary, start at
    // the previous one.
    if (ShAmt % NewBW)
      ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
    APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
                                   std::min(BitWidth, ShAmt + NewBW));
    if ((Imm & Mask) == Imm) {
      APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
      if (Opc == ISD::AND)
        NewImm ^= APInt::getAllOnesValue(NewBW);
      uint64_t PtrOff = ShAmt / 8;
      // For big-endian targets, we need to adjust the offset to the pointer to
      // load the correct bytes.
      if (DAG.getDataLayout().isBigEndian())
        PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;

      unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
      Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
      if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
        return SDValue();

      SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
                                   Ptr.getValueType(), Ptr,
                                   DAG.getConstant(PtrOff, SDLoc(LD),
                                                   Ptr.getValueType()));
      SDValue NewLD =
          DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr,
                      LD->getPointerInfo().getWithOffset(PtrOff), NewAlign,
                      LD->getMemOperand()->getFlags(), LD->getAAInfo());
      SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD,
                                   DAG.getConstant(NewImm, SDLoc(Value),
                                                   NewVT));
      SDValue NewST =
          DAG.getStore(Chain, SDLoc(N), NewVal, NewPtr,
                       ST->getPointerInfo().getWithOffset(PtrOff), NewAlign);

      AddToWorklist(NewPtr.getNode());
      AddToWorklist(NewLD.getNode());
      AddToWorklist(NewVal.getNode());
      WorklistRemover DeadNodes(*this);
      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
      ++OpsNarrowed;
      return NewST;
    }
  }

  return SDValue();
}

/// For a given floating-point load / store pair, if the load value isn't used
/// by any other operations, then consider transforming the pair to integer
/// load / store operations if the target deems the transformation profitable.
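/// For example, (store (f32 (load @p)), @q) may become
/// (store (i32 (load @p)), @q) when the target reports the integer form as
/// desirable, keeping the value out of the FP register file.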
SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
      Value.hasOneUse() &&
      Chain == SDValue(Value.getNode(), 1)) {
    LoadSDNode *LD = cast<LoadSDNode>(Value);
    EVT VT = LD->getMemoryVT();
    if (!VT.isFloatingPoint() ||
        VT != ST->getMemoryVT() ||
        LD->isNonTemporal() ||
        ST->isNonTemporal() ||
        LD->getPointerInfo().getAddrSpace() != 0 ||
        ST->getPointerInfo().getAddrSpace() != 0)
      return SDValue();

    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
        !TLI.isOperationLegal(ISD::STORE, IntVT) ||
        !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
        !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
      return SDValue();

    unsigned LDAlign = LD->getAlignment();
    unsigned STAlign = ST->getAlignment();
    Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
    if (LDAlign < ABIAlign || STAlign < ABIAlign)
      return SDValue();

    SDValue NewLD =
        DAG.getLoad(IntVT, SDLoc(Value), LD->getChain(), LD->getBasePtr(),
                    LD->getPointerInfo(), LDAlign);

    SDValue NewST =
        DAG.getStore(NewLD.getValue(1), SDLoc(N), NewLD, ST->getBasePtr(),
                     ST->getPointerInfo(), STAlign);

    AddToWorklist(NewLD.getNode());
    AddToWorklist(NewST.getNode());
    WorklistRemover DeadNodes(*this);
    DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
    ++LdStFP2Int;
    return NewST;
  }

  return SDValue();
}

// This is a helper function for visitMUL to check the profitability
// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
// MulNode is the original multiply, AddNode is (add x, c1),
// and ConstNode is c2.
//
// If the (add x, c1) has multiple uses, we could increase
// the number of adds if we make this transformation.
// It would only be worth doing this if we can remove a
// multiply in the process. Check for that here.
// To illustrate:
//     (A + c1) * c3
//     (A + c2) * c3
// We're checking for cases where we have common "c3 * A" expressions.
bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode,
                                              SDValue &AddNode,
                                              SDValue &ConstNode) {
  APInt Val;

  // If the add only has one use, this would be OK to do.
  if (AddNode.getNode()->hasOneUse())
    return true;

  // Walk all the users of the constant with which we're multiplying.
  for (SDNode *Use : ConstNode->uses()) {

    if (Use == MulNode) // This use is the one we're on right now. Skip it.
      continue;

    if (Use->getOpcode() == ISD::MUL) { // We have another multiply use.
      SDNode *OtherOp;
      SDNode *MulVar = AddNode.getOperand(0).getNode();

      // OtherOp is what we're multiplying against the constant.
      if (Use->getOperand(0) == ConstNode)
        OtherOp = Use->getOperand(1).getNode();
      else
        OtherOp = Use->getOperand(0).getNode();

      // Check to see if multiply is with the same operand of our "add".
      //
      //     ConstNode = CONST
      //     Use       = ConstNode * A  <-- visiting Use. OtherOp is A.
      //     ...
      //     AddNode   = (A + c1)  <-- MulVar is A.
      //               = AddNode * ConstNode  <-- current visiting instruction.
      //
      // If we make this transformation, we will have a common
      // multiply (ConstNode * A) that we can save.
      if (OtherOp == MulVar)
        return true;

      // Now check to see if a future expansion will give us a common
      // multiply.
      //
      //     ConstNode = CONST
      //     AddNode   = (A + c1)
      //     ...       = AddNode * ConstNode  <-- current visiting instruction.
      //     ...
      //     OtherOp   = (A + c2)
      //     Use       = OtherOp * ConstNode  <-- visiting Use.
      //
      // If we make this transformation, we will have a common
      // multiply (CONST * A) after we also do the same transformation
      // to the "Use" instruction.
      if (OtherOp->getOpcode() == ISD::ADD &&
          DAG.isConstantIntBuildVectorOrConstantInt(OtherOp->getOperand(1)) &&
          OtherOp->getOperand(0).getNode() == MulVar)
        return true;
    }
  }

  // Didn't find a case where this would be profitable.
  return false;
}

SDValue DAGCombiner::getMergedConstantVectorStore(
    SelectionDAG &DAG, const SDLoc &SL, ArrayRef<MemOpLink> Stores,
    SmallVectorImpl<SDValue> &Chains, EVT Ty) const {
  SmallVector<SDValue, 8> BuildVector;

  for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) {
    StoreSDNode *St = cast<StoreSDNode>(Stores[I].MemNode);
    Chains.push_back(St->getChain());
    BuildVector.push_back(St->getValue());
  }

  return DAG.getBuildVector(Ty, SL, BuildVector);
}

bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
    SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT,
    unsigned NumStores, bool IsConstantSrc, bool UseVector) {
  // Make sure we have something to merge.
  if (NumStores < 2)
    return false;

  int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
  LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
  unsigned LatestNodeUsed = 0;

  for (unsigned i = 0; i < NumStores; ++i) {
    // Find a chain for the new wide-store operand. Notice that some
    // of the store nodes that we found may not be selected for inclusion
    // in the wide store. The chain we use needs to be the chain of the
    // latest store node which is *used* and replaced by the wide store.
    if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
      LatestNodeUsed = i;
  }

  SmallVector<SDValue, 8> Chains;

  // The latest Node in the DAG.
  LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
  SDLoc DL(StoreNodes[0].MemNode);

  SDValue StoredVal;
  if (UseVector) {
    bool IsVec = MemVT.isVector();
    unsigned Elts = NumStores;
    if (IsVec) {
      // When merging vector stores, get the total number of elements.
      Elts *= MemVT.getVectorNumElements();
    }
    // Get the type for the merged vector store.
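    // For example, four merged i32 stores get v4i32 here, and two merged
    // v2i32 stores also get v4i32 (Elts accounts for the source elements).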
    EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
    assert(TLI.isTypeLegal(Ty) && "Illegal vector store");

    if (IsConstantSrc) {
      StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Chains, Ty);
    } else {
      SmallVector<SDValue, 8> Ops;
      for (unsigned i = 0; i < NumStores; ++i) {
        StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
        SDValue Val = St->getValue();
        // All operands of BUILD_VECTOR / CONCAT_VECTORS must have the same type.
        if (Val.getValueType() != MemVT)
          return false;
        Ops.push_back(Val);
        Chains.push_back(St->getChain());
      }

      // Build the extracted vector elements back into a vector.
      StoredVal = DAG.getNode(IsVec ? ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR,
                              DL, Ty, Ops);
    }
  } else {
    // We should always use a vector store when merging extracted vector
    // elements, so this path implies a store of constants.
    assert(IsConstantSrc && "Merged vector elements should use vector store");

    unsigned SizeInBits = NumStores * ElementSizeBytes * 8;
    APInt StoreInt(SizeInBits, 0);

    // Construct a single integer constant which is made of the smaller
    // constant inputs.
    bool IsLE = DAG.getDataLayout().isLittleEndian();
    for (unsigned i = 0; i < NumStores; ++i) {
      unsigned Idx = IsLE ? (NumStores - 1 - i) : i;
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
      Chains.push_back(St->getChain());

      SDValue Val = St->getValue();
      StoreInt <<= ElementSizeBytes * 8;
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
        StoreInt |= C->getAPIntValue().zext(SizeInBits);
      } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
        StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits);
      } else {
        llvm_unreachable("Invalid constant element type");
      }
    }

    // Create the new Load and Store operations.
    EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
    StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
  }

  assert(!Chains.empty());

  SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  SDValue NewStore = DAG.getStore(NewChain, DL, StoredVal,
                                  FirstInChain->getBasePtr(),
                                  FirstInChain->getPointerInfo(),
                                  FirstInChain->getAlignment());

  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
                                                  : DAG.getSubtarget().useAA();
  if (UseAA) {
    // Replace all merged stores with the new store.
    for (unsigned i = 0; i < NumStores; ++i)
      CombineTo(StoreNodes[i].MemNode, NewStore);
  } else {
    // Replace the last store with the new store.
    CombineTo(LatestOp, NewStore);
    // Erase all other stores.
    for (unsigned i = 0; i < NumStores; ++i) {
      if (StoreNodes[i].MemNode == LatestOp)
        continue;
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
      // ReplaceAllUsesWith will replace all uses that existed when it was
      // called, but graph optimizations may cause new ones to appear. For
      // example, the case in pr14333 looks like
      //
      //  St's chain -> St -> another store -> X
      //
      // And the only difference from St to the other store is the chain.
      // When we change its chain to be St's chain they become identical,
      // get CSEed and the net result is that X is now a use of St.
      // Since we know that St is redundant, just iterate.
      while (!St->use_empty())
        DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
      deleteAndRecombine(St);
    }
  }

  StoreNodes.erase(StoreNodes.begin() + NumStores, StoreNodes.end());
  return true;
}

void DAGCombiner::getStoreMergeAndAliasCandidates(
    StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
    SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
  // This holds the base pointer, index, and the offset in bytes from the base
  // pointer.
  BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);

  // We must have a base and an offset.
  if (!BasePtr.Base.getNode())
    return;

  // Do not handle stores to undef base pointers.
  if (BasePtr.Base.isUndef())
    return;

  // Walk up the chain and look for nodes with offsets from the same
  // base pointer. Stop when reaching an instruction of a different kind
  // or one with a different base pointer.
  EVT MemVT = St->getMemoryVT();
  unsigned Seq = 0;
  StoreSDNode *Index = St;

  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
                                                  : DAG.getSubtarget().useAA();

  if (UseAA) {
    // Look at other users of the same chain. Stores on the same chain do not
    // alias. If combiner-aa is enabled, non-aliasing stores are canonicalized
    // to be on the same chain, so don't bother looking at adjacent chains.

    SDValue Chain = St->getChain();
    for (auto I = Chain->use_begin(), E = Chain->use_end(); I != E; ++I) {
      if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
        if (I.getOperandNo() != 0)
          continue;

        if (OtherST->isVolatile() || OtherST->isIndexed())
          continue;

        if (OtherST->getMemoryVT() != MemVT)
          continue;

        BaseIndexOffset Ptr = BaseIndexOffset::match(OtherST->getBasePtr(), DAG);

        if (Ptr.equalBaseIndex(BasePtr))
          StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset, Seq++));
      }
    }

    return;
  }

  while (Index) {
    // If the chain has more than one use, then we can't reorder the mem ops.
    if (Index != St && !SDValue(Index, 0)->hasOneUse())
      break;

    // Find the base pointer and offset for this memory node.
    BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG);

    // Check that the base pointer is the same as the original one.
    if (!Ptr.equalBaseIndex(BasePtr))
      break;

    // The memory operands must not be volatile.
    if (Index->isVolatile() || Index->isIndexed())
      break;

    // No truncation.
    if (Index->isTruncatingStore())
      break;

    // The stored memory type must be the same.
    if (Index->getMemoryVT() != MemVT)
      break;

    // We do not allow under-aligned stores in order to prevent
    // overriding stores. NOTE: this is a bad hack. Alignment SHOULD
    // be irrelevant here; what MATTERS is that we not move memory
    // operations that potentially overlap past each other.
    if (Index->getAlignment() < MemVT.getStoreSize())
      break;

    // We found a potential memory operand to merge.
    StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++));

    // Find the next memory operand in the chain. If the next operand in the
    // chain is a store then move up and continue the scan with the next
    // memory operand. If the next operand is a load save it and use alias
    // information to check if it interferes with anything.
    SDNode *NextInChain = Index->getChain().getNode();
    while (true) {
      if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
        // We found a store node. Use it for the next iteration.
        Index = STn;
        break;
      } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
        if (Ldn->isVolatile()) {
          Index = nullptr;
          break;
        }

        // Save the load node for later. Continue the scan.
        AliasLoadNodes.push_back(Ldn);
        NextInChain = Ldn->getChain().getNode();
        continue;
      } else {
        Index = nullptr;
        break;
      }
    }
  }
}

// We need to check that merging these stores does not cause a loop
// in the DAG. Any store candidate may depend on another candidate
// indirectly through its operand (we already consider dependencies
// through the chain). Check in parallel by searching up from
// non-chain operands of candidates.
bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
    SmallVectorImpl<MemOpLink> &StoreNodes) {
  SmallPtrSet<const SDNode *, 16> Visited;
  SmallVector<const SDNode *, 8> Worklist;
  // Search the operands of the store candidates.
  for (unsigned i = 0; i < StoreNodes.size(); ++i) {
    SDNode *n = StoreNodes[i].MemNode;
    // Potential loops may happen only through non-chain operands.
    for (unsigned j = 1; j < n->getNumOperands(); ++j)
      Worklist.push_back(n->getOperand(j).getNode());
  }
  // Search through the DAG. We can stop early if we find a store node.
  for (unsigned i = 0; i < StoreNodes.size(); ++i) {
    if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist))
      return false;
  }
  return true;
}

bool DAGCombiner::MergeConsecutiveStores(
    StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes) {
  if (OptLevel == CodeGenOpt::None)
    return false;

  EVT MemVT = St->getMemoryVT();
  int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
  bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
      Attribute::NoImplicitFloat);

  // This function cannot currently deal with non-byte-sized memory sizes.
  if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
    return false;

  if (!MemVT.isSimple())
    return false;

  // Perform an early exit check. Do not bother looking at stored values that
  // are not constants, loads, or extracted vector elements.
  SDValue StoredVal = St->getValue();
  bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
  bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
                       isa<ConstantFPSDNode>(StoredVal);
  bool IsExtractVecSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
                          StoredVal.getOpcode() == ISD::EXTRACT_SUBVECTOR);

  if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecSrc)
    return false;

  // Don't merge vectors into wider vectors if the source data comes from loads.
  // TODO: This restriction can be lifted by using logic similar to the
  // ExtractVecSrc case.
  if (MemVT.isVector() && IsLoadSrc)
    return false;

  // Only look at ends of store sequences.
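  // For example, in a chain st1 -> st2 -> st3, only st3 (whose chain result
  // is not consumed by another store) starts a merge attempt.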
  SDValue Chain = SDValue(St, 0);
  if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
    return false;

  // Save the LoadSDNodes that we find in the chain.
  // We need to make sure that these nodes do not interfere with
  // any of the store nodes.
  SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;

  getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);

  // Check if there is anything to merge.
  if (StoreNodes.size() < 2)
    return false;

  // Only do the dependence check in the AA case.
  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
                                                  : DAG.getSubtarget().useAA();
  if (UseAA && !checkMergeStoreCandidatesForDependencies(StoreNodes))
    return false;

  // Sort the memory operands according to their distance from the
  // base pointer. As a secondary criterion: make sure stores coming
  // later in the code come first in the list. This is important for
  // the non-UseAA case, because we're merging stores into the FINAL
  // store along a chain which potentially contains aliasing stores.
  // Thus, if there are multiple stores to the same address, the last
  // one can be considered for merging but not the others.
  std::sort(StoreNodes.begin(), StoreNodes.end(),
            [](MemOpLink LHS, MemOpLink RHS) {
    return LHS.OffsetFromBase < RHS.OffsetFromBase ||
           (LHS.OffsetFromBase == RHS.OffsetFromBase &&
            LHS.SequenceNum < RHS.SequenceNum);
  });

  // Scan the memory operations on the chain and find the first non-consecutive
  // store memory address.
  unsigned LastConsecutiveStore = 0;
  int64_t StartAddress = StoreNodes[0].OffsetFromBase;
  for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) {

    // Check that the addresses are consecutive starting from the second
    // element in the list of stores.
    if (i > 0) {
      int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
      if (CurrAddress - StartAddress != (ElementSizeBytes * i))
        break;
    }

    // Check if this store interferes with any of the loads that we found.
    // If we find a load that aliases with this store, stop the sequence.
    if (any_of(AliasLoadNodes, [&](LSBaseSDNode *Ldn) {
          return isAlias(Ldn, StoreNodes[i].MemNode);
        }))
      break;

    // Mark this node as useful.
    LastConsecutiveStore = i;
  }

  // The node with the lowest store address.
  LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
  unsigned FirstStoreAS = FirstInChain->getAddressSpace();
  unsigned FirstStoreAlign = FirstInChain->getAlignment();
  LLVMContext &Context = *DAG.getContext();
  const DataLayout &DL = DAG.getDataLayout();

  // Store the constants into memory as one consecutive store.
  if (IsConstantSrc) {
    unsigned LastLegalType = 0;
    unsigned LastLegalVectorType = 0;
    bool NonZero = false;
    for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) {
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
      SDValue StoredVal = St->getValue();

      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) {
        NonZero |= !C->isNullValue();
      } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) {
        NonZero |= !C->getConstantFPValue()->isNullValue();
      } else {
        // Non-constant.
        break;
      }

      // Find a legal type for the constant store.
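      // For example, two consecutive i16 constant stores may be merged into
      // a single i32 store when i32 is legal and the access is fast enough.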
      unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
      EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
      bool IsFast;
      if (TLI.isTypeLegal(StoreTy) &&
          TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
                                 FirstStoreAlign, &IsFast) && IsFast) {
        LastLegalType = i + 1;
        // Or check whether a truncstore is legal.
      } else if (TLI.getTypeAction(Context, StoreTy) ==
                 TargetLowering::TypePromoteInteger) {
        EVT LegalizedStoredValueTy =
            TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
        if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
            TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
                                   FirstStoreAS, FirstStoreAlign, &IsFast) &&
            IsFast) {
          LastLegalType = i + 1;
        }
      }

      // We only use vectors if the constant is known to be zero or the target
      // allows it and the function is not marked with the noimplicitfloat
      // attribute.
      if ((!NonZero || TLI.storeOfVectorConstantIsCheap(MemVT, i + 1,
                                                        FirstStoreAS)) &&
          !NoVectors) {
        // Find a legal type for the vector store.
        EVT Ty = EVT::getVectorVT(Context, MemVT, i + 1);
        if (TLI.isTypeLegal(Ty) &&
            TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
                                   FirstStoreAlign, &IsFast) && IsFast)
          LastLegalVectorType = i + 1;
      }
    }

    // Check if we found a legal integer type to store.
    if (LastLegalType == 0 && LastLegalVectorType == 0)
      return false;

    bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors;
    unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType;

    return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem,
                                           true, UseVector);
  }

  // When extracting multiple vector elements, try to store them
  // in one vector store rather than a sequence of scalar stores.
  if (IsExtractVecSrc) {
    unsigned NumStoresToMerge = 0;
    bool IsVec = MemVT.isVector();
    for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) {
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
      unsigned StoreValOpcode = St->getValue().getOpcode();
      // This restriction could be loosened.
      // Bail out if any stored values are not elements extracted from a vector.
      // It should be possible to handle mixed sources, but load sources need
      // more careful handling (see the block of code below that handles
      // consecutive loads).
      if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT &&
          StoreValOpcode != ISD::EXTRACT_SUBVECTOR)
        return false;

      // Find a legal type for the vector store.
      unsigned Elts = i + 1;
      if (IsVec) {
        // When merging vector stores, get the total number of elements.
        Elts *= MemVT.getVectorNumElements();
      }
      EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
      bool IsFast;
      if (TLI.isTypeLegal(Ty) &&
          TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
                                 FirstStoreAlign, &IsFast) && IsFast)
        NumStoresToMerge = i + 1;
    }

    return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumStoresToMerge,
                                           false, true);
  }

  // Below we handle the case of multiple consecutive stores that
  // come from multiple consecutive loads. We merge them into a single
  // wide load and a single wide store.

  // Look for load nodes which are used by the stored values.
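  // For example, a run of i8 load/store pairs copying consecutive bytes may
  // collapse into a single wider load feeding a single wider store.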
  SmallVector<MemOpLink, 8> LoadNodes;

  // Find acceptable loads. Loads need to have the same chain (token factor),
  // must not be extending, volatile, or indexed, and they must be consecutive.
  BaseIndexOffset LdBasePtr;
  for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) {
    StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
    if (!Ld) break;

    // Loads must only have one use.
    if (!Ld->hasNUsesOfValue(1, 0))
      break;

    // The memory operands must not be volatile.
    if (Ld->isVolatile() || Ld->isIndexed())
      break;

    // We do not accept ext loads.
    if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
      break;

    // The loaded memory type must be the same.
    if (Ld->getMemoryVT() != MemVT)
      break;

    BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG);
    // If this is not the first pointer that we check.
    if (LdBasePtr.Base.getNode()) {
      // The base ptr must be the same.
      if (!LdPtr.equalBaseIndex(LdBasePtr))
        break;
    } else {
      // Check that all other base pointers are the same as this one.
      LdBasePtr = LdPtr;
    }

    // We found a potential memory operand to merge.
    LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0));
  }

  if (LoadNodes.size() < 2)
    return false;

  // If we have load/store pair instructions and we only have two values,
  // don't bother.
  unsigned RequiredAlignment;
  if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
      St->getAlignment() >= RequiredAlignment)
    return false;

  LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
  unsigned FirstLoadAS = FirstLoad->getAddressSpace();
  unsigned FirstLoadAlign = FirstLoad->getAlignment();

  // Scan the memory operations on the chain and find the first non-consecutive
  // load memory address. These variables hold the index in the store node
  // array.
  unsigned LastConsecutiveLoad = 0;
  // This variable refers to the size and not index in the array.
  unsigned LastLegalVectorType = 0;
  unsigned LastLegalIntegerType = 0;
  StartAddress = LoadNodes[0].OffsetFromBase;
  SDValue FirstChain = FirstLoad->getChain();
  for (unsigned i = 1; i < LoadNodes.size(); ++i) {
    // All loads must share the same chain.
    if (LoadNodes[i].MemNode->getChain() != FirstChain)
      break;

    int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
    if (CurrAddress - StartAddress != (ElementSizeBytes * i))
      break;
    LastConsecutiveLoad = i;
    // Find a legal type for the vector store.
    EVT StoreTy = EVT::getVectorVT(Context, MemVT, i + 1);
    bool IsFastSt, IsFastLd;
    if (TLI.isTypeLegal(StoreTy) &&
        TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
                               FirstStoreAlign, &IsFastSt) && IsFastSt &&
        TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
                               FirstLoadAlign, &IsFastLd) && IsFastLd) {
      LastLegalVectorType = i + 1;
    }

    // Find a legal type for the integer store.
    unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
    StoreTy = EVT::getIntegerVT(Context, SizeInBits);
    if (TLI.isTypeLegal(StoreTy) &&
        TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
                               FirstStoreAlign, &IsFastSt) && IsFastSt &&
        TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
                               FirstLoadAlign, &IsFastLd) && IsFastLd)
      LastLegalIntegerType = i + 1;
    // Or check whether a truncstore and extload are legal.
    else if (TLI.getTypeAction(Context, StoreTy) ==
             TargetLowering::TypePromoteInteger) {
      EVT LegalizedStoredValueTy =
          TLI.getTypeToTransformTo(Context, StoreTy);
      if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
          TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
          TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
          TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) &&
          TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
                                 FirstStoreAS, FirstStoreAlign, &IsFastSt) &&
          IsFastSt &&
          TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
                                 FirstLoadAS, FirstLoadAlign, &IsFastLd) &&
          IsFastLd)
        LastLegalIntegerType = i + 1;
    }
  }

  // Only use vector types if the vector type is larger than the integer type.
  // If they are the same, use integers.
  bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
  unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);

  // We add +1 here because the LastXXX variables refer to an index in the
  // array, while NumElem refers to an array/index size (a count).
  unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1;
  NumElem = std::min(LastLegalType, NumElem);

  if (NumElem < 2)
    return false;

  // Collect the chains from all merged stores.
  SmallVector<SDValue, 8> MergeStoreChains;
  MergeStoreChains.push_back(StoreNodes[0].MemNode->getChain());

  // The latest Node in the DAG.
  unsigned LatestNodeUsed = 0;
  for (unsigned i = 1; i < NumElem; ++i) {
    // Find a chain for the new wide-store operand. Notice that some
    // of the store nodes that we found may not be selected for inclusion
    // in the wide store. The chain we use needs to be the chain of the
    // latest store node which is *used* and replaced by the wide store.
    if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
      LatestNodeUsed = i;

    MergeStoreChains.push_back(StoreNodes[i].MemNode->getChain());
  }

  LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;

  // Find out whether it is better to use vectors or integers to load and
  // store to memory.
  EVT JointMemOpVT;
  if (UseVectorTy) {
    JointMemOpVT = EVT::getVectorVT(Context, MemVT, NumElem);
  } else {
    unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
    JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
  }

  SDLoc LoadDL(LoadNodes[0].MemNode);
  SDLoc StoreDL(StoreNodes[0].MemNode);

  // The merged loads are required to have the same incoming chain, so
  // using the first's chain is acceptable.
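  // For example, when NumElem == 2 and MemVT == i32, JointMemOpVT is i64
  // (or v2i32 if vectors won), and one load/store pair replaces both.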
  SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(),
                                FirstLoad->getBasePtr(),
                                FirstLoad->getPointerInfo(), FirstLoadAlign);

  SDValue NewStoreChain =
      DAG.getNode(ISD::TokenFactor, StoreDL, MVT::Other, MergeStoreChains);

  SDValue NewStore =
      DAG.getStore(NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
                   FirstInChain->getPointerInfo(), FirstStoreAlign);

  // Transfer chain users from old loads to the new load.
  for (unsigned i = 0; i < NumElem; ++i) {
    LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
                                  SDValue(NewLoad.getNode(), 1));
  }

  if (UseAA) {
    // Replace all of the stores with the new store.
    for (unsigned i = 0; i < NumElem; ++i)
      CombineTo(StoreNodes[i].MemNode, NewStore);
  } else {
    // Replace the last store with the new store.
    CombineTo(LatestOp, NewStore);
    // Erase all other stores.
    for (unsigned i = 0; i < NumElem; ++i) {
      // Remove all Store nodes.
      if (StoreNodes[i].MemNode == LatestOp)
        continue;
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
      DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
      deleteAndRecombine(St);
    }
  }

  StoreNodes.erase(StoreNodes.begin() + NumElem, StoreNodes.end());
  return true;
}

SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {
  SDLoc SL(ST);
  SDValue ReplStore;

  // Replace the chain to avoid dependency.
  if (ST->isTruncatingStore()) {
    ReplStore = DAG.getTruncStore(BetterChain, SL, ST->getValue(),
                                  ST->getBasePtr(), ST->getMemoryVT(),
                                  ST->getMemOperand());
  } else {
    ReplStore = DAG.getStore(BetterChain, SL, ST->getValue(), ST->getBasePtr(),
                             ST->getMemOperand());
  }

  // Create token to keep both nodes around.
  SDValue Token = DAG.getNode(ISD::TokenFactor, SL,
                              MVT::Other, ST->getChain(), ReplStore);

  // Make sure the new and old chains are cleaned up.
  AddToWorklist(Token.getNode());

  // Don't add users to work list.
  return CombineTo(ST, Token, false);
}

SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
  SDValue Value = ST->getValue();
  if (Value.getOpcode() == ISD::TargetConstantFP)
    return SDValue();

  SDLoc DL(ST);

  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();

  const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);

  // NOTE: If the original store is volatile, this transform must not increase
  // the number of stores. For example, on x86-32 an f64 can be stored in one
  // processor operation but an i64 (which is not legal) requires two. So the
  // transform should not be done in this case.

  SDValue Tmp;
  switch (CFP->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unknown FP type");
  case MVT::f16: // We don't do this for these yet.
  case MVT::f80:
  case MVT::f128:
  case MVT::ppcf128:
    return SDValue();
  case MVT::f32:
    if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
        TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
      Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
  SDValue Value = ST->getValue();
  if (Value.getOpcode() == ISD::TargetConstantFP)
    return SDValue();

  SDLoc DL(ST);

  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();

  const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);

  // NOTE: If the original store is volatile, this transform must not increase
  // the number of stores. For example, on x86-32 an f64 can be stored in one
  // processor operation but an i64 (which is not legal) requires two. So the
  // transform should not be done in this case.

  SDValue Tmp;
  switch (CFP->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unknown FP type");
  case MVT::f16:    // We don't do this for these yet.
  case MVT::f80:
  case MVT::f128:
  case MVT::ppcf128:
    return SDValue();
  case MVT::f32:
    if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
        TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
      Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
                            bitcastToAPInt().getZExtValue(), SDLoc(CFP),
                            MVT::i32);
      return DAG.getStore(Chain, DL, Tmp, Ptr, ST->getMemOperand());
    }

    return SDValue();
  case MVT::f64:
    if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
         !ST->isVolatile()) ||
        TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
      Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                            getZExtValue(), SDLoc(CFP), MVT::i64);
      return DAG.getStore(Chain, DL, Tmp,
                          Ptr, ST->getMemOperand());
    }

    if (!ST->isVolatile() &&
        TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
      // Many FP stores are not made apparent until after legalize, e.g. for
      // argument passing. Since this is so common, custom legalize the
      // 64-bit integer store into two 32-bit stores.
      uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
      SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
      SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      unsigned Alignment = ST->getAlignment();
      MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
      AAMDNodes AAInfo = ST->getAAInfo();

      SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
                                 ST->getAlignment(), MMOFlags, AAInfo);
      Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
                        DAG.getConstant(4, DL, Ptr.getValueType()));
      Alignment = MinAlign(Alignment, 4U);
      SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
                                 ST->getPointerInfo().getWithOffset(4),
                                 Alignment, MMOFlags, AAInfo);
      return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                         St0, St1);
    }

    return SDValue();
  }
}
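// Worked example for replaceStoreOfFPConstant (illustrative): 'store float
// 1.0, %p' becomes 'store i32 0x3F800000, %p', since those are the bits of
// 1.0f. For 'store double 1.0, %p' on a 32-bit little-endian target without
// legal i64 stores, the bits 0x3FF0000000000000 split into
//   store i32 0x00000000, %p
//   store i32 0x3FF00000, %p+4
// joined by a TokenFactor.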
SDValue DAGCombiner::visitSTORE(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr = ST->getBasePtr();

  // If this is a store of a bit convert, store the input value if the
  // resultant store does not need a higher alignment than the original.
  if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
      ST->isUnindexed()) {
    EVT SVT = Value.getOperand(0).getValueType();
    if (((!LegalOperations && !ST->isVolatile()) ||
         TLI.isOperationLegalOrCustom(ISD::STORE, SVT)) &&
        TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT)) {
      unsigned OrigAlign = ST->getAlignment();
      bool Fast = false;
      if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT,
                                 ST->getAddressSpace(), OrigAlign, &Fast) &&
          Fast) {
        return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
                            ST->getPointerInfo(), OrigAlign,
                            ST->getMemOperand()->getFlags(), ST->getAAInfo());
      }
    }
  }

  // Turn 'store undef, Ptr' -> nothing.
  if (Value.isUndef() && ST->isUnindexed())
    return Chain;

  // Try to infer better alignment information than the store already has.
  if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > ST->getAlignment()) {
        SDValue NewStore =
            DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr,
                              ST->getPointerInfo(), ST->getMemoryVT(), Align,
                              ST->getMemOperand()->getFlags(),
                              ST->getAAInfo());
        if (NewStore.getNode() != N)
          return CombineTo(ST, NewStore, true);
      }
    }
  }

  // Try transforming a pair of floating-point load / store ops to integer
  // load / store ops.
  if (SDValue NewST = TransformFPLoadStorePair(N))
    return NewST;

  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
                                                  : DAG.getSubtarget().useAA();
#ifndef NDEBUG
  if (CombinerAAOnlyFunc.getNumOccurrences() &&
      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
    UseAA = false;
#endif
  if (UseAA && ST->isUnindexed()) {
    // FIXME: We should do this even without AA enabled. AA will just allow
    // FindBetterChain to work in more situations. The problem with this is
    // that any combine that expects memory operations to be on consecutive
    // chains first needs to be updated to look for users of the same chain.

    // Walk up chain skipping non-aliasing memory nodes, on this store and any
    // adjacent stores.
    if (findBetterNeighborChains(ST)) {
      // replaceStoreChain uses CombineTo, which handled all of the worklist
      // manipulation. Return the original node so nothing else is done.
      return SDValue(ST, 0);
    }
    Chain = ST->getChain();
  }

  // Try transforming N to an indexed store.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  // FIXME: is there such a thing as a truncating indexed store?
  if (ST->isTruncatingStore() && ST->isUnindexed() &&
      Value.getValueType().isInteger()) {
    // See if we can simplify the input to this truncstore with knowledge that
    // only the low bits are being used. For example:
    // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
    SDValue Shorter = GetDemandedBits(
        Value, APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
                                    ST->getMemoryVT().getScalarSizeInBits()));
    AddToWorklist(Value.getNode());
    if (Shorter.getNode())
      return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
                               Ptr, ST->getMemoryVT(), ST->getMemOperand());

    // Otherwise, see if we can simplify the operation with
    // SimplifyDemandedBits, which only works if the value has a single use.
    if (SimplifyDemandedBits(
            Value,
            APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
                                 ST->getMemoryVT().getScalarSizeInBits())))
      return SDValue(N, 0);
  }

  // If this is a load followed by a store to the same location, then the store
  // is dead/noop.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
        ST->isUnindexed() && !ST->isVolatile() &&
        // There can't be any side effects between the load and store, such as
        // a call or store.
        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
      // The store is dead, remove it.
      return Chain;
    }
  }
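  // Illustrative DAG for the dead-store case above (node numbers are made
  // up):
  //   t1: i32,ch = load<(load 4 from %p)> t0, %p
  //   t2: ch = store<(store 4 into %p)> t1:1, t1, %p
  // t2 writes back the value it just loaded from the same address, so t2 is
  // replaced by its input chain.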
  // If this is a store followed by a store with the same value to the same
  // location, then the store is dead/noop.
  if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
    if (ST1->getBasePtr() == Ptr && ST->getMemoryVT() == ST1->getMemoryVT() &&
        ST1->getValue() == Value && ST->isUnindexed() && !ST->isVolatile() &&
        ST1->isUnindexed() && !ST1->isVolatile()) {
      // The store is dead, remove it.
      return Chain;
    }
  }

  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
  // truncating store. We can do this even if this is already a truncstore.
  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            ST->getMemoryVT())) {
    return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
                             Ptr, ST->getMemoryVT(), ST->getMemOperand());
  }

  // Only perform this optimization before the types are legal, because we
  // don't want to perform this optimization on every DAGCombine invocation.
  if (!LegalTypes) {
    for (;;) {
      // There can be multiple store sequences on the same chain.
      // Keep trying to merge store sequences until we are unable to do so
      // or until we merge the last store on the chain.
      SmallVector<MemOpLink, 8> StoreNodes;
      bool Changed = MergeConsecutiveStores(ST, StoreNodes);
      if (!Changed) break;

      if (any_of(StoreNodes,
                 [ST](const MemOpLink &Link) { return Link.MemNode == ST; })) {
        // ST has been merged and no longer exists.
        return SDValue(N, 0);
      }
    }
  }

  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  //
  // Make sure to do this only after attempting to merge stores in order to
  // avoid changing the types of some subset of stores due to visit order,
  // preventing their merging.
  if (isa<ConstantFPSDNode>(Value)) {
    if (SDValue NewSt = replaceStoreOfFPConstant(ST))
      return NewSt;
  }

  if (SDValue NewSt = splitMergedValStore(ST))
    return NewSt;

  return ReduceLoadOpStoreWidth(N);
}
/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
///   (store (or (zext (bitcast F to i32) to i64),
///              (shl (zext I to i64), 32)), addr)  -->
///   (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into
/// hoo:
///   void goo(const std::pair<int, float> &);
///   void hoo() {
///     ...
///     goo(std::make_pair(tmp, ftmp));
///     ...
///   }
///
SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
  if (OptLevel == CodeGenOpt::None)
    return SDValue();

  SDValue Val = ST->getValue();
  SDLoc DL(ST);

  // Match OR operand.
  if (!Val.getValueType().isScalarInteger() || Val.getOpcode() != ISD::OR)
    return SDValue();

  // Match SHL operand and get Lower and Higher parts of Val.
  SDValue Op1 = Val.getOperand(0);
  SDValue Op2 = Val.getOperand(1);
  SDValue Lo, Hi;
  if (Op1.getOpcode() != ISD::SHL) {
    std::swap(Op1, Op2);
    if (Op1.getOpcode() != ISD::SHL)
      return SDValue();
  }
  Lo = Op2;
  Hi = Op1.getOperand(0);
  if (!Op1.hasOneUse())
    return SDValue();

  // Match shift amount to HalfValBitSize.
  unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
  ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
  if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
    return SDValue();

  // Lo and Hi are zero-extended from integers with size less than or equal
  // to HalfValBitSize (e.g. from i32 or smaller to i64).
  if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||
      !Lo.getOperand(0).getValueType().isScalarInteger() ||
      Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||
      Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||
      !Hi.getOperand(0).getValueType().isScalarInteger() ||
      Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)
    return SDValue();

  // Use the EVT of low and high parts before bitcast as the input
  // of target query.
  EVT LowTy = (Lo.getOperand(0).getOpcode() == ISD::BITCAST)
                  ? Lo.getOperand(0).getOperand(0).getValueType()
                  : Lo.getValueType();
  EVT HighTy = (Hi.getOperand(0).getOpcode() == ISD::BITCAST)
                   ? Hi.getOperand(0).getOperand(0).getValueType()
                   : Hi.getValueType();
  if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return SDValue();

  // Start to split store.
  unsigned Alignment = ST->getAlignment();
  MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
  AAMDNodes AAInfo = ST->getAAInfo();

  // Change the sizes of Lo and Hi's value types to HalfValBitSize.
  EVT VT = EVT::getIntegerVT(*DAG.getContext(), HalfValBitSize);
  Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Lo.getOperand(0));
  Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Hi.getOperand(0));

  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  // Lower value store.
  SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
                             ST->getAlignment(), MMOFlags, AAInfo);
  Ptr =
      DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
                  DAG.getConstant(HalfValBitSize / 8, DL, Ptr.getValueType()));
  // Higher value store.
  SDValue St1 =
      DAG.getStore(St0, DL, Hi, Ptr,
                   ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
                   Alignment / 2, MMOFlags, AAInfo);
  return St1;
}
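// Worked example for splitMergedValStore (illustrative): for an i64 store of
//   or (zext (bitcast f32 %F to i32) to i64), (shl (zext i32 %I to i64), 32)
// HalfValBitSize is 32, Lo matches the bits of %F and Hi matches %I, so on a
// target whose isMultiStoresCheaperThanBitsMerge hook accepts the (f32, i64)
// pair, the store is split into St0 at Ptr (low half) and St1 at Ptr+4 (high
// half), with St1 chained on St0 and given half the original alignment.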
SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
  SDValue InVec = N->getOperand(0);
  SDValue InVal = N->getOperand(1);
  SDValue EltNo = N->getOperand(2);
  SDLoc DL(N);

  // If the inserted element is an UNDEF, just use the input vector.
  if (InVal.isUndef())
    return InVec;

  EVT VT = InVec.getValueType();

  // If we can't generate a legal BUILD_VECTOR, exit.
  if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
    return SDValue();

  // Check that we know which element is being inserted.
  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();
  unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();

  // Canonicalize insert_vector_elt dag nodes.
  // Example:
  // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
  // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
  //
  // Do this only if the child insert_vector node has one use; also
  // do this only if indices are both constants and Idx1 < Idx0.
  if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
      && isa<ConstantSDNode>(InVec.getOperand(2))) {
    unsigned OtherElt =
        cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue();
    if (Elt < OtherElt) {
      // Swap nodes.
      SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
                                  InVec.getOperand(0), InVal, EltNo);
      AddToWorklist(NewOp.getNode());
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
                         VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
    }
  }

  // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
  // be converted to a BUILD_VECTOR). Fill in the Ops vector with the
  // vector elements.
  SmallVector<SDValue, 8> Ops;
  // Do not combine these two vectors if the output vector will not replace
  // the input vector.
  if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
    Ops.append(InVec.getNode()->op_begin(),
               InVec.getNode()->op_end());
  } else if (InVec.isUndef()) {
    unsigned NElts = VT.getVectorNumElements();
    Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
  } else {
    return SDValue();
  }

  // Insert the element.
  if (Elt < Ops.size()) {
    // All the operands of BUILD_VECTOR must have the same type;
    // we enforce that here.
    EVT OpVT = Ops[0].getValueType();
    if (InVal.getValueType() != OpVT)
      InVal = OpVT.bitsGT(InVal.getValueType()) ?
                  DAG.getNode(ISD::ANY_EXTEND, DL, OpVT, InVal) :
                  DAG.getNode(ISD::TRUNCATE, DL, OpVT, InVal);
    Ops[Elt] = InVal;
  }

  // Return the new vector.
  return DAG.getBuildVector(VT, DL, Ops);
}
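// Illustrative example of the BUILD_VECTOR path in visitINSERT_VECTOR_ELT:
//   (insert_vector_elt (build_vector a, b, c, d), x, 2)
// becomes
//   (build_vector a, b, x, d)
// provided the build_vector has no other uses.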
SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
    SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad) {
  assert(!OriginalLoad->isVolatile());

  EVT ResultVT = EVE->getValueType(0);
  EVT VecEltVT = InVecVT.getVectorElementType();
  unsigned Align = OriginalLoad->getAlignment();
  unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
      VecEltVT.getTypeForEVT(*DAG.getContext()));

  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
    return SDValue();

  ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ?
      ISD::NON_EXTLOAD : ISD::EXTLOAD;
  if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
    return SDValue();

  Align = NewAlign;

  SDValue NewPtr = OriginalLoad->getBasePtr();
  SDValue Offset;
  EVT PtrType = NewPtr.getValueType();
  MachinePointerInfo MPI;
  SDLoc DL(EVE);
  if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
    int Elt = ConstEltNo->getZExtValue();
    unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
    Offset = DAG.getConstant(PtrOff, DL, PtrType);
    MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
  } else {
    Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType);
    Offset = DAG.getNode(
        ISD::MUL, DL, PtrType, Offset,
        DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType));
    MPI = OriginalLoad->getPointerInfo();
  }
  NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, NewPtr, Offset);

  // The replacement we need to do here is a little tricky: we need to
  // replace an extractelement of a load with a load.
  // Use ReplaceAllUsesOfValuesWith to do the replacement.
  // Note that this replacement assumes that the extractvalue is the only
  // use of the load; that's okay because we don't want to perform this
  // transformation in other cases anyway.
  SDValue Load;
  SDValue Chain;
  if (ResultVT.bitsGT(VecEltVT)) {
    // If the result type of vextract is wider than the load, then issue an
    // extending load instead.
    ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT,
                                                  VecEltVT)
                                   ? ISD::ZEXTLOAD
                                   : ISD::EXTLOAD;
    Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT,
                          OriginalLoad->getChain(), NewPtr, MPI, VecEltVT,
                          Align, OriginalLoad->getMemOperand()->getFlags(),
                          OriginalLoad->getAAInfo());
    Chain = Load.getValue(1);
  } else {
    Load = DAG.getLoad(VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr,
                       MPI, Align, OriginalLoad->getMemOperand()->getFlags(),
                       OriginalLoad->getAAInfo());
    Chain = Load.getValue(1);
    if (ResultVT.bitsLT(VecEltVT))
      Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
    else
      Load = DAG.getBitcast(ResultVT, Load);
  }
  WorklistRemover DeadNodes(*this);
  SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
  SDValue To[] = { Load, Chain };
  DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  // Since we're explicitly calling ReplaceAllUses, add the new node to the
  // worklist explicitly as well.
  AddToWorklist(Load.getNode());
  AddUsersToWorklist(Load.getNode()); // Add users too.
  // Make sure to revisit this node to clean it up; it will usually be dead.
  AddToWorklist(EVE);
  ++OpsNarrowed;
  return SDValue(EVE, 0);
}
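// Illustrative example (assuming a constant index): for
//   t1: v4i32,ch = load t0, %p
//   t2: i32 = extract_vector_elt t1, Constant:i64<2>
// PtrOff is 32 * 2 / 8 == 8, so t2 is replaced by the narrowed load
//   t3: i32,ch = load t0, (add %p, 8)
// and chain users of the original load are rewired to t3's chain.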
SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
  // (vextract (scalar_to_vector val, 0) -> val
  SDValue InVec = N->getOperand(0);
  EVT VT = InVec.getValueType();
  EVT NVT = N->getValueType(0);

  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    // Check if the result type doesn't match the inserted element type. A
    // SCALAR_TO_VECTOR may truncate the inserted element and the
    // EXTRACT_VECTOR_ELT may widen the extracted vector.
    SDValue InOp = InVec.getOperand(0);
    if (InOp.getValueType() != NVT) {
      assert(InOp.getValueType().isInteger() && NVT.isInteger());
      return DAG.getSExtOrTrunc(InOp, SDLoc(InVec), NVT);
    }
    return InOp;
  }

  SDValue EltNo = N->getOperand(1);
  ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);

  // extract_vector_elt (build_vector x, y), 1 -> y
  if (ConstEltNo &&
      InVec.getOpcode() == ISD::BUILD_VECTOR &&
      TLI.isTypeLegal(VT) &&
      (InVec.hasOneUse() ||
       TLI.aggressivelyPreferBuildVectorSources(VT))) {
    SDValue Elt = InVec.getOperand(ConstEltNo->getZExtValue());
    EVT InEltVT = Elt.getValueType();

    // Sometimes build_vector's scalar input types do not match result type.
    if (NVT == InEltVT)
      return Elt;

    // TODO: It may be useful to truncate if the truncation is free and the
    // build_vector implicitly converts.
  }

  // extract_vector_elt (v2i32 (bitcast i64:x)), 0 -> i32 (trunc i64:x)
  if (ConstEltNo && InVec.getOpcode() == ISD::BITCAST && InVec.hasOneUse() &&
      ConstEltNo->isNullValue() && VT.isInteger()) {
    SDValue BCSrc = InVec.getOperand(0);
    if (BCSrc.getValueType().isScalarInteger())
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), NVT, BCSrc);
  }

  // extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val
  //
  // This only really matters if the index is non-constant since other combines
  // on the constant elements already work.
  if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT &&
      EltNo == InVec.getOperand(2)) {
    SDValue Elt = InVec.getOperand(1);
    return VT.isInteger() ? DAG.getAnyExtOrTrunc(Elt, SDLoc(N), NVT) : Elt;
  }

  // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
  // We only perform this optimization before the op legalization phase because
  // we may introduce new vector instructions which are not backed by TD
  // patterns. For example on AVX, extracting elements from a wide vector
  // without using extract_subvector. However, if we can find an underlying
  // scalar value, then we can always use that.
  if (ConstEltNo && InVec.getOpcode() == ISD::VECTOR_SHUFFLE) {
    int NumElem = VT.getVectorNumElements();
    ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
    // Find the new index to extract from.
    int OrigElt = SVOp->getMaskElt(ConstEltNo->getZExtValue());

    // Extracting an undef index is undef.
    if (OrigElt == -1)
      return DAG.getUNDEF(NVT);

    // Select the right vector half to extract from.
    SDValue SVInVec;
    if (OrigElt < NumElem) {
      SVInVec = InVec->getOperand(0);
    } else {
      SVInVec = InVec->getOperand(1);
      OrigElt -= NumElem;
    }

    if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue InOp = SVInVec.getOperand(OrigElt);
      if (InOp.getValueType() != NVT) {
        assert(InOp.getValueType().isInteger() && NVT.isInteger());
        InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT);
      }

      return InOp;
    }

    // FIXME: We should handle recursing on other vector shuffles and
    // scalar_to_vector here as well.
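    // For example (illustrative):
    //   extract_vector_elt (vector_shuffle<3,u,u,u> t1, t2), 0
    // reads element 3 of the first shuffle input, so when no scalar source
    // is found it can be rewritten as
    //   extract_vector_elt t1, 3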

    if (!LegalOperations) {
      EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT, SVInVec,
                         DAG.getConstant(OrigElt, SDLoc(SVOp), IndexTy));
    }
  }

  bool BCNumEltsChanged = false;
  EVT ExtVT = VT.getVectorElementType();
  EVT LVT = ExtVT;

  // If the result of load has to be truncated, then it's not necessarily
  // profitable.
  if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
    return SDValue();

  if (InVec.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!InVec.hasOneUse())
      return SDValue();

    EVT BCVT = InVec.getOperand(0).getValueType();
    if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
      return SDValue();
    if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
      BCNumEltsChanged = true;
    InVec = InVec.getOperand(0);
    ExtVT = BCVT.getVectorElementType();
  }

  // (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size)
  if (!LegalOperations && !ConstEltNo && InVec.hasOneUse() &&
      ISD::isNormalLoad(InVec.getNode()) &&
      !N->getOperand(1)->hasPredecessor(InVec.getNode())) {
    SDValue Index = N->getOperand(1);
    if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec)) {
      if (!OrigLoad->isVolatile()) {
        return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index,
                                                             OrigLoad);
      }
    }
  }

  // Perform only after legalization to ensure build_vector / vector_shuffle
  // optimizations have already been done.
  if (!LegalOperations) return SDValue();

  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)

  if (ConstEltNo) {
    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();

    LoadSDNode *LN0 = nullptr;
    const ShuffleVectorSDNode *SVN = nullptr;
    if (ISD::isNormalLoad(InVec.getNode())) {
      LN0 = cast<LoadSDNode>(InVec);
    } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
               InVec.getOperand(0).getValueType() == ExtVT &&
               ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      LN0 = cast<LoadSDNode>(InVec.getOperand(0));
    } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
      // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
      // =>
      // (load $addr+1*size)

      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      // If the bit convert changed the number of elements, it is unsafe
      // to examine the mask.
      if (BCNumEltsChanged)
        return SDValue();

      // Select the input vector, guarding against out of range extract vector.
      unsigned NumElems = VT.getVectorNumElements();
      int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
      InVec = (Idx < (int)NumElems) ? InVec.getOperand(0)
                                    : InVec.getOperand(1);

      if (InVec.getOpcode() == ISD::BITCAST) {
        // Don't duplicate a load with other uses.
        if (!InVec.hasOneUse())
          return SDValue();

        InVec = InVec.getOperand(0);
      }
      if (ISD::isNormalLoad(InVec.getNode())) {
        LN0 = cast<LoadSDNode>(InVec);
        Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
        EltNo = DAG.getConstant(Elt, SDLoc(EltNo), EltNo.getValueType());
      }
    }

    // Make sure we found a non-volatile load and the extractelement is
    // the only use.
    if (!LN0 || !LN0->hasNUsesOfValue(1, 0) || LN0->isVolatile())
      return SDValue();

    // If Idx was -1 above, Elt is going to be -1, so just return undef.
    if (Elt == -1)
      return DAG.getUNDEF(LVT);

    return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, EltNo, LN0);
  }

  return SDValue();
}

// Simplify (build_vec (ext )) to (bitcast (build_vec ))
SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
  // We perform this optimization post type-legalization because
  // the type-legalizer often scalarizes integer-promoted vectors.
  // Performing this optimization before may create bit-casts which
  // will be type-legalized to complex code sequences.
  // We perform this optimization only before the operation legalizer because
  // we may introduce illegal operations.
  if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
    return SDValue();

  unsigned NumInScalars = N->getNumOperands();
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // Check to see if this is a BUILD_VECTOR of a bunch of values
  // which come from any_extend or zero_extend nodes. If so, we can create
  // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
  // optimizations. We do not handle sign-extend because we can't fill the
  // sign using shuffles.
  EVT SourceType = MVT::Other;
  bool AllAnyExt = true;

  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);
    // Ignore undef inputs.
    if (In.isUndef()) continue;

    bool AnyExt  = In.getOpcode() == ISD::ANY_EXTEND;
    bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;

    // Abort if the element is not an extension.
    if (!ZeroExt && !AnyExt) {
      SourceType = MVT::Other;
      break;
    }

    // The input is a ZeroExt or AnyExt. Check the original type.
    EVT InTy = In.getOperand(0).getValueType();

    // Check that all of the widened source types are the same.
    if (SourceType == MVT::Other)
      // First time.
      SourceType = InTy;
    else if (InTy != SourceType) {
      // Multiple incoming types. Abort.
      SourceType = MVT::Other;
      break;
    }

    // Check if all of the extends are ANY_EXTENDs.
    AllAnyExt &= AnyExt;
  }

  // In order to have valid types, all of the inputs must be extended from the
  // same source type and all of the inputs must be any or zero extend.
  // Scalar sizes must be a power of two.
  EVT OutScalarTy = VT.getScalarType();
  bool ValidTypes = SourceType != MVT::Other &&
                    isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
                    isPowerOf2_32(SourceType.getSizeInBits());
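  // Illustrative example (little-endian, types assumed legal):
  //   v2i16 = BUILD_VECTOR (zero_extend i8 a), (zero_extend i8 b)
  // has SourceType i8 and ElemRatio 2, and becomes
  //   v2i16 = bitcast (v4i8 BUILD_VECTOR a, 0, b, 0)
  // where the zero filler encodes the zero-extension; with any_extend inputs
  // the filler would be undef instead.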
  // Create a new simpler BUILD_VECTOR sequence which other optimizations can
  // turn into a single shuffle instruction.
  if (!ValidTypes)
    return SDValue();

  bool isLE = DAG.getDataLayout().isLittleEndian();
  unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
  assert(ElemRatio > 1 && "Invalid element size ratio");
  SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType)
                             : DAG.getConstant(0, DL, SourceType);

  unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
  SmallVector<SDValue, 8> Ops(NewBVElems, Filler);

  // Populate the new build_vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Cast = N->getOperand(i);
    assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
            Cast.getOpcode() == ISD::ZERO_EXTEND ||
            Cast.isUndef()) && "Invalid cast opcode");
    SDValue In;
    if (Cast.isUndef())
      In = DAG.getUNDEF(SourceType);
    else
      In = Cast->getOperand(0);
    unsigned Index = isLE ? (i * ElemRatio) :
                            (i * ElemRatio + (ElemRatio - 1));

    assert(Index < Ops.size() && "Invalid index");
    Ops[Index] = In;
  }

  // The type of the new BUILD_VECTOR node.
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
  assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
         "Invalid vector size");
  // Check if the new vector type is legal.
  if (!isTypeLegal(VecVT)) return SDValue();

  // Make the new BUILD_VECTOR.
  SDValue BV = DAG.getBuildVector(VecVT, DL, Ops);

  // The new BUILD_VECTOR node has the potential to be further optimized.
  AddToWorklist(BV.getNode());
  // Bitcast to the desired type.
  return DAG.getBitcast(VT, BV);
}

SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
  EVT VT = N->getValueType(0);

  unsigned NumInScalars = N->getNumOperands();
  SDLoc DL(N);

  EVT SrcVT = MVT::Other;
  unsigned Opcode = ISD::DELETED_NODE;
  unsigned NumDefs = 0;

  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);
    unsigned Opc = In.getOpcode();

    if (Opc == ISD::UNDEF)
      continue;

    // If all scalar values are floats and converted from integers.
    if (Opcode == ISD::DELETED_NODE &&
        (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) {
      Opcode = Opc;
    }

    if (Opc != Opcode)
      return SDValue();

    EVT InVT = In.getOperand(0).getValueType();

    // If all scalar values are typed differently, bail out. It's chosen to
    // simplify BUILD_VECTOR of integer types.
    if (SrcVT == MVT::Other)
      SrcVT = InVT;
    if (SrcVT != InVT)
      return SDValue();
    NumDefs++;
  }

  // If the vector has just one element defined, it's not worth to fold it into
  // a vectorized one.
  if (NumDefs < 2)
    return SDValue();

  assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP)
         && "Should only handle conversion from integer to float.");
  assert(SrcVT != MVT::Other && "Cannot determine source type!");

  EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars);

  if (!TLI.isOperationLegalOrCustom(Opcode, NVT))
    return SDValue();
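  // Illustrative example (types assumed legal):
  //   v2f32 = BUILD_VECTOR (sint_to_fp i32 a), (sint_to_fp i32 b)
  // is rewritten as
  //   v2f32 = sint_to_fp (v2i32 BUILD_VECTOR a, b)
  // so the conversion happens once, on the whole vector.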
  // Just because the floating-point vector type is legal does not necessarily
  // mean that the corresponding integer vector type is.
  if (!isTypeLegal(NVT))
    return SDValue();

  SmallVector<SDValue, 8> Opnds;
  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);

    if (In.isUndef())
      Opnds.push_back(DAG.getUNDEF(SrcVT));
    else
      Opnds.push_back(In.getOperand(0));
  }
  SDValue BV = DAG.getBuildVector(NVT, DL, Opnds);
  AddToWorklist(BV.getNode());

  return DAG.getNode(Opcode, DL, VT, BV);
}

SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                           ArrayRef<int> VectorMask,
                                           SDValue VecIn1, SDValue VecIn2,
                                           unsigned LeftIdx) {
  MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
  SDValue ZeroIdx = DAG.getConstant(0, DL, IdxTy);

  EVT VT = N->getValueType(0);
  EVT InVT1 = VecIn1.getValueType();
  EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1;

  unsigned Vec2Offset = InVT1.getVectorNumElements();
  unsigned NumElems = VT.getVectorNumElements();
  unsigned ShuffleNumElems = NumElems;

  // We can't generate a shuffle node with mismatched input and output types.
  // Try to make the types match the type of the output.
  if (InVT1 != VT || InVT2 != VT) {
    if ((VT.getSizeInBits() % InVT1.getSizeInBits() == 0) && InVT1 == InVT2) {
      // If the output vector length is a multiple of both input lengths,
      // we can concatenate them and pad the rest with undefs.
      unsigned NumConcats = VT.getSizeInBits() / InVT1.getSizeInBits();
      assert(NumConcats >= 2 && "Concat needs at least two inputs!");
      SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1));
      ConcatOps[0] = VecIn1;
      ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1);
      VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
      VecIn2 = SDValue();
    } else if (InVT1.getSizeInBits() == VT.getSizeInBits() * 2) {
      if (!TLI.isExtractSubvectorCheap(VT, NumElems))
        return SDValue();

      if (!VecIn2.getNode()) {
        // If we only have one input vector, and it's twice the size of the
        // output, split it in two.
        VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1,
                             DAG.getConstant(NumElems, DL, IdxTy));
        VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, ZeroIdx);
        // Since we now have shorter input vectors, adjust the offset of the
        // second vector's start.
        Vec2Offset = NumElems;
      } else if (InVT2.getSizeInBits() <= InVT1.getSizeInBits()) {
        // VecIn1 is wider than the output, and we have another, possibly
        // smaller input. Pad the smaller input with undefs, shuffle at the
        // input vector width, and extract the output.
        // The shuffle type is different than VT, so check legality again.
        if (LegalOperations &&
            !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
          return SDValue();

        if (InVT1 != InVT2)
          VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
                               DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
        ShuffleNumElems = NumElems * 2;
      } else {
        // Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is
        // wider than VecIn1. We can't handle this for now - this case will
        // disappear when we start sorting the vectors by type.
        return SDValue();
      }
    } else {
      // TODO: Support cases where the length mismatch isn't exactly by a
      // factor of 2.
      // TODO: Move this check upwards, so that if we have bad type
      // mismatches, we don't create any DAG nodes.
      return SDValue();
    }
  }

  // Initialize mask to undef.
  SmallVector<int, 8> Mask(ShuffleNumElems, -1);

  // Only need to run up to the number of elements actually used, not the
  // total number of elements in the shuffle - if we are shuffling a wider
  // vector, the high lanes should be set to undef.
  for (unsigned i = 0; i != NumElems; ++i) {
    if (VectorMask[i] <= 0)
      continue;

    unsigned ExtIndex = N->getOperand(i).getConstantOperandVal(1);
    if (VectorMask[i] == (int)LeftIdx) {
      Mask[i] = ExtIndex;
    } else if (VectorMask[i] == (int)LeftIdx + 1) {
      Mask[i] = Vec2Offset + ExtIndex;
    }
  }

  // The type the input vectors may have changed above.
  InVT1 = VecIn1.getValueType();

  // If we already have a VecIn2, it should have the same type as VecIn1.
  // If we don't, get an undef/zero vector of the appropriate type.
  VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(InVT1);
  assert(InVT1 == VecIn2.getValueType() && "Unexpected second input type.");

  SDValue Shuffle = DAG.getVectorShuffle(InVT1, DL, VecIn1, VecIn2, Mask);
  if (ShuffleNumElems > NumElems)
    Shuffle = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuffle, ZeroIdx);

  return Shuffle;
}

// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
// operations. If the types of the vectors we're extracting from allow it,
// turn this into a vector_shuffle node.
SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
  if (!isTypeLegal(VT))
    return SDValue();

  // May only combine to shuffle after legalize if shuffle is legal.
  if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
    return SDValue();

  bool UsesZeroVector = false;
  unsigned NumElems = N->getNumOperands();

  // Record, for each element of the newly built vector, which input vector
  // that element comes from. -1 stands for undef, 0 for the zero vector,
  // and positive values for the input vectors.
  // VectorMask maps each element to its vector number, and VecIn maps vector
  // numbers to their initial SDValues.

  SmallVector<int, 8> VectorMask(NumElems, -1);
  SmallVector<SDValue, 8> VecIn;
  VecIn.push_back(SDValue());

  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.isUndef())
      continue;

    // See if we can use a blend with a zero vector.
    // TODO: Should we generalize this to a blend with an arbitrary constant
    // vector?
    if (isNullConstant(Op) || isNullFPConstant(Op)) {
      UsesZeroVector = true;
      VectorMask[i] = 0;
      continue;
    }

    // Not an undef or zero. If the input is something other than an
    // EXTRACT_VECTOR_ELT with a constant index, bail out.
    if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();

    SDValue ExtractedFromVec = Op.getOperand(0);

    // All inputs must have the same element type as the output.
    if (VT.getVectorElementType() !=
        ExtractedFromVec.getValueType().getVectorElementType())
      return SDValue();

    // Have we seen this input vector before?
    // The vectors are expected to be tiny (usually 1 or 2 elements), so using
    // a map back from SDValues to numbers isn't worth it.
    unsigned Idx = std::distance(
        VecIn.begin(), std::find(VecIn.begin(), VecIn.end(), ExtractedFromVec));
    if (Idx == VecIn.size())
      VecIn.push_back(ExtractedFromVec);

    VectorMask[i] = Idx;
  }

  // If we didn't find at least one input vector, bail out.
  if (VecIn.size() < 2)
    return SDValue();

  // TODO: We want to sort the vectors by descending length, so that adjacent
  // pairs have similar length, and the longer vector is always first in the
  // pair.

  // TODO: Should this fire if some of the input vectors has illegal type (like
  // it does now), or should we let legalization run its course first?

  // Shuffle phase:
  // Take pairs of vectors, and shuffle them so that the result has elements
  // from these vectors in the correct places.
  // For example, given:
  // t10: i32 = extract_vector_elt t1, Constant:i64<0>
  // t11: i32 = extract_vector_elt t2, Constant:i64<0>
  // t12: i32 = extract_vector_elt t3, Constant:i64<0>
  // t13: i32 = extract_vector_elt t1, Constant:i64<1>
  // t14: v4i32 = BUILD_VECTOR t10, t11, t12, t13
  // We will generate:
  // t20: v4i32 = vector_shuffle<0,4,u,1> t1, t2
  // t21: v4i32 = vector_shuffle<u,u,0,u> t3, undef
  SmallVector<SDValue, 4> Shuffles;
  for (unsigned In = 0, Len = (VecIn.size() / 2); In < Len; ++In) {
    unsigned LeftIdx = 2 * In + 1;
    SDValue VecLeft = VecIn[LeftIdx];
    SDValue VecRight =
        (LeftIdx + 1) < VecIn.size() ? VecIn[LeftIdx + 1] : SDValue();

    if (SDValue Shuffle = createBuildVecShuffle(DL, N, VectorMask, VecLeft,
                                                VecRight, LeftIdx))
      Shuffles.push_back(Shuffle);
    else
      return SDValue();
  }

  // If we need the zero vector as an "ingredient" in the blend tree, add it
  // to the list of shuffles.
  if (UsesZeroVector)
    Shuffles.push_back(VT.isInteger() ? DAG.getConstant(0, DL, VT)
                                      : DAG.getConstantFP(0.0, DL, VT));

  // If we only have one shuffle, we're done.
  if (Shuffles.size() == 1)
    return Shuffles[0];

  // Update the vector mask to point to the post-shuffle vectors.
  for (int &Vec : VectorMask)
    if (Vec == 0)
      Vec = Shuffles.size() - 1;
    else
      Vec = (Vec - 1) / 2;

  // More than one shuffle. Generate a binary tree of blends, e.g. if from
  // the previous step we got the set of shuffles t10, t11, t12, t13, we will
  // generate:
  // t10: v8i32 = vector_shuffle<0,8,u,u,u,u,u,u> t1, t2
  // t11: v8i32 = vector_shuffle<u,u,0,8,u,u,u,u> t3, t4
  // t12: v8i32 = vector_shuffle<u,u,u,u,0,8,u,u> t5, t6
  // t13: v8i32 = vector_shuffle<u,u,u,u,u,u,0,8> t7, t8
  // t20: v8i32 = vector_shuffle<0,1,10,11,u,u,u,u> t10, t11
  // t21: v8i32 = vector_shuffle<u,u,u,u,4,5,14,15> t12, t13
  // t30: v8i32 = vector_shuffle<0,1,2,3,12,13,14,15> t20, t21

  // Make sure the initial size of the shuffle list is even.
  if (Shuffles.size() % 2)
    Shuffles.push_back(DAG.getUNDEF(VT));

  for (unsigned CurSize = Shuffles.size(); CurSize > 1; CurSize /= 2) {
    if (CurSize % 2) {
      Shuffles[CurSize] = DAG.getUNDEF(VT);
      CurSize++;
    }
    for (unsigned In = 0, Len = CurSize / 2; In < Len; ++In) {
      int Left = 2 * In;
      int Right = 2 * In + 1;
      SmallVector<int, 8> Mask(NumElems, -1);
      for (unsigned i = 0; i != NumElems; ++i) {
        if (VectorMask[i] == Left) {
          Mask[i] = i;
          VectorMask[i] = In;
        } else if (VectorMask[i] == Right) {
          Mask[i] = i + NumElems;
          VectorMask[i] = In;
        }
      }

      Shuffles[In] =
          DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
    }
  }

  return Shuffles[0];
}

SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);

  // A vector built entirely of undefs is undef.
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  if (SDValue V = reduceBuildVecExtToExtBuildVec(N))
    return V;

  if (SDValue V = reduceBuildVecConvertToConvertBuildVec(N))
    return V;

  if (SDValue V = reduceBuildVecToShuffle(N))
    return V;

  return SDValue();
}

static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT OpVT = N->getOperand(0).getValueType();

  // If the operands are legal vectors, leave them alone.
  if (TLI.isTypeLegal(OpVT))
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SmallVector<SDValue, 8> Ops;

  EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
  SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);

  // Keep track of what we encounter.
  bool AnyInteger = false;
  bool AnyFP = false;
  for (const SDValue &Op : N->ops()) {
    if (ISD::BITCAST == Op.getOpcode() &&
        !Op.getOperand(0).getValueType().isVector())
      Ops.push_back(Op.getOperand(0));
    else if (ISD::UNDEF == Op.getOpcode())
      Ops.push_back(ScalarUndef);
    else
      return SDValue();

    // Note whether we encounter an integer or floating point scalar.
    // If it's neither, bail out, it could be something weird like x86mmx.
    EVT LastOpVT = Ops.back().getValueType();
    if (LastOpVT.isFloatingPoint())
      AnyFP = true;
    else if (LastOpVT.isInteger())
      AnyInteger = true;
    else
      return SDValue();
  }

  // If any of the operands is a floating point scalar bitcast to a vector,
  // use floating point types throughout, and bitcast everything.
  // Replace UNDEFs by another scalar UNDEF node, of the final desired type.
  if (AnyFP) {
    SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
    ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
    if (AnyInteger) {
      for (SDValue &Op : Ops) {
        if (Op.getValueType() == SVT)
          continue;
        if (Op.isUndef())
          Op = ScalarUndef;
        else
          Op = DAG.getBitcast(SVT, Op);
      }
    }
  }

  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
                               VT.getSizeInBits() / SVT.getSizeInBits());
  return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
}
// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
// operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at
// most two distinct vectors the same size as the result, attempt to turn this
// into a legal shuffle.
static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  EVT OpVT = N->getOperand(0).getValueType();
  int NumElts = VT.getVectorNumElements();
  int NumOpElts = OpVT.getVectorNumElements();

  SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT);
  SmallVector<int, 8> Mask;

  for (SDValue Op : N->ops()) {
    // Peek through any bitcast.
    while (Op.getOpcode() == ISD::BITCAST)
      Op = Op.getOperand(0);

    // UNDEF nodes convert to UNDEF shuffle mask values.
    if (Op.isUndef()) {
      Mask.append((unsigned)NumOpElts, -1);
      continue;
    }

    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // What vector are we extracting the subvector from and at what index?
    SDValue ExtVec = Op.getOperand(0);

    // We want the EVT of the original extraction to correctly scale the
    // extraction index.
    EVT ExtVT = ExtVec.getValueType();

    // Peek through any bitcast.
    while (ExtVec.getOpcode() == ISD::BITCAST)
      ExtVec = ExtVec.getOperand(0);

    // UNDEF nodes convert to UNDEF shuffle mask values.
    if (ExtVec.isUndef()) {
      Mask.append((unsigned)NumOpElts, -1);
      continue;
    }

    if (!isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    int ExtIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

    // Ensure that we are extracting a subvector from a vector the same
    // size as the result.
    if (ExtVT.getSizeInBits() != VT.getSizeInBits())
      return SDValue();

    // Scale the subvector index to account for any bitcast.
    int NumExtElts = ExtVT.getVectorNumElements();
    if (0 == (NumExtElts % NumElts))
      ExtIdx /= (NumExtElts / NumElts);
    else if (0 == (NumElts % NumExtElts))
      ExtIdx *= (NumElts / NumExtElts);
    else
      return SDValue();

    // At most we can reference 2 inputs in the final shuffle.
    if (SV0.isUndef() || SV0 == ExtVec) {
      SV0 = ExtVec;
      for (int i = 0; i != NumOpElts; ++i)
        Mask.push_back(i + ExtIdx);
    } else if (SV1.isUndef() || SV1 == ExtVec) {
      SV1 = ExtVec;
      for (int i = 0; i != NumOpElts; ++i)
        Mask.push_back(i + ExtIdx + NumElts);
    } else {
      return SDValue();
    }
  }

  if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(Mask, VT))
    return SDValue();

  return DAG.getVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0),
                              DAG.getBitcast(VT, SV1), Mask);
}
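// Illustrative example for combineConcatVectorOfExtracts (v8i32 types
// assumed legal): with t1: v8i32,
//   concat_vectors (extract_subvector t1, 4), (extract_subvector t1, 0)
// references a single source the same size as the result, so it becomes
//   vector_shuffle<4,5,6,7,0,1,2,3> t1, undef
// provided the target reports that shuffle mask as legal.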
SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
  // If we only have one input vector, we don't need to do any concatenation.
  if (N->getNumOperands() == 1)
    return N->getOperand(0);

  // Check if all of the operands are undefs.
  EVT VT = N->getValueType(0);
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  // Optimize concat_vectors where all but the first of the vectors are undef.
  if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) {
        return Op.isUndef();
      })) {
    SDValue In = N->getOperand(0);
    assert(In.getValueType().isVector() && "Must concat vectors");

    // Transform: concat_vectors(scalar, undef) -> scalar_to_vector(sclr).
    if (In->getOpcode() == ISD::BITCAST &&
        !In->getOperand(0)->getValueType(0).isVector()) {
      SDValue Scalar = In->getOperand(0);

      // If the bitcast type isn't legal, it might be a trunc of a legal type;
      // look through the trunc so we can still do the transform:
      //   concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
      if (Scalar->getOpcode() == ISD::TRUNCATE &&
          !TLI.isTypeLegal(Scalar.getValueType()) &&
          TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
        Scalar = Scalar->getOperand(0);

      EVT SclTy = Scalar->getValueType(0);

      if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
        return SDValue();

      EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy,
                                 VT.getSizeInBits() / SclTy.getSizeInBits());
      if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
        return SDValue();

      SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar);
      return DAG.getBitcast(VT, Res);
    }
  }

  // Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR.
  // We have already tested above for an UNDEF only concatenation.
  // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
  // -> (BUILD_VECTOR A, B, ..., C, D, ...)
  auto IsBuildVectorOrUndef = [](const SDValue &Op) {
    return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
  };
  if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
    SmallVector<SDValue, 8> Opnds;
    EVT SVT = VT.getScalarType();

    EVT MinVT = SVT;
    if (!SVT.isFloatingPoint()) {
      // If the BUILD_VECTORs are built from integers, they may have different
      // operand types. Get the smallest type and truncate all operands to it.
      bool FoundMinVT = false;
      for (const SDValue &Op : N->ops())
        if (ISD::BUILD_VECTOR == Op.getOpcode()) {
          EVT OpSVT = Op.getOperand(0)->getValueType(0);
          MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
          FoundMinVT = true;
        }
      assert(FoundMinVT && "Concat vector type mismatch");
    }

    for (const SDValue &Op : N->ops()) {
      EVT OpVT = Op.getValueType();
      unsigned NumElts = OpVT.getVectorNumElements();

      if (ISD::UNDEF == Op.getOpcode())
        Opnds.append(NumElts, DAG.getUNDEF(MinVT));

      if (ISD::BUILD_VECTOR == Op.getOpcode()) {
        if (SVT.isFloatingPoint()) {
          assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
          Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
        } else {
          for (unsigned i = 0; i != NumElts; ++i)
            Opnds.push_back(
                DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
        }
      }
    }

    assert(VT.getVectorNumElements() == Opnds.size() &&
           "Concat vector type mismatch");
    return DAG.getBuildVector(VT, SDLoc(N), Opnds);
  }
  // Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
  if (SDValue V = combineConcatVectorOfScalars(N, DAG))
    return V;

  // Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
  if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
    if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
      return V;

  // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
  // nodes often generate nop CONCAT_VECTOR nodes.
  // Scan the CONCAT_VECTOR operands and look for a CONCAT operation that
  // places the incoming vectors at the exact same location.
  SDValue SingleSource = SDValue();
  unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.isUndef())
      continue;

    // Check if this is the identity extract:
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // Find the single incoming vector for the extract_subvector.
    if (SingleSource.getNode()) {
      if (Op.getOperand(0) != SingleSource)
        return SDValue();
    } else {
      SingleSource = Op.getOperand(0);

      // Check the source type is the same as the type of the result.
      // If not, this concat may extend the vector, so we can not
      // optimize it away.
      if (SingleSource.getValueType() != N->getValueType(0))
        return SDValue();
    }

    unsigned IdentityIndex = i * PartNumElem;
    ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    // The extract index must be constant.
    if (!CS)
      return SDValue();

    // Check that we are reading from the identity index.
    if (CS->getZExtValue() != IdentityIndex)
      return SDValue();
  }

  if (SingleSource.getNode())
    return SingleSource;

  return SDValue();
}

SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
  EVT NVT = N->getValueType(0);
  SDValue V = N->getOperand(0);

  if (V->getOpcode() == ISD::CONCAT_VECTORS) {
    // Combine:
    //    (extract_subvec (concat V1, V2, ...), i)
    // Into:
    //    Vi if possible
    // Only operand 0 is checked as 'concat' assumes all inputs of the same
    // type.
    if (V->getOperand(0).getValueType() != NVT)
      return SDValue();
    unsigned Idx = N->getConstantOperandVal(1);
    unsigned NumElems = NVT.getVectorNumElements();
    assert((Idx % NumElems) == 0 &&
           "IDX in concat is not a multiple of the result vector length.");
    return V->getOperand(Idx / NumElems);
  }

  // Skip bitcasting.
  if (V->getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
    // Handle only the simple case where the vector being inserted and the
    // vector being extracted are of the same type, and are half the size of
    // the larger vector.
    EVT BigVT = V->getOperand(0).getValueType();
    EVT SmallVT = V->getOperand(1).getValueType();
    if (!NVT.bitsEq(SmallVT) || NVT.getSizeInBits()*2 != BigVT.getSizeInBits())
      return SDValue();
    ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
    ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));

    if (InsIdx && ExtIdx &&
        InsIdx->getValueType(0).getSizeInBits() <= 64 &&
        ExtIdx->getValueType(0).getSizeInBits() <= 64) {
      // Combine:
      //    (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
      // Into:
      //    indices are equal or bit offsets are equal => V1
      //    otherwise => (extract_subvec V1, ExtIdx)
      if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() ==
          ExtIdx->getZExtValue() * NVT.getScalarSizeInBits())
        return DAG.getBitcast(NVT, V->getOperand(1));
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT,
          DAG.getBitcast(N->getOperand(0).getValueType(), V->getOperand(0)),
          N->getOperand(1));
    }
  }

  return SDValue();
}

static SDValue simplifyShuffleOperandRecursively(SmallBitVector &UsedElements,
                                                 SDValue V, SelectionDAG &DAG) {
  SDLoc DL(V);
  EVT VT = V.getValueType();

  switch (V.getOpcode()) {
  default:
    return V;

  case ISD::CONCAT_VECTORS: {
    EVT OpVT = V->getOperand(0).getValueType();
    int OpSize = OpVT.getVectorNumElements();
    SmallBitVector OpUsedElements(OpSize, false);
    bool FoundSimplification = false;
    SmallVector<SDValue, 4> NewOps;
    NewOps.reserve(V->getNumOperands());
    for (int i = 0, NumOps = V->getNumOperands(); i < NumOps; ++i) {
      SDValue Op = V->getOperand(i);
      bool OpUsed = false;
      for (int j = 0; j < OpSize; ++j)
        if (UsedElements[i * OpSize + j]) {
          OpUsedElements[j] = true;
          OpUsed = true;
        }
      NewOps.push_back(
          OpUsed ? simplifyShuffleOperandRecursively(OpUsedElements, Op, DAG)
                 : DAG.getUNDEF(OpVT));
      FoundSimplification |= Op != NewOps.back();
      OpUsedElements.reset();
    }
    if (FoundSimplification)
      V = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, NewOps);
    return V;
  }

  case ISD::INSERT_SUBVECTOR: {
    SDValue BaseV = V->getOperand(0);
    SDValue SubV = V->getOperand(1);
    auto *IdxN = dyn_cast<ConstantSDNode>(V->getOperand(2));
    if (!IdxN)
      return V;

    int SubSize = SubV.getValueType().getVectorNumElements();
    int Idx = IdxN->getZExtValue();
    bool SubVectorUsed = false;
    SmallBitVector SubUsedElements(SubSize, false);
    for (int i = 0; i < SubSize; ++i)
      if (UsedElements[i + Idx]) {
        SubVectorUsed = true;
        SubUsedElements[i] = true;
        UsedElements[i + Idx] = false;
      }

    // Now recurse on both the base and sub vectors.
    SDValue SimplifiedSubV =
        SubVectorUsed
            ?
              simplifyShuffleOperandRecursively(SubUsedElements, SubV, DAG)
            : DAG.getUNDEF(SubV.getValueType());
    SDValue SimplifiedBaseV =
        simplifyShuffleOperandRecursively(UsedElements, BaseV, DAG);
    if (SimplifiedSubV != SubV || SimplifiedBaseV != BaseV)
      V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                      SimplifiedBaseV, SimplifiedSubV, V->getOperand(2));
    return V;
  }
  }
}

static SDValue simplifyShuffleOperands(ShuffleVectorSDNode *SVN, SDValue N0,
                                       SDValue N1, SelectionDAG &DAG) {
  EVT VT = SVN->getValueType(0);
  int NumElts = VT.getVectorNumElements();
  SmallBitVector N0UsedElements(NumElts, false), N1UsedElements(NumElts, false);
  for (int M : SVN->getMask())
    if (M >= 0 && M < NumElts)
      N0UsedElements[M] = true;
    else if (M >= NumElts)
      N1UsedElements[M - NumElts] = true;

  SDValue S0 = simplifyShuffleOperandRecursively(N0UsedElements, N0, DAG);
  SDValue S1 = simplifyShuffleOperandRecursively(N1UsedElements, N1, DAG);
  if (S0 == N0 && S1 == N1)
    return SDValue();

  return DAG.getVectorShuffle(VT, SDLoc(SVN), S0, S1, SVN->getMask());
}

// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat, or to
// turn a shuffle of a single concat into a simpler shuffle followed by a
// concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  SmallVector<SDValue, 4> Ops;
  EVT ConcatVT = N0.getOperand(0).getValueType();
  unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
  unsigned NumConcats = NumElts / NumElemsPerConcat;

  // Special case: shuffle(concat(A,B)) can be more efficiently represented
  // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
  // half vector elements.
  if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
      std::all_of(SVN->getMask().begin() + NumElemsPerConcat,
                  SVN->getMask().end(), [](int i) { return i == -1; })) {
    N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0),
                              N0.getOperand(1),
                              makeArrayRef(SVN->getMask().begin(),
                                           NumElemsPerConcat));
    N1 = DAG.getUNDEF(ConcatVT);
    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1);
  }

  // Look at every vector that's inserted. We're looking for exact
  // subvector-sized copies from a concatenated vector.
  for (unsigned I = 0; I != NumConcats; ++I) {
    // Make sure we're dealing with a copy.
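    // Sketch: with NumElemsPerConcat == 4, the mask chunk <8,9,10,11> copies
    // operand 2 of the concatenated inputs wholesale, and an all-undef chunk
    // <-1,-1,-1,-1> becomes an UNDEF operand; a non-aligned or non-contiguous
    // chunk such as <8,9,11,10> defeats the fold.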
    unsigned Begin = I * NumElemsPerConcat;
    bool AllUndef = true, NoUndef = true;
    for (unsigned J = Begin; J != Begin + NumElemsPerConcat; ++J) {
      if (SVN->getMaskElt(J) >= 0)
        AllUndef = false;
      else
        NoUndef = false;
    }

    if (NoUndef) {
      if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0)
        return SDValue();

      for (unsigned J = 1; J != NumElemsPerConcat; ++J)
        if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J))
          return SDValue();

      unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat;
      if (FirstElt < N0.getNumOperands())
        Ops.push_back(N0.getOperand(FirstElt));
      else
        Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands()));

    } else if (AllUndef) {
      Ops.push_back(DAG.getUNDEF(N0.getOperand(0).getValueType()));
    } else { // Mixed with general masks and undefs, can't do optimization.
      return SDValue();
    }
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}

// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
//
// SHUFFLE(BUILD_VECTOR(), BUILD_VECTOR()) -> BUILD_VECTOR() is always
// a simplification in some sense, but it isn't appropriate in general: some
// BUILD_VECTORs are substantially cheaper than others. The general case
// of a BUILD_VECTOR requires inserting each element individually (or
// performing the equivalent in a temporary stack variable). A BUILD_VECTOR of
// all constants is a single constant pool load. A BUILD_VECTOR where each
// element is identical is a splat. A BUILD_VECTOR where most of the operands
// are undef lowers to a small number of element insertions.
//
// To deal with this, we currently use a bunch of mostly arbitrary heuristics.
// We don't fold shuffles where one side is a non-zero constant, and we don't
// fold shuffles if the resulting BUILD_VECTOR would have duplicate
// non-constant operands. This seems to work out reasonably well in practice.
static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
                                       SelectionDAG &DAG,
                                       const TargetLowering &TLI) {
  EVT VT = SVN->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  SDValue N0 = SVN->getOperand(0);
  SDValue N1 = SVN->getOperand(1);

  if (!N0->hasOneUse() || !N1->hasOneUse())
    return SDValue();
  // If only one of N0,N1 is constant, bail out if it is not ALL_ZEROS as
  // discussed above.
  if (!N1.isUndef()) {
    bool N0AnyConst = isAnyConstantBuildVector(N0.getNode());
    bool N1AnyConst = isAnyConstantBuildVector(N1.getNode());
    if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode()))
      return SDValue();
    if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode()))
      return SDValue();
  }

  SmallVector<SDValue, 8> Ops;
  SmallSet<SDValue, 16> DuplicateOps;
  for (int M : SVN->getMask()) {
    SDValue Op = DAG.getUNDEF(VT.getScalarType());
    if (M >= 0) {
      int Idx = M < (int)NumElts ? M : M - NumElts;
      SDValue &S = (M < (int)NumElts ?
                        N0 : N1);
      if (S.getOpcode() == ISD::BUILD_VECTOR) {
        Op = S.getOperand(Idx);
      } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        if (Idx == 0)
          Op = S.getOperand(0);
      } else {
        // Operand can't be combined - bail out.
        return SDValue();
      }
    }

    // Don't duplicate a non-constant BUILD_VECTOR operand; semantically, this
    // is fine, but it's likely to generate low-quality code if the target
    // can't reconstruct an appropriate shuffle.
    if (!Op.isUndef() && !isa<ConstantSDNode>(Op) && !isa<ConstantFPSDNode>(Op))
      if (!DuplicateOps.insert(Op).second)
        return SDValue();

    Ops.push_back(Op);
  }
  // BUILD_VECTOR requires all inputs to be of the same type; find the
  // maximum type and extend them all.
  EVT SVT = VT.getScalarType();
  if (SVT.isInteger())
    for (SDValue &Op : Ops)
      SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
  if (SVT != VT.getScalarType())
    for (SDValue &Op : Ops)
      Op = TLI.isZExtFree(Op.getValueType(), SVT)
               ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
               : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT);
  return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
}

SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");

  // Canonicalize shuffle undef, undef -> undef
  if (N0.isUndef() && N1.isUndef())
    return DAG.getUNDEF(VT);

  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  // Canonicalize shuffle v, v -> v, undef
  if (N0 == N1) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) Idx -= NumElts;
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT), NewMask);
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N0.isUndef())
    return DAG.getCommutedVectorShuffle(*SVN);

  // Remove references to rhs if it is undef
  if (N1.isUndef()) {
    bool Changed = false;
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) {
        Idx = -1;
        Changed = true;
      }
      NewMask.push_back(Idx);
    }
    if (Changed)
      return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, NewMask);
  }

  // If it is a splat, check if the argument vector is another splat or a
  // build_vector.
  if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
    SDNode *V = N0.getNode();

    // If this is a bit convert that changes the element type of the vector but
    // not the number of vector elements, look through it. Be careful not to
    // look through conversions that change things like v4f32 to v2f64.
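    // e.g. looking from v4i32 through to a v4f32 source is safe (the lanes
    // line up), but a v4f32 -> v2f64 bitcast merges lanes, so the splat
    // index would no longer name the same bits.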
13940 if (V->getOpcode() == ISD::BITCAST) { 13941 SDValue ConvInput = V->getOperand(0); 13942 if (ConvInput.getValueType().isVector() && 13943 ConvInput.getValueType().getVectorNumElements() == NumElts) 13944 V = ConvInput.getNode(); 13945 } 13946 13947 if (V->getOpcode() == ISD::BUILD_VECTOR) { 13948 assert(V->getNumOperands() == NumElts && 13949 "BUILD_VECTOR has wrong number of operands"); 13950 SDValue Base; 13951 bool AllSame = true; 13952 for (unsigned i = 0; i != NumElts; ++i) { 13953 if (!V->getOperand(i).isUndef()) { 13954 Base = V->getOperand(i); 13955 break; 13956 } 13957 } 13958 // Splat of <u, u, u, u>, return <u, u, u, u> 13959 if (!Base.getNode()) 13960 return N0; 13961 for (unsigned i = 0; i != NumElts; ++i) { 13962 if (V->getOperand(i) != Base) { 13963 AllSame = false; 13964 break; 13965 } 13966 } 13967 // Splat of <x, x, x, x>, return <x, x, x, x> 13968 if (AllSame) 13969 return N0; 13970 13971 // Canonicalize any other splat as a build_vector. 13972 const SDValue &Splatted = V->getOperand(SVN->getSplatIndex()); 13973 SmallVector<SDValue, 8> Ops(NumElts, Splatted); 13974 SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops); 13975 13976 // We may have jumped through bitcasts, so the type of the 13977 // BUILD_VECTOR may not match the type of the shuffle. 13978 if (V->getValueType(0) != VT) 13979 NewBV = DAG.getBitcast(VT, NewBV); 13980 return NewBV; 13981 } 13982 } 13983 13984 // There are various patterns used to build up a vector from smaller vectors, 13985 // subvectors, or elements. Scan chains of these and replace unused insertions 13986 // or components with undef. 13987 if (SDValue S = simplifyShuffleOperands(SVN, N0, N1, DAG)) 13988 return S; 13989 13990 if (N0.getOpcode() == ISD::CONCAT_VECTORS && 13991 Level < AfterLegalizeVectorOps && 13992 (N1.isUndef() || 13993 (N1.getOpcode() == ISD::CONCAT_VECTORS && 13994 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) { 13995 if (SDValue V = partitionShuffleOfConcats(N, DAG)) 13996 return V; 13997 } 13998 13999 // Attempt to combine a shuffle of 2 inputs of 'scalar sources' - 14000 // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR. 14001 if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT)) 14002 if (SDValue Res = combineShuffleOfScalars(SVN, DAG, TLI)) 14003 return Res; 14004 14005 // If this shuffle only has a single input that is a bitcasted shuffle, 14006 // attempt to merge the 2 shuffles and suitably bitcast the inputs/output 14007 // back to their original types. 14008 if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() && 14009 N1.isUndef() && Level < AfterLegalizeVectorOps && 14010 TLI.isTypeLegal(VT)) { 14011 14012 // Peek through the bitcast only if there is one user. 14013 SDValue BC0 = N0; 14014 while (BC0.getOpcode() == ISD::BITCAST) { 14015 if (!BC0.hasOneUse()) 14016 break; 14017 BC0 = BC0.getOperand(0); 14018 } 14019 14020 auto ScaleShuffleMask = [](ArrayRef<int> Mask, int Scale) { 14021 if (Scale == 1) 14022 return SmallVector<int, 8>(Mask.begin(), Mask.end()); 14023 14024 SmallVector<int, 8> NewMask; 14025 for (int M : Mask) 14026 for (int s = 0; s != Scale; ++s) 14027 NewMask.push_back(M < 0 ? -1 : Scale * M + s); 14028 return NewMask; 14029 }; 14030 14031 if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) { 14032 EVT SVT = VT.getScalarType(); 14033 EVT InnerVT = BC0->getValueType(0); 14034 EVT InnerSVT = InnerVT.getScalarType(); 14035 14036 // Determine which shuffle works with the smaller scalar type. 
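      // Sketch: for a v4i32 shuffle wrapping a bitcast v2i64 shuffle, both
      // masks are rescaled to i32 lanes; ScaleShuffleMask turns the inner
      // v2i64 mask <1,0> into <2,3,0,1>, expanding each index into Scale
      // consecutive sub-lane indices.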
14037 EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT; 14038 EVT ScaleSVT = ScaleVT.getScalarType(); 14039 14040 if (TLI.isTypeLegal(ScaleVT) && 14041 0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) && 14042 0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) { 14043 14044 int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits(); 14045 int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits(); 14046 14047 // Scale the shuffle masks to the smaller scalar type. 14048 ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0); 14049 SmallVector<int, 8> InnerMask = 14050 ScaleShuffleMask(InnerSVN->getMask(), InnerScale); 14051 SmallVector<int, 8> OuterMask = 14052 ScaleShuffleMask(SVN->getMask(), OuterScale); 14053 14054 // Merge the shuffle masks. 14055 SmallVector<int, 8> NewMask; 14056 for (int M : OuterMask) 14057 NewMask.push_back(M < 0 ? -1 : InnerMask[M]); 14058 14059 // Test for shuffle mask legality over both commutations. 14060 SDValue SV0 = BC0->getOperand(0); 14061 SDValue SV1 = BC0->getOperand(1); 14062 bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT); 14063 if (!LegalMask) { 14064 std::swap(SV0, SV1); 14065 ShuffleVectorSDNode::commuteMask(NewMask); 14066 LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT); 14067 } 14068 14069 if (LegalMask) { 14070 SV0 = DAG.getBitcast(ScaleVT, SV0); 14071 SV1 = DAG.getBitcast(ScaleVT, SV1); 14072 return DAG.getBitcast( 14073 VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask)); 14074 } 14075 } 14076 } 14077 } 14078 14079 // Canonicalize shuffles according to rules: 14080 // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A) 14081 // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B) 14082 // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B) 14083 if (N1.getOpcode() == ISD::VECTOR_SHUFFLE && 14084 N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG && 14085 TLI.isTypeLegal(VT)) { 14086 // The incoming shuffle must be of the same type as the result of the 14087 // current shuffle. 14088 assert(N1->getOperand(0).getValueType() == VT && 14089 "Shuffle types don't match"); 14090 14091 SDValue SV0 = N1->getOperand(0); 14092 SDValue SV1 = N1->getOperand(1); 14093 bool HasSameOp0 = N0 == SV0; 14094 bool IsSV1Undef = SV1.isUndef(); 14095 if (HasSameOp0 || IsSV1Undef || N0 == SV1) 14096 // Commute the operands of this shuffle so that next rule 14097 // will trigger. 14098 return DAG.getCommutedVectorShuffle(*SVN); 14099 } 14100 14101 // Try to fold according to rules: 14102 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2) 14103 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2) 14104 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2) 14105 // Don't try to fold shuffles with illegal type. 14106 // Only fold if this shuffle is the only user of the other shuffle. 14107 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) && 14108 Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) { 14109 ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0); 14110 14111 // Don't try to fold splats; they're likely to simplify somehow, or they 14112 // might be free. 14113 if (OtherSV->isSplat()) 14114 return SDValue(); 14115 14116 // The incoming shuffle must be of the same type as the result of the 14117 // current shuffle. 
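    // Worked sketch (NumElts == 4):
    //   shuffle(shuffle(A, B, <1,0,3,2>), C, <0,4,2,5>)
    // reads lanes A[1], C[0], A[3], C[1], so the loop below produces
    //   shuffle(A, C, <1,4,3,5>)
    // and B drops out entirely.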
    assert(OtherSV->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    SDValue SV0, SV1;
    SmallVector<int, 4> Mask;
    // Compute the combined shuffle mask for a shuffle with SV0 as the first
    // operand, and SV1 as the second operand.
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx < 0) {
        // Propagate Undef.
        Mask.push_back(Idx);
        continue;
      }

      SDValue CurrentVec;
      if (Idx < (int)NumElts) {
        // This shuffle index refers to the inner shuffle N0. Look up the inner
        // shuffle mask to identify which vector is actually referenced.
        Idx = OtherSV->getMaskElt(Idx);
        if (Idx < 0) {
          // Propagate Undef.
          Mask.push_back(Idx);
          continue;
        }

        CurrentVec = (Idx < (int) NumElts) ? OtherSV->getOperand(0)
                                           : OtherSV->getOperand(1);
      } else {
        // This shuffle index references an element within N1.
        CurrentVec = N1;
      }

      // Simple case where 'CurrentVec' is UNDEF.
      if (CurrentVec.isUndef()) {
        Mask.push_back(-1);
        continue;
      }

      // Canonicalize the shuffle index. We don't know yet if CurrentVec
      // will be the first or second operand of the combined shuffle.
      Idx = Idx % NumElts;
      if (!SV0.getNode() || SV0 == CurrentVec) {
        // Ok. CurrentVec is the left hand side.
        // Update the mask accordingly.
        SV0 = CurrentVec;
        Mask.push_back(Idx);
        continue;
      }

      // Bail out if we cannot convert the shuffle pair into a single shuffle.
      if (SV1.getNode() && SV1 != CurrentVec)
        return SDValue();

      // Ok. CurrentVec is the right hand side.
      // Update the mask accordingly.
      SV1 = CurrentVec;
      Mask.push_back(Idx + NumElts);
    }

    // Check if all indices in Mask are Undef. If so, propagate Undef.
    bool isUndefMask = true;
    for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
      isUndefMask &= Mask[i] < 0;

    if (isUndefMask)
      return DAG.getUNDEF(VT);

    if (!SV0.getNode())
      SV0 = DAG.getUNDEF(VT);
    if (!SV1.getNode())
      SV1 = DAG.getUNDEF(VT);

    // Avoid introducing shuffles with illegal mask.
    if (!TLI.isShuffleMaskLegal(Mask, VT)) {
      ShuffleVectorSDNode::commuteMask(Mask);

      if (!TLI.isShuffleMaskLegal(Mask, VT))
        return SDValue();

      // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
      // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
      // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
      std::swap(SV0, SV1);
    }

    // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
    // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
    // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
    return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
  SDValue InVal = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
  // with a VECTOR_SHUFFLE.
  if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue InVec = InVal->getOperand(0);
    SDValue EltNo = InVal->getOperand(1);

    // FIXME: We could support implicit truncation if the shuffle can be
    // scaled to a smaller vector scalar type.
    ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo);
    if (C0 && VT == InVec.getValueType() &&
        VT.getScalarType() == InVal.getValueType()) {
      SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1);
      int Elt = C0->getZExtValue();
      NewMask[0] = Elt;

      if (TLI.isShuffleMaskLegal(NewMask, VT))
        return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT),
                                    NewMask);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);

  // Combine INSERT_SUBVECTORs where we are inserting to the same index.
  // INSERT_SUBVECTOR( INSERT_SUBVECTOR( Vec, SubOld, Idx ), SubNew, Idx )
  // --> INSERT_SUBVECTOR( Vec, SubNew, Idx )
  if (N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
      N0.getOperand(1).getValueType() == N1.getValueType() &&
      N0.getOperand(2) == N2)
    return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
                       N1, N2);

  if (N0.getValueType() != N1.getValueType())
    return SDValue();

  // If the input vector is a concatenation, and the insert replaces
  // one of the halves, we can optimize into a single concat_vectors.
  if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0->getNumOperands() == 2 &&
      N2.getOpcode() == ISD::Constant) {
    APInt InsIdx = cast<ConstantSDNode>(N2)->getAPIntValue();

    // Lower half: fold (insert_subvector (concat_vectors X, Y), Z) ->
    // (concat_vectors Z, Y)
    if (InsIdx == 0)
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N1,
                         N0.getOperand(1));

    // Upper half: fold (insert_subvector (concat_vectors X, Y), Z) ->
    // (concat_vectors X, Z)
    if (InsIdx == VT.getVectorNumElements() / 2)
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0.getOperand(0),
                         N1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
  SDValue N0 = N->getOperand(0);

  // fold (fp_to_fp16 (fp16_to_fp op)) -> op
  if (N0->getOpcode() == ISD::FP16_TO_FP)
    return N0->getOperand(0);

  return SDValue();
}

SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);

  // fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op)
  if (N0->getOpcode() == ISD::AND) {
    ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1));
    if (AndConst && AndConst->getAPIntValue() == 0xffff) {
      return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0),
                         N0.getOperand(0));
    }
  }

  return SDValue();
}

/// Returns a vector_shuffle if it is able to transform an AND to a
/// vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0> ==>
///      vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);

  // Make sure we're not running after operation legalization where it
  // may have custom lowered the vector shuffles.
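  // An illustrative sub-element sketch (little-endian lane order assumed):
  //   (and v2i64:X, <0x00000000FFFFFFFF, 0xFFFFFFFF00000000>)
  // has no usable clear mask at i64 granularity, but splitting each element
  // into two i32 lanes below yields
  //   vector_shuffle (v4i32 (bitcast X)), zero, <0,5,6,3>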
14318 if (LegalOperations) 14319 return SDValue(); 14320 14321 if (N->getOpcode() != ISD::AND) 14322 return SDValue(); 14323 14324 if (RHS.getOpcode() == ISD::BITCAST) 14325 RHS = RHS.getOperand(0); 14326 14327 if (RHS.getOpcode() != ISD::BUILD_VECTOR) 14328 return SDValue(); 14329 14330 EVT RVT = RHS.getValueType(); 14331 unsigned NumElts = RHS.getNumOperands(); 14332 14333 // Attempt to create a valid clear mask, splitting the mask into 14334 // sub elements and checking to see if each is 14335 // all zeros or all ones - suitable for shuffle masking. 14336 auto BuildClearMask = [&](int Split) { 14337 int NumSubElts = NumElts * Split; 14338 int NumSubBits = RVT.getScalarSizeInBits() / Split; 14339 14340 SmallVector<int, 8> Indices; 14341 for (int i = 0; i != NumSubElts; ++i) { 14342 int EltIdx = i / Split; 14343 int SubIdx = i % Split; 14344 SDValue Elt = RHS.getOperand(EltIdx); 14345 if (Elt.isUndef()) { 14346 Indices.push_back(-1); 14347 continue; 14348 } 14349 14350 APInt Bits; 14351 if (isa<ConstantSDNode>(Elt)) 14352 Bits = cast<ConstantSDNode>(Elt)->getAPIntValue(); 14353 else if (isa<ConstantFPSDNode>(Elt)) 14354 Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt(); 14355 else 14356 return SDValue(); 14357 14358 // Extract the sub element from the constant bit mask. 14359 if (DAG.getDataLayout().isBigEndian()) { 14360 Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits); 14361 } else { 14362 Bits = Bits.lshr(SubIdx * NumSubBits); 14363 } 14364 14365 if (Split > 1) 14366 Bits = Bits.trunc(NumSubBits); 14367 14368 if (Bits.isAllOnesValue()) 14369 Indices.push_back(i); 14370 else if (Bits == 0) 14371 Indices.push_back(i + NumSubElts); 14372 else 14373 return SDValue(); 14374 } 14375 14376 // Let's see if the target supports this vector_shuffle. 14377 EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits); 14378 EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts); 14379 if (!TLI.isVectorClearMaskLegal(Indices, ClearVT)) 14380 return SDValue(); 14381 14382 SDValue Zero = DAG.getConstant(0, DL, ClearVT); 14383 return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, DL, 14384 DAG.getBitcast(ClearVT, LHS), 14385 Zero, Indices)); 14386 }; 14387 14388 // Determine maximum split level (byte level masking). 14389 int MaxSplit = 1; 14390 if (RVT.getScalarSizeInBits() % 8 == 0) 14391 MaxSplit = RVT.getScalarSizeInBits() / 8; 14392 14393 for (int Split = 1; Split <= MaxSplit; ++Split) 14394 if (RVT.getScalarSizeInBits() % Split == 0) 14395 if (SDValue S = BuildClearMask(Split)) 14396 return S; 14397 14398 return SDValue(); 14399 } 14400 14401 /// Visit a binary vector operation, like ADD. 14402 SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) { 14403 assert(N->getValueType(0).isVector() && 14404 "SimplifyVBinOp only works on vectors!"); 14405 14406 SDValue LHS = N->getOperand(0); 14407 SDValue RHS = N->getOperand(1); 14408 SDValue Ops[] = {LHS, RHS}; 14409 14410 // See if we can constant fold the vector operation. 14411 if (SDValue Fold = DAG.FoldConstantVectorArithmetic( 14412 N->getOpcode(), SDLoc(LHS), LHS.getValueType(), Ops, N->getFlags())) 14413 return Fold; 14414 14415 // Try to convert a constant mask AND into a shuffle clear mask. 14416 if (SDValue Shuffle = XformToShuffleWithZero(N)) 14417 return Shuffle; 14418 14419 // Type legalization might introduce new shuffles in the DAG. 14420 // Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask))) 14421 // -> (shuffle (VBinOp (A, B)), Undef, Mask). 
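  // e.g. (add (shuffle v4i32:A, undef, <1,0,3,2>),
  //           (shuffle v4i32:B, undef, <1,0,3,2>))
  //        -> (shuffle (add A, B), undef, <1,0,3,2>)
  // This is sound because both operands permute their lanes identically.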
  if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) &&
      isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
      LHS.getOperand(1).isUndef() &&
      RHS.getOperand(1).isUndef()) {
    ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
    ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);

    if (SVN0->getMask().equals(SVN1->getMask())) {
      EVT VT = N->getValueType(0);
      SDValue UndefVector = LHS.getOperand(1);
      SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                     LHS.getOperand(0), RHS.getOperand(0),
                                     N->getFlags());
      AddUsersToWorklist(N);
      return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector,
                                  SVN0->getMask());
    }
  }

  return SDValue();
}

SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
                                    SDValue N2) {
  assert(N0.getOpcode() == ISD::SETCC &&
         "First argument must be a SetCC node!");

  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());

  // If we got a simplified select_cc node back from SimplifySelectCC, then
  // break it down into a new SETCC node, and a new SELECT node, and then
  // return the SELECT node, since we were called with a SELECT node.
  if (SCC.getNode()) {
    // Check to see if we got a select_cc back (to turn into setcc/select).
    // Otherwise, just return whatever node we got back, like fabs.
    if (SCC.getOpcode() == ISD::SELECT_CC) {
      SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
                                  N0.getValueType(),
                                  SCC.getOperand(0), SCC.getOperand(1),
                                  SCC.getOperand(4));
      AddToWorklist(SETCC.getNode());
      return DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
                           SCC.getOperand(2), SCC.getOperand(3));
    }

    return SCC;
  }
  return SDValue();
}

/// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
/// being selected between, see if we can simplify the select. Callers of this
/// should assume that TheSelect is deleted if this returns true. As such,
/// they should return the appropriate thing (e.g. the node) back to the
/// top-level of the DAG combiner loop to avoid it being looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
                                    SDValue RHS) {

  // fold (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
  // The select + setcc is redundant, because fsqrt returns NaN for X < 0.
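  // e.g. (select (setcc f32:X, -0.0, setolt), NaN, (fsqrt X)) -> (fsqrt X),
  // since for X < -0.0 the fsqrt itself already produces the NaN arm.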
  if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) {
    if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) {
      // We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?))
      SDValue Sqrt = RHS;
      ISD::CondCode CC;
      SDValue CmpLHS;
      const ConstantFPSDNode *Zero = nullptr;

      if (TheSelect->getOpcode() == ISD::SELECT_CC) {
        CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
        CmpLHS = TheSelect->getOperand(0);
        Zero = isConstOrConstSplatFP(TheSelect->getOperand(1));
      } else {
        // SELECT or VSELECT
        SDValue Cmp = TheSelect->getOperand(0);
        if (Cmp.getOpcode() == ISD::SETCC) {
          CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
          CmpLHS = Cmp.getOperand(0);
          Zero = isConstOrConstSplatFP(Cmp.getOperand(1));
        }
      }
      if (Zero && Zero->isZero() &&
          Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT ||
          CC == ISD::SETULT || CC == ISD::SETLT)) {
        // We have: (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
        CombineTo(TheSelect, Sqrt);
        return true;
      }
    }
  }
  // Cannot simplify select with vector condition
  if (TheSelect->getOperand(0).getValueType().isVector()) return false;

  // If this is a select from two identical things, try to pull the operation
  // through the select.
  if (LHS.getOpcode() != RHS.getOpcode() ||
      !LHS.hasOneUse() || !RHS.hasOneUse())
    return false;

  // If this is a load and the token chain is identical, replace the select
  // of two loads with a load through a select of the address to load from.
  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
  // constants have been dropped into the constant pool.
  if (LHS.getOpcode() == ISD::LOAD) {
    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
    LoadSDNode *RLD = cast<LoadSDNode>(RHS);

    // Token chains must be identical.
    if (LHS.getOperand(0) != RHS.getOperand(0) ||
        // Do not let this transformation reduce the number of volatile loads.
        LLD->isVolatile() || RLD->isVolatile() ||
        // FIXME: If either is a pre/post inc/dec load,
        // we'd need to split out the address adjustment.
        LLD->isIndexed() || RLD->isIndexed() ||
        // If this is an EXTLOAD, the VTs must match.
        LLD->getMemoryVT() != RLD->getMemoryVT() ||
        // If this is an EXTLOAD, the kind of extension must match.
        (LLD->getExtensionType() != RLD->getExtensionType() &&
         // The only exception is if one of the extensions is anyext.
         LLD->getExtensionType() != ISD::EXTLOAD &&
         RLD->getExtensionType() != ISD::EXTLOAD) ||
        // FIXME: this discards src value information. This is
        // over-conservative. It would be beneficial to be able to remember
        // both potential memory locations. Since we are discarding
        // src value info, don't do the transformation if the memory
        // locations are not in the default address space.
        LLD->getPointerInfo().getAddrSpace() != 0 ||
        RLD->getPointerInfo().getAddrSpace() != 0 ||
        !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
                                      LLD->getBasePtr().getValueType()))
      return false;

    // Check that the select condition doesn't reach either load. If so,
    // folding this will induce a cycle into the DAG. If not, this is safe to
    // xform, so create a select of the addresses.
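    // The shape of the transform, as a sketch:
    //   (select Cond, (load p), (load q)) -> (load (select Cond, p, q))
    // trading two loads for one load from a selected address.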
14557 SDValue Addr; 14558 if (TheSelect->getOpcode() == ISD::SELECT) { 14559 SDNode *CondNode = TheSelect->getOperand(0).getNode(); 14560 if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) || 14561 (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode))) 14562 return false; 14563 // The loads must not depend on one another. 14564 if (LLD->isPredecessorOf(RLD) || 14565 RLD->isPredecessorOf(LLD)) 14566 return false; 14567 Addr = DAG.getSelect(SDLoc(TheSelect), 14568 LLD->getBasePtr().getValueType(), 14569 TheSelect->getOperand(0), LLD->getBasePtr(), 14570 RLD->getBasePtr()); 14571 } else { // Otherwise SELECT_CC 14572 SDNode *CondLHS = TheSelect->getOperand(0).getNode(); 14573 SDNode *CondRHS = TheSelect->getOperand(1).getNode(); 14574 14575 if ((LLD->hasAnyUseOfValue(1) && 14576 (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) || 14577 (RLD->hasAnyUseOfValue(1) && 14578 (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS)))) 14579 return false; 14580 14581 Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect), 14582 LLD->getBasePtr().getValueType(), 14583 TheSelect->getOperand(0), 14584 TheSelect->getOperand(1), 14585 LLD->getBasePtr(), RLD->getBasePtr(), 14586 TheSelect->getOperand(4)); 14587 } 14588 14589 SDValue Load; 14590 // It is safe to replace the two loads if they have different alignments, 14591 // but the new load must be the minimum (most restrictive) alignment of the 14592 // inputs. 14593 unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment()); 14594 MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags(); 14595 if (!RLD->isInvariant()) 14596 MMOFlags &= ~MachineMemOperand::MOInvariant; 14597 if (!RLD->isDereferenceable()) 14598 MMOFlags &= ~MachineMemOperand::MODereferenceable; 14599 if (LLD->getExtensionType() == ISD::NON_EXTLOAD) { 14600 // FIXME: Discards pointer and AA info. 14601 Load = DAG.getLoad(TheSelect->getValueType(0), SDLoc(TheSelect), 14602 LLD->getChain(), Addr, MachinePointerInfo(), Alignment, 14603 MMOFlags); 14604 } else { 14605 // FIXME: Discards pointer and AA info. 14606 Load = DAG.getExtLoad( 14607 LLD->getExtensionType() == ISD::EXTLOAD ? RLD->getExtensionType() 14608 : LLD->getExtensionType(), 14609 SDLoc(TheSelect), TheSelect->getValueType(0), LLD->getChain(), Addr, 14610 MachinePointerInfo(), LLD->getMemoryVT(), Alignment, MMOFlags); 14611 } 14612 14613 // Users of the select now use the result of the load. 14614 CombineTo(TheSelect, Load); 14615 14616 // Users of the old loads now use the new load's chain. We know the 14617 // old-load value is dead now. 14618 CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1)); 14619 CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1)); 14620 return true; 14621 } 14622 14623 return false; 14624 } 14625 14626 /// Try to fold an expression of the form (N0 cond N1) ? N2 : N3 to a shift and 14627 /// bitwise 'and'. 
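/// For example (an i32 sketch of the single-bit case handled below):
///   (select_cc setlt X, 0, 8, 0) -> (and (srl X, 28), 8)
/// since shifting the sign bit down to bit 3 reproduces the select.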
14628 SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, 14629 SDValue N1, SDValue N2, SDValue N3, 14630 ISD::CondCode CC) { 14631 // If this is a select where the false operand is zero and the compare is a 14632 // check of the sign bit, see if we can perform the "gzip trick": 14633 // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A 14634 // select_cc setgt X, 0, A, 0 -> and (not (sra X, size(X)-1)), A 14635 EVT XType = N0.getValueType(); 14636 EVT AType = N2.getValueType(); 14637 if (!isNullConstant(N3) || !XType.bitsGE(AType)) 14638 return SDValue(); 14639 14640 // If the comparison is testing for a positive value, we have to invert 14641 // the sign bit mask, so only do that transform if the target has a bitwise 14642 // 'and not' instruction (the invert is free). 14643 if (CC == ISD::SETGT && TLI.hasAndNot(N2)) { 14644 // (X > -1) ? A : 0 14645 // (X > 0) ? X : 0 <-- This is canonical signed max. 14646 if (!(isAllOnesConstant(N1) || (isNullConstant(N1) && N0 == N2))) 14647 return SDValue(); 14648 } else if (CC == ISD::SETLT) { 14649 // (X < 0) ? A : 0 14650 // (X < 1) ? X : 0 <-- This is un-canonicalized signed min. 14651 if (!(isNullConstant(N1) || (isOneConstant(N1) && N0 == N2))) 14652 return SDValue(); 14653 } else { 14654 return SDValue(); 14655 } 14656 14657 // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a single-bit 14658 // constant. 14659 EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType()); 14660 auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); 14661 if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) { 14662 unsigned ShCt = XType.getSizeInBits() - N2C->getAPIntValue().logBase2() - 1; 14663 SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy); 14664 SDValue Shift = DAG.getNode(ISD::SRL, DL, XType, N0, ShiftAmt); 14665 AddToWorklist(Shift.getNode()); 14666 14667 if (XType.bitsGT(AType)) { 14668 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift); 14669 AddToWorklist(Shift.getNode()); 14670 } 14671 14672 if (CC == ISD::SETGT) 14673 Shift = DAG.getNOT(DL, Shift, AType); 14674 14675 return DAG.getNode(ISD::AND, DL, AType, Shift, N2); 14676 } 14677 14678 SDValue ShiftAmt = DAG.getConstant(XType.getSizeInBits() - 1, DL, ShiftAmtTy); 14679 SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, N0, ShiftAmt); 14680 AddToWorklist(Shift.getNode()); 14681 14682 if (XType.bitsGT(AType)) { 14683 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift); 14684 AddToWorklist(Shift.getNode()); 14685 } 14686 14687 if (CC == ISD::SETGT) 14688 Shift = DAG.getNOT(DL, Shift, AType); 14689 14690 return DAG.getNode(ISD::AND, DL, AType, Shift, N2); 14691 } 14692 14693 /// Simplify an expression of the form (N0 cond N1) ? N2 : N3 14694 /// where 'cond' is the comparison specified by CC. 14695 SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1, 14696 SDValue N2, SDValue N3, ISD::CondCode CC, 14697 bool NotExtCompare) { 14698 // (x ? y : y) -> y. 
  if (N2 == N3) return N2;

  EVT VT = N2.getValueType();
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());

  // Determine if the condition we're dealing with is constant
  SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
                              N0, N1, CC, DL, false);
  if (SCC.getNode()) AddToWorklist(SCC.getNode());

  if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
    // fold select_cc true, x, y -> x
    // fold select_cc false, x, y -> y
    return !SCCC->isNullValue() ? N2 : N3;
  }

  // Check to see if we can simplify the select into an fabs node
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
    // Allow either -0.0 or 0.0
    if (CFP->isZero()) {
      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
          N2 == N3.getOperand(0))
        return DAG.getNode(ISD::FABS, DL, VT, N0);

      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
          N2.getOperand(0) == N3)
        return DAG.getNode(ISD::FABS, DL, VT, N3);
    }
  }

  // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
  // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
  // in it. This is a win when the constant is not otherwise available because
  // it replaces two constant pool loads with one. We only do this if the FP
  // type is known to be legal (because otherwise we are before legalize types
  // and we want the other legalization to happen first, e.g. to avoid messing
  // with soft float) and if the ConstantFP is not legal (because if it is
  // legal, we may not need to store the FP constant in a constant pool).
  if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
    if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
      if (TLI.isTypeLegal(N2.getValueType()) &&
          (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
               TargetLowering::Legal &&
           !TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) &&
           !TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) &&
          // If both constants have multiple uses, then we won't need to do an
          // extra load, they are likely around in registers for other users.
          (TV->hasOneUse() || FV->hasOneUse())) {
        Constant *Elts[] = {
          const_cast<ConstantFP*>(FV->getConstantFPValue()),
          const_cast<ConstantFP*>(TV->getConstantFPValue())
        };
        Type *FPTy = Elts[0]->getType();
        const DataLayout &TD = DAG.getDataLayout();

        // Create a ConstantArray of the two constants.
        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
        SDValue CPIdx =
            DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
                                TD.getPrefTypeAlignment(FPTy));
        unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();

        // Get the offsets to the 0 and 1 element of the array so that we can
        // select between them.
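        // Sketch: for "(a < b) ? 1.0f : 2.0f" the pool entry holds the array
        // {2.0f, 1.0f}, so the select below produces byte offset 4 when the
        // condition is true and 0 otherwise.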
        SDValue Zero = DAG.getIntPtrConstant(0, DL);
        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
        SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV));

        SDValue Cond = DAG.getSetCC(DL,
                                    getSetCCResultType(N0.getValueType()),
                                    N0, N1, CC);
        AddToWorklist(Cond.getNode());
        SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
                                          Cond, One, Zero);
        AddToWorklist(CstOffset.getNode());
        CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
                            CstOffset);
        AddToWorklist(CPIdx.getNode());
        return DAG.getLoad(
            TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
            MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
            Alignment);
      }
    }

  if (SDValue V = foldSelectCCToShiftAnd(DL, N0, N1, N2, N3, CC))
    return V;

  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
  // where y has a single bit set. In plain terms, we can turn the SELECT_CC
  // into an AND when the condition can be materialized as an all-ones
  // register. Any single bit-test can be materialized as an all-ones register
  // with shift-left and shift-right-arith.
  if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
      N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) {
    SDValue AndLHS = N0->getOperand(0);
    ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
      // Shift the tested bit over the sign bit.
      const APInt &AndMask = ConstAndRHS->getAPIntValue();
      SDValue ShlAmt =
          DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS),
                          getShiftAmountTy(AndLHS.getValueType()));
      SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);

      // Now arithmetic right shift it all the way over, so the result is
      // either all-ones or zero.
      SDValue ShrAmt =
          DAG.getConstant(AndMask.getBitWidth() - 1, SDLoc(Shl),
                          getShiftAmountTy(Shl.getValueType()));
      SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);

      return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
    }
  }

  // fold select C, 16, 0 -> shl C, 4
  if (N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2() &&
      TLI.getBooleanContents(N0.getValueType()) ==
          TargetLowering::ZeroOrOneBooleanContent) {

    // If the caller doesn't want us to simplify this into a zext of a compare,
    // don't do it.
    if (NotExtCompare && N2C->isOne())
      return SDValue();

    // Get a SetCC of the condition
    // NOTE: Don't create a SETCC if it's not legal on this target.
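    // e.g. with ZeroOrOneBooleanContent:
    //   (select_cc setlt X, Y, 16, 0)
    //     -> (shl (zext (setcc X, Y, setlt)), 4)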
    if (!LegalOperations ||
        TLI.isOperationLegal(ISD::SETCC, N0.getValueType())) {
      SDValue Temp, SCC;
      // cast from setcc result type to select result type
      if (LegalTypes) {
        SCC = DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()),
                           N0, N1, CC);
        if (N2.getValueType().bitsLT(SCC.getValueType()))
          Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2),
                                        N2.getValueType());
        else
          Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
                             N2.getValueType(), SCC);
      } else {
        SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
        Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
                           N2.getValueType(), SCC);
      }

      AddToWorklist(SCC.getNode());
      AddToWorklist(Temp.getNode());

      if (N2C->isOne())
        return Temp;

      // shl setcc result by log2 n2c
      return DAG.getNode(
          ISD::SHL, DL, N2.getValueType(), Temp,
          DAG.getConstant(N2C->getAPIntValue().logBase2(), SDLoc(Temp),
                          getShiftAmountTy(Temp.getValueType())));
    }
  }

  // Check to see if this is an integer abs.
  // select_cc setg[te] X, 0, X, -X ->
  // select_cc setgt X, -1, X, -X ->
  // select_cc setl[te] X, 0, -X, X ->
  // select_cc setlt X, 1, -X, X ->
  //   Y = sra (X, size(X)-1); xor (add (X, Y), Y)
  if (N1C) {
    ConstantSDNode *SubC = nullptr;
    if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
         (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
        N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
    else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
              (N1C->isOne() && CC == ISD::SETLT)) &&
             N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));

    EVT XType = N0.getValueType();
    if (SubC && SubC->isNullValue() && XType.isInteger()) {
      SDLoc DL(N0);
      SDValue Shift = DAG.getNode(ISD::SRA, DL, XType,
                                  N0,
                                  DAG.getConstant(XType.getSizeInBits() - 1, DL,
                                         getShiftAmountTy(N0.getValueType())));
      SDValue Add = DAG.getNode(ISD::ADD, DL,
                                XType, N0, Shift);
      AddToWorklist(Shift.getNode());
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
    }
  }

  // select_cc seteq X, 0, sizeof(X), ctlz(X) -> ctlz(X)
  // select_cc seteq X, 0, sizeof(X), ctlz_zero_undef(X) -> ctlz(X)
  // select_cc seteq X, 0, sizeof(X), cttz(X) -> cttz(X)
  // select_cc seteq X, 0, sizeof(X), cttz_zero_undef(X) -> cttz(X)
  // select_cc setne X, 0, ctlz(X), sizeof(X) -> ctlz(X)
  // select_cc setne X, 0, ctlz_zero_undef(X), sizeof(X) -> ctlz(X)
  // select_cc setne X, 0, cttz(X), sizeof(X) -> cttz(X)
  // select_cc setne X, 0, cttz_zero_undef(X), sizeof(X) -> cttz(X)
  if (N1C && N1C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue ValueOnZero = N2;
    SDValue Count = N3;
    // If the condition is NE instead of EQ, swap the operands.
    if (CC == ISD::SETNE)
      std::swap(ValueOnZero, Count);
    // Check if the value on zero is a constant equal to the bits in the type.
    if (auto *ValueOnZeroC = dyn_cast<ConstantSDNode>(ValueOnZero)) {
      if (ValueOnZeroC->getAPIntValue() == VT.getSizeInBits()) {
        // If the other operand is cttz/cttz_zero_undef of N0, and cttz is
        // legal, combine to just cttz.
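        // e.g. (select_cc seteq i32:X, 0, 32, (cttz X)) -> (cttz X), because
        // ISD::CTTZ is defined to produce the bit width when its input is
        // zero.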
14917 if ((Count.getOpcode() == ISD::CTTZ || 14918 Count.getOpcode() == ISD::CTTZ_ZERO_UNDEF) && 14919 N0 == Count.getOperand(0) && 14920 (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ, VT))) 14921 return DAG.getNode(ISD::CTTZ, DL, VT, N0); 14922 // If the other operand is ctlz/ctlz_zero_undef of N0, and ctlz is 14923 // legal, combine to just ctlz. 14924 if ((Count.getOpcode() == ISD::CTLZ || 14925 Count.getOpcode() == ISD::CTLZ_ZERO_UNDEF) && 14926 N0 == Count.getOperand(0) && 14927 (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ, VT))) 14928 return DAG.getNode(ISD::CTLZ, DL, VT, N0); 14929 } 14930 } 14931 } 14932 14933 return SDValue(); 14934 } 14935 14936 /// This is a stub for TargetLowering::SimplifySetCC. 14937 SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 14938 ISD::CondCode Cond, const SDLoc &DL, 14939 bool foldBooleans) { 14940 TargetLowering::DAGCombinerInfo 14941 DagCombineInfo(DAG, Level, false, this); 14942 return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL); 14943 } 14944 14945 /// Given an ISD::SDIV node expressing a divide by constant, return 14946 /// a DAG expression to select that will generate the same value by multiplying 14947 /// by a magic number. 14948 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 14949 SDValue DAGCombiner::BuildSDIV(SDNode *N) { 14950 // when optimising for minimum size, we don't want to expand a div to a mul 14951 // and a shift. 14952 if (DAG.getMachineFunction().getFunction()->optForMinSize()) 14953 return SDValue(); 14954 14955 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); 14956 if (!C) 14957 return SDValue(); 14958 14959 // Avoid division by zero. 14960 if (C->isNullValue()) 14961 return SDValue(); 14962 14963 std::vector<SDNode*> Built; 14964 SDValue S = 14965 TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built); 14966 14967 for (SDNode *N : Built) 14968 AddToWorklist(N); 14969 return S; 14970 } 14971 14972 /// Given an ISD::SDIV node expressing a divide by constant power of 2, return a 14973 /// DAG expression that will generate the same value by right shifting. 14974 SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) { 14975 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); 14976 if (!C) 14977 return SDValue(); 14978 14979 // Avoid division by zero. 14980 if (C->isNullValue()) 14981 return SDValue(); 14982 14983 std::vector<SDNode *> Built; 14984 SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, &Built); 14985 14986 for (SDNode *N : Built) 14987 AddToWorklist(N); 14988 return S; 14989 } 14990 14991 /// Given an ISD::UDIV node expressing a divide by constant, return a DAG 14992 /// expression that will generate the same value by multiplying by a magic 14993 /// number. 14994 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 14995 SDValue DAGCombiner::BuildUDIV(SDNode *N) { 14996 // when optimising for minimum size, we don't want to expand a div to a mul 14997 // and a shift. 14998 if (DAG.getMachineFunction().getFunction()->optForMinSize()) 14999 return SDValue(); 15000 15001 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); 15002 if (!C) 15003 return SDValue(); 15004 15005 // Avoid division by zero. 
15006 if (C->isNullValue()) 15007 return SDValue(); 15008 15009 std::vector<SDNode*> Built; 15010 SDValue S = 15011 TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built); 15012 15013 for (SDNode *N : Built) 15014 AddToWorklist(N); 15015 return S; 15016 } 15017 15018 /// Determines the LogBase2 value for a non-null input value using the 15019 /// transform: LogBase2(V) = (EltBits - 1) - ctlz(V). 15020 SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) { 15021 EVT VT = V.getValueType(); 15022 unsigned EltBits = VT.getScalarSizeInBits(); 15023 SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V); 15024 SDValue Base = DAG.getConstant(EltBits - 1, DL, VT); 15025 SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz); 15026 return LogBase2; 15027 } 15028 15029 /// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 15030 /// For the reciprocal, we need to find the zero of the function: 15031 /// F(X) = A X - 1 [which has a zero at X = 1/A] 15032 /// => 15033 /// X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form 15034 /// does not require additional intermediate precision] 15035 SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags) { 15036 if (Level >= AfterLegalizeDAG) 15037 return SDValue(); 15038 15039 // TODO: Handle half and/or extended types? 15040 EVT VT = Op.getValueType(); 15041 if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64) 15042 return SDValue(); 15043 15044 // If estimates are explicitly disabled for this function, we're done. 15045 MachineFunction &MF = DAG.getMachineFunction(); 15046 int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF); 15047 if (Enabled == TLI.ReciprocalEstimate::Disabled) 15048 return SDValue(); 15049 15050 // Estimates may be explicitly enabled for this type with a custom number of 15051 // refinement steps. 15052 int Iterations = TLI.getDivRefinementSteps(VT, MF); 15053 if (SDValue Est = TLI.getRecipEstimate(Op, DAG, Enabled, Iterations)) { 15054 AddToWorklist(Est.getNode()); 15055 15056 if (Iterations) { 15057 EVT VT = Op.getValueType(); 15058 SDLoc DL(Op); 15059 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT); 15060 15061 // Newton iterations: Est = Est + Est (1 - Arg * Est) 15062 for (int i = 0; i < Iterations; ++i) { 15063 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, Est, Flags); 15064 AddToWorklist(NewEst.getNode()); 15065 15066 NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPOne, NewEst, Flags); 15067 AddToWorklist(NewEst.getNode()); 15068 15069 NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags); 15070 AddToWorklist(NewEst.getNode()); 15071 15072 Est = DAG.getNode(ISD::FADD, DL, VT, Est, NewEst, Flags); 15073 AddToWorklist(Est.getNode()); 15074 } 15075 } 15076 return Est; 15077 } 15078 15079 return SDValue(); 15080 } 15081 15082 /// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 15083 /// For the reciprocal sqrt, we need to find the zero of the function: 15084 /// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] 15085 /// => 15086 /// X_{i+1} = X_i (1.5 - A X_i^2 / 2) 15087 /// As a result, we precompute A/2 prior to the iteration loop. 
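/// A one-step numeric sketch: for A = 4.0 with initial estimate X_0 = 0.48,
///   X_1 = 0.48 * (1.5 - 2.0 * 0.48^2) ~= 0.4988,
/// closing in on 1/sqrt(4.0) = 0.5.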
/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal sqrt, we need to find the zero of the function:
///   F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
///     =>
///   X_{i+1} = X_i (1.5 - A X_i^2 / 2)
/// As a result, we precompute A/2 prior to the iteration loop.
SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags *Flags, bool Reciprocal) {
  EVT VT = Arg.getValueType();
  SDLoc DL(Arg);
  SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);

  // We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that
  // this entire sequence requires only one FP constant.
  SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg, Flags);
  AddToWorklist(HalfArg.getNode());

  HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg, Flags);
  AddToWorklist(HalfArg.getNode());

  // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
  for (unsigned i = 0; i < Iterations; ++i) {
    SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est, Flags);
    AddToWorklist(NewEst.getNode());

    NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst, Flags);
    AddToWorklist(NewEst.getNode());

    NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst, Flags);
    AddToWorklist(NewEst.getNode());

    Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
    AddToWorklist(Est.getNode());
  }

  // If non-reciprocal square root is requested, multiply the result by Arg.
  if (!Reciprocal) {
    Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg, Flags);
    AddToWorklist(Est.getNode());
  }

  return Est;
}

/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal sqrt, we need to find the zero of the function:
///   F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
///     =>
///   X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags *Flags, bool Reciprocal) {
  EVT VT = Arg.getValueType();
  SDLoc DL(Arg);
  SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
  SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT);

  // This routine must enter the loop below to work correctly
  // when (Reciprocal == false).
  assert(Iterations > 0);

  // Newton iterations for reciprocal square root:
  //   E = (E * -0.5) * ((A * E) * E + -3.0)
  for (unsigned i = 0; i < Iterations; ++i) {
    SDValue AE = DAG.getNode(ISD::FMUL, DL, VT, Arg, Est, Flags);
    AddToWorklist(AE.getNode());

    SDValue AEE = DAG.getNode(ISD::FMUL, DL, VT, AE, Est, Flags);
    AddToWorklist(AEE.getNode());

    SDValue RHS = DAG.getNode(ISD::FADD, DL, VT, AEE, MinusThree, Flags);
    AddToWorklist(RHS.getNode());

    // When calculating a square root at the last iteration build:
    //   S = ((A * E) * -0.5) * ((A * E) * E + -3.0)
    // (notice a common subexpression)
    SDValue LHS;
    if (Reciprocal || (i + 1) < Iterations) {
      // RSQRT: LHS = (E * -0.5)
      LHS = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf, Flags);
    } else {
      // SQRT: LHS = (A * E) * -0.5
      LHS = DAG.getNode(ISD::FMUL, DL, VT, AE, MinusHalf, Flags);
    }
    AddToWorklist(LHS.getNode());

    Est = DAG.getNode(ISD::FMUL, DL, VT, LHS, RHS, Flags);
    AddToWorklist(Est.getNode());
  }

  return Est;
}
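
// Illustrative note (not from the original source): the two-constant form
// above is algebraically the same update as the one-constant form, since
//   (-0.5 * X) * (A * X * X - 3.0) = X * (1.5 - 0.5 * A * X^2).
// For example, with A = 4.0 and Est = 0.5 (the exact value of 1/sqrt(4)):
//   AE = 2.0, AEE = 1.0, RHS = 1.0 - 3.0 = -2.0, LHS = 0.5 * -0.5 = -0.25,
//   Est = -0.25 * -2.0 = 0.5, i.e. the exact answer is a fixed point.
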
/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case,
/// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed
/// if Op can be zero.
SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags,
                                           bool Reciprocal) {
  if (Level >= AfterLegalizeDAG)
    return SDValue();

  // TODO: Handle half and/or extended types?
  EVT VT = Op.getValueType();
  if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
    return SDValue();

  // If estimates are explicitly disabled for this function, we're done.
  MachineFunction &MF = DAG.getMachineFunction();
  int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF);
  if (Enabled == TLI.ReciprocalEstimate::Disabled)
    return SDValue();

  // Estimates may be explicitly enabled for this type with a custom number of
  // refinement steps.
  int Iterations = TLI.getSqrtRefinementSteps(VT, MF);

  bool UseOneConstNR = false;
  if (SDValue Est =
      TLI.getSqrtEstimate(Op, DAG, Enabled, Iterations, UseOneConstNR,
                          Reciprocal)) {
    AddToWorklist(Est.getNode());

    if (Iterations) {
      Est = UseOneConstNR
            ? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal)
            : buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal);

      if (!Reciprocal) {
        // Unfortunately, Est is now NaN if the input was exactly 0.0.
        // Select out this case and force the answer to 0.0.
        EVT VT = Op.getValueType();
        SDLoc DL(Op);

        SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
        EVT CCVT = getSetCCResultType(VT);
        SDValue ZeroCmp = DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
        AddToWorklist(ZeroCmp.getNode());

        Est = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
                          ZeroCmp, FPZero, Est);
        AddToWorklist(Est.getNode());
      }
    }
    return Est;
  }

  return SDValue();
}

SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags) {
  return buildSqrtEstimateImpl(Op, Flags, true);
}

SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags) {
  return buildSqrtEstimateImpl(Op, Flags, false);
}
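
// Illustrative note (not from the original source): why the zero fixup above
// is needed when computing sqrt as Op * rsqrt(Op). For Op == +0.0:
//   rsqrt(0.0) = +Inf, and 0.0 * Inf = NaN under IEEE-754,
// so without the SETEQ/select the estimate would return NaN instead of the
// correct sqrt(0.0) == 0.0.
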
/// Return true if base is a frame index, which is known not to alias with
/// anything but itself. Provides base object and offset as results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
                           const GlobalValue *&GV, const void *&CV) {
  // Assume it is a primitive operation.
  Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;

  // If it's adding a simple constant then integrate the offset.
  if (Base.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
      Base = Base.getOperand(0);
      Offset += C->getZExtValue();
    }
  }

  // Return the underlying GlobalValue, and update the Offset. Return false
  // for GlobalAddressSDNode since the same GlobalAddress may be represented
  // by multiple nodes with different offsets.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
    GV = G->getGlobal();
    Offset += G->getOffset();
    return false;
  }

  // Return the underlying Constant value, and update the Offset. Return false
  // for ConstantSDNodes since the same constant pool entry may be represented
  // by multiple nodes with different offsets.
  if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
    CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal()
                                         : (const void *)C->getConstVal();
    Offset += C->getOffset();
    return false;
  }
  // If it's any of the following then it can't alias with anything but itself.
  return isa<FrameIndexSDNode>(Base);
}

/// Return true if there is any possibility that the two addresses overlap.
bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
  // If they are the same then they must be aliases.
  if (Op0->getBasePtr() == Op1->getBasePtr()) return true;

  // If they are both volatile then they cannot be reordered.
  if (Op0->isVolatile() && Op1->isVolatile()) return true;

  // If one operation reads from invariant memory, and the other may store,
  // they cannot alias. This should really check the equivalent of mayWrite,
  // but that only matters for memory nodes other than load/store.
  if (Op0->isInvariant() && Op1->writeMem())
    return false;

  if (Op1->isInvariant() && Op0->writeMem())
    return false;

  // Gather base node and offset information.
  SDValue Base1, Base2;
  int64_t Offset1, Offset2;
  const GlobalValue *GV1, *GV2;
  const void *CV1, *CV2;
  bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
                                      Base1, Offset1, GV1, CV1);
  bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
                                      Base2, Offset2, GV2, CV2);

  // If they have the same base address then check to see if they overlap.
  if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
    return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
             (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);

  // It is possible for different frame indices to alias each other, mostly
  // when tail call optimization reuses return address slots for arguments.
  // To catch this case, look up the actual index of frame indices to compute
  // the real alias relationship.
  if (isFrameIndex1 && isFrameIndex2) {
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    Offset1 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
    Offset2 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
    return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
             (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
  }

  // Otherwise, if we know what the bases are, and they aren't identical, then
  // we know they cannot alias.
  if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
    return false;
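
  // Illustrative note (not from the original source): the overlap test used
  // twice above. With byte sizes S0 and S1, the ranges [Offset1, Offset1+S0)
  // and [Offset2, Offset2+S1) are disjoint iff one ends at or before the
  // other begins. E.g. two 4-byte accesses at offsets 0 and 4 from the same
  // base satisfy (0 + 4) <= 4, so isAlias returns false; at offsets 0 and 2,
  // neither (0 + 4) <= 2 nor (2 + 4) <= 0 holds, so they may alias.
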
  // If SrcValue1 and SrcValue2 are known to have relatively large alignment
  // compared to the size and offset of the access, we may be able to prove
  // they do not alias. This check is conservative for now to catch cases
  // created by splitting vector types.
  if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) &&
      (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) &&
      (Op0->getMemoryVT().getSizeInBits() >> 3 ==
       Op1->getMemoryVT().getSizeInBits() >> 3) &&
      (Op0->getOriginalAlignment() >
       (Op0->getMemoryVT().getSizeInBits() >> 3))) {
    int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment();
    int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment();

    // There is no overlap between these relatively aligned accesses of
    // similar size; return no alias.
    if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 ||
        (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1)
      return false;
  }

  bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0
                   ? CombinerGlobalAA
                   : DAG.getSubtarget().useAA();
#ifndef NDEBUG
  if (CombinerAAOnlyFunc.getNumOccurrences() &&
      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
    UseAA = false;
#endif
  if (UseAA &&
      Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
    // Use alias analysis information.
    int64_t MinOffset = std::min(Op0->getSrcValueOffset(),
                                 Op1->getSrcValueOffset());
    int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) +
                       Op0->getSrcValueOffset() - MinOffset;
    int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
                       Op1->getSrcValueOffset() - MinOffset;
    AliasResult AAResult =
        AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1,
                                UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
                 MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2,
                                UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
    if (AAResult == NoAlias)
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}
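
// Illustrative note (not from the original source): the alignment-based check
// above. Two 4-byte accesses with original alignment 8 at source offsets 16
// and 20 give OffAlign1 = 16 % 8 = 0 and OffAlign2 = 20 % 8 = 4; since
// (0 + 4) <= 4, the accesses cannot overlap even though the bases are
// unknown. For the AA query, Overlap1/Overlap2 widen each MemoryLocation's
// size by that access's distance from the smaller source offset (4 and 8
// bytes in this example), so the IR-level query conservatively covers the
// full extent where the two accesses could collide.
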
/// Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
                                   SmallVectorImpl<SDValue> &Aliases) {
  SmallVector<SDValue, 8> Chains;     // List of chains to visit.
  SmallPtrSet<SDNode *, 16> Visited;  // Visited node set.

  // Get alias information for node.
  bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile();

  // Starting off.
  Chains.push_back(OriginalChain);
  unsigned Depth = 0;

  // Look at each chain and determine if it is an alias. If so, add it to the
  // aliases list. If not, then continue up the chain looking for the next
  // candidate.
  while (!Chains.empty()) {
    SDValue Chain = Chains.pop_back_val();

    // For TokenFactor nodes, look at each operand and only continue up the
    // chain until we reach the depth limit.
    //
    // FIXME: The depth check could be made to return the last non-aliasing
    // chain we found before we hit a tokenfactor rather than the original
    // chain.
    if (Depth > TLI.getGatherAllAliasesMaxDepth()) {
      Aliases.clear();
      Aliases.push_back(OriginalChain);
      return;
    }

    // Don't bother if we've been here before.
    if (!Visited.insert(Chain.getNode()).second)
      continue;

    switch (Chain.getOpcode()) {
    case ISD::EntryToken:
      // Entry token is ideal chain operand, but handled in FindBetterChain.
      break;

    case ISD::LOAD:
    case ISD::STORE: {
      // Get alias information for Chain.
      bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) &&
                      !cast<LSBaseSDNode>(Chain.getNode())->isVolatile();

      // If chain is alias then stop here.
      if (!(IsLoad && IsOpLoad) &&
          isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) {
        Aliases.push_back(Chain);
      } else {
        // Look further up the chain.
        Chains.push_back(Chain.getOperand(0));
        ++Depth;
      }
      break;
    }

    case ISD::TokenFactor:
      // We have to check each of the operands of the token factor for "small"
      // token factors, so we queue them up. Adding the operands to the queue
      // (stack) in reverse order maintains the original order and increases
      // the likelihood that getNode will find a matching token factor (CSE.)
      if (Chain.getNumOperands() > 16) {
        Aliases.push_back(Chain);
        break;
      }
      for (unsigned n = Chain.getNumOperands(); n;)
        Chains.push_back(Chain.getOperand(--n));
      ++Depth;
      break;

    default:
      // For all other instructions we will just have to take what we can get.
      Aliases.push_back(Chain);
      break;
    }
  }
}

/// Walk up chain skipping non-aliasing memory nodes, looking for a better
/// chain (aliasing node.)
SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
  SmallVector<SDValue, 8> Aliases;  // Ops for replacing token factor.

  // Accumulate all the aliases to this node.
  GatherAllAliases(N, OldChain, Aliases);

  // If no operands then chain to entry token.
  if (Aliases.size() == 0)
    return DAG.getEntryNode();

  // If a single operand then chain to it. We don't need to revisit it.
  if (Aliases.size() == 1)
    return Aliases[0];

  // Construct a custom tailored token factor.
  return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
}
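
// Illustrative note (not from the original source): the effect of
// FindBetterChain. Given a chain "St2 -> St1 -> EntryToken" where St2 does
// not alias St1, GatherAllAliases finds no aliasing node for St2, so
// FindBetterChain returns the entry token and St2 can be rewired as
// "St2 -> EntryToken", making the two stores independent for scheduling.
// If St2 instead aliased both St1 and an unrelated load, the two aliasing
// nodes would be merged under a new TokenFactor.
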
bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
  // This holds the base pointer, index, and the offset in bytes from the base
  // pointer.
  BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);

  // We must have a base and an offset.
  if (!BasePtr.Base.getNode())
    return false;

  // Do not handle stores to undef base pointers.
  if (BasePtr.Base.isUndef())
    return false;

  SmallVector<StoreSDNode *, 8> ChainedStores;
  ChainedStores.push_back(St);

  // Walk up the chain and look for nodes with offsets from the same
  // base pointer. Stop on reaching an instruction of a different kind or one
  // that has a different base pointer.
  StoreSDNode *Index = St;
  while (Index) {
    // If the chain has more than one use, then we can't reorder the mem ops.
    if (Index != St && !SDValue(Index, 0)->hasOneUse())
      break;

    if (Index->isVolatile() || Index->isIndexed())
      break;

    // Find the base pointer and offset for this memory node.
    BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG);

    // Check that the base pointer is the same as the original one.
    if (!Ptr.equalBaseIndex(BasePtr))
      break;

    // Find the next memory operand in the chain. If the next operand in the
    // chain is a store then move up and continue the scan with the next
    // memory operand. If the next operand is a load, save it and use alias
    // information to check if it interferes with anything.
    SDNode *NextInChain = Index->getChain().getNode();
    while (true) {
      if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
        // We found a store node. Use it for the next iteration.
        if (STn->isVolatile() || STn->isIndexed()) {
          Index = nullptr;
          break;
        }
        ChainedStores.push_back(STn);
        Index = STn;
        break;
      } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
        NextInChain = Ldn->getChain().getNode();
        continue;
      } else {
        Index = nullptr;
        break;
      }
    }
  }

  bool MadeChangeToSt = false;
  SmallVector<std::pair<StoreSDNode *, SDValue>, 8> BetterChains;

  for (StoreSDNode *ChainedStore : ChainedStores) {
    SDValue Chain = ChainedStore->getChain();
    SDValue BetterChain = FindBetterChain(ChainedStore, Chain);

    if (Chain != BetterChain) {
      if (ChainedStore == St)
        MadeChangeToSt = true;
      BetterChains.push_back(std::make_pair(ChainedStore, BetterChain));
    }
  }

  // Do all replacements only after finding them all, to avoid making the
  // chains more complicated by introducing new TokenFactors mid-search.
  for (auto Replacement : BetterChains)
    replaceStoreChain(Replacement.first, Replacement.second);

  return MadeChangeToSt;
}

/// This is the entry point for the file.
void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA,
                           CodeGenOpt::Level OptLevel) {
  // This is the main entry point to this class.
  DAGCombiner(*this, AA, OptLevel).Run(Level);
}
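
// Reference sketch (hedged, not part of this file): SelectionDAGISel drives
// this entry point once per combine level during instruction selection,
// e.g. Combine(BeforeLegalizeTypes, AA, OptLevel) on the initial DAG and
// again with AfterLegalizeTypes, AfterLegalizeVectorOps, and
// AfterLegalizeDAG as the corresponding legalization phases complete.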