//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "dagcombine"

STATISTIC(NodesCombined   , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int      , "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads, "Number of load sliced");

namespace {
  static cl::opt<bool>
    CombinerAA("combiner-alias-analysis", cl::Hidden,
               cl::desc("Enable DAG combiner alias-analysis heuristics"));

  static cl::opt<bool>
    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
               cl::desc("Enable DAG combiner's use of IR alias analysis"));

  static cl::opt<bool>
    UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
            cl::desc("Enable DAG combiner's use of TBAA"));

#ifndef NDEBUG
  static cl::opt<std::string>
    CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
               cl::desc("Only use DAG-combiner alias analysis in this"
                        " function"));
#endif

  /// Hidden option to stress test load slicing, i.e., when this option
  /// is enabled, load slicing bypasses most of its profitability guards.
  static cl::opt<bool>
  StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
                    cl::desc("Bypass the profitability model of load "
                             "slicing"),
                    cl::init(false));

  static cl::opt<bool>
    MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
                      cl::desc("DAG combiner may split indexing from loads"));

//------------------------------ DAGCombiner ---------------------------------//

  class DAGCombiner {
    SelectionDAG &DAG;
    const TargetLowering &TLI;
    CombineLevel Level;
    CodeGenOpt::Level OptLevel;
    bool LegalOperations;
    bool LegalTypes;
    bool ForCodeSize;

    /// \brief Worklist of all of the nodes that need to be simplified.
    ///
    /// This must behave as a stack -- new nodes to process are pushed onto the
    /// back and when processing we pop off of the back.
    ///
    /// The worklist will not contain duplicates but may contain null entries
    /// due to nodes being deleted from the underlying DAG.
    SmallVector<SDNode *, 64> Worklist;

    /// \brief Mapping from an SDNode to its position on the worklist.
    ///
    /// This is used to find and remove nodes from the worklist (by nulling
    /// them) when they are deleted from the underlying DAG. It relies on
    /// stable indices of nodes within the worklist.
    DenseMap<SDNode *, unsigned> WorklistMap;

    /// \brief Set of nodes which have been combined (at least once).
    ///
    /// This is used to allow us to reliably add any operands of a DAG node
    /// which have not yet been combined to the worklist.
    SmallPtrSet<SDNode *, 32> CombinedNodes;

    // AA - Used for DAG load/store alias analysis.
    AliasAnalysis &AA;

    /// When an instruction is simplified, add all users of the instruction to
    /// the work lists because they might get more simplified now.
    void AddUsersToWorklist(SDNode *N) {
      for (SDNode *Node : N->uses())
        AddToWorklist(Node);
    }

    /// Call the node-specific routine that folds each particular type of node.
    SDValue visit(SDNode *N);

  public:
    /// Add to the worklist making sure its instance is at the back (next to be
    /// processed).
    void AddToWorklist(SDNode *N) {
      // Skip handle nodes as they can't usefully be combined and confuse the
      // zero-use deletion strategy.
      if (N->getOpcode() == ISD::HANDLENODE)
        return;

      if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
        Worklist.push_back(N);
    }

    /// Remove all instances of N from the worklist.
    void removeFromWorklist(SDNode *N) {
      CombinedNodes.erase(N);

      auto It = WorklistMap.find(N);
      if (It == WorklistMap.end())
        return; // Not in the worklist.

      // Null out the entry rather than erasing it to avoid a linear operation.
      Worklist[It->second] = nullptr;
      WorklistMap.erase(It);
    }

    void deleteAndRecombine(SDNode *N);
    bool recursivelyDeleteUnusedNodes(SDNode *N);
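
    // Illustrative note (not part of the original source): the worklist pairs
    // a vector with a map of stable indices, so removal is O(1) by nulling the
    // slot instead of shifting elements. A minimal sketch of the same idea,
    // with hypothetical names:
    //
    //   SmallVector<SDNode *, 64> List;
    //   DenseMap<SDNode *, unsigned> Index;
    //   void push(SDNode *N) {
    //     if (Index.insert({N, List.size()}).second)
    //       List.push_back(N);
    //   }
    //   void erase(SDNode *N) {
    //     auto It = Index.find(N);
    //     if (It == Index.end()) return;
    //     List[It->second] = nullptr; // null, don't shift: indices stay stable
    //     Index.erase(It);
    //   }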

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                      bool AddTo = true);

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
      return CombineTo(N, &Res, 1, AddTo);
    }

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                      bool AddTo = true) {
      SDValue To[] = { Res0, Res1 };
      return CombineTo(N, To, 2, AddTo);
    }

    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);

  private:

    /// Check the specified integer node value to see if it can be simplified
    /// or if things it uses can be simplified by bit propagation.
    /// If so, return true.
    bool SimplifyDemandedBits(SDValue Op) {
      unsigned BitWidth = Op.getScalarValueSizeInBits();
      APInt Demanded = APInt::getAllOnesValue(BitWidth);
      return SimplifyDemandedBits(Op, Demanded);
    }

    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);

    bool CombineToPreIndexedLoadStore(SDNode *N);
    bool CombineToPostIndexedLoadStore(SDNode *N);
    SDValue SplitIndexingFromLoad(LoadSDNode *LD);
    bool SliceUpLoad(SDNode *N);

    /// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
    /// load.
    ///
    /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
    /// \param InVecVT type of the input vector to EVE with bitcasts resolved.
    /// \param EltNo index of the vector element to load.
    /// \param OriginalLoad load that EVE came from to be replaced.
    /// \returns EVE on success, SDValue() on failure.
    SDValue ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
        SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad);
    void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
    SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
    SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue PromoteIntBinOp(SDValue Op);
    SDValue PromoteIntShiftOp(SDValue Op);
    SDValue PromoteExtend(SDValue Op);
    bool PromoteLoad(SDValue Op);

    void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
                         SDValue Trunc, SDValue ExtLoad, const SDLoc &DL,
                         ISD::NodeType ExtType);

    /// Call the node-specific routine that knows how to fold each
    /// particular type of node. If that doesn't do anything, try the
    /// target-specific DAG combines.
    SDValue combine(SDNode *N);

    // Visitation implementation - Implement dag node combining for different
    // node types. The semantics are as follows:
    // Return Value:
    //   SDValue.getNode() == 0 - No change was made
    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
    //   otherwise              - N should be replaced by the returned Operand.
    //
    SDValue visitTokenFactor(SDNode *N);
    SDValue visitMERGE_VALUES(SDNode *N);
    SDValue visitADD(SDNode *N);
    SDValue visitSUB(SDNode *N);
    SDValue visitADDC(SDNode *N);
    SDValue visitSUBC(SDNode *N);
    SDValue visitADDE(SDNode *N);
    SDValue visitSUBE(SDNode *N);
    SDValue visitMUL(SDNode *N);
    SDValue useDivRem(SDNode *N);
    SDValue visitSDIV(SDNode *N);
    SDValue visitUDIV(SDNode *N);
    SDValue visitREM(SDNode *N);
    SDValue visitMULHU(SDNode *N);
    SDValue visitMULHS(SDNode *N);
    SDValue visitSMUL_LOHI(SDNode *N);
    SDValue visitUMUL_LOHI(SDNode *N);
    SDValue visitSMULO(SDNode *N);
    SDValue visitUMULO(SDNode *N);
    SDValue visitIMINMAX(SDNode *N);
    SDValue visitAND(SDNode *N);
    SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *LocReference);
    SDValue visitOR(SDNode *N);
    SDValue visitORLike(SDValue N0, SDValue N1, SDNode *LocReference);
    SDValue visitXOR(SDNode *N);
    SDValue SimplifyVBinOp(SDNode *N);
    SDValue visitSHL(SDNode *N);
    SDValue visitSRA(SDNode *N);
    SDValue visitSRL(SDNode *N);
    SDValue visitRotate(SDNode *N);
    SDValue visitBSWAP(SDNode *N);
    SDValue visitBITREVERSE(SDNode *N);
    SDValue visitCTLZ(SDNode *N);
    SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTTZ(SDNode *N);
    SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTPOP(SDNode *N);
    SDValue visitSELECT(SDNode *N);
    SDValue visitVSELECT(SDNode *N);
    SDValue visitSELECT_CC(SDNode *N);
    SDValue visitSETCC(SDNode *N);
    SDValue visitSETCCE(SDNode *N);
    SDValue visitSIGN_EXTEND(SDNode *N);
    SDValue visitZERO_EXTEND(SDNode *N);
    SDValue visitANY_EXTEND(SDNode *N);
    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
    SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
    SDValue visitZERO_EXTEND_VECTOR_INREG(SDNode *N);
    SDValue visitTRUNCATE(SDNode *N);
    SDValue visitBITCAST(SDNode *N);
    SDValue visitBUILD_PAIR(SDNode *N);
    SDValue visitFADD(SDNode *N);
    SDValue visitFSUB(SDNode *N);
    SDValue visitFMUL(SDNode *N);
    SDValue visitFMA(SDNode *N);
    SDValue visitFDIV(SDNode *N);
    SDValue visitFREM(SDNode *N);
    SDValue visitFSQRT(SDNode *N);
    SDValue visitFCOPYSIGN(SDNode *N);
    SDValue visitSINT_TO_FP(SDNode *N);
    SDValue visitUINT_TO_FP(SDNode *N);
    SDValue visitFP_TO_SINT(SDNode *N);
    SDValue visitFP_TO_UINT(SDNode *N);
    SDValue visitFP_ROUND(SDNode *N);
    SDValue visitFP_ROUND_INREG(SDNode *N);
    SDValue visitFP_EXTEND(SDNode *N);
    SDValue visitFNEG(SDNode *N);
    SDValue visitFABS(SDNode *N);
    SDValue visitFCEIL(SDNode *N);
    SDValue visitFTRUNC(SDNode *N);
    SDValue visitFFLOOR(SDNode *N);
    SDValue visitFMINNUM(SDNode *N);
    SDValue visitFMAXNUM(SDNode *N);
    SDValue visitBRCOND(SDNode *N);
    SDValue visitBR_CC(SDNode *N);
    SDValue visitLOAD(SDNode *N);

    SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain);
    SDValue replaceStoreOfFPConstant(StoreSDNode *ST);

    SDValue visitSTORE(SDNode *N);
    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
    SDValue visitBUILD_VECTOR(SDNode *N);
    SDValue visitCONCAT_VECTORS(SDNode *N);
    SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
    SDValue visitVECTOR_SHUFFLE(SDNode *N);
    SDValue visitSCALAR_TO_VECTOR(SDNode *N);
    SDValue visitINSERT_SUBVECTOR(SDNode *N);
    SDValue visitMLOAD(SDNode *N);
    SDValue visitMSTORE(SDNode *N);
    SDValue visitMGATHER(SDNode *N);
    SDValue visitMSCATTER(SDNode *N);
    SDValue visitFP_TO_FP16(SDNode *N);
    SDValue visitFP16_TO_FP(SDNode *N);

    SDValue visitFADDForFMACombine(SDNode *N);
    SDValue visitFSUBForFMACombine(SDNode *N);
    SDValue visitFMULForFMADistributiveCombine(SDNode *N);

    SDValue XformToShuffleWithZero(SDNode *N);
    SDValue ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue LHS,
                           SDValue RHS);

    SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);

    SDValue foldSelectOfConstants(SDNode *N);
    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
    SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
                           SDValue N2);
    SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
                             SDValue N2, SDValue N3, ISD::CondCode CC,
                             bool NotExtCompare = false);
    SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1,
                                   SDValue N2, SDValue N3, ISD::CondCode CC);
    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                          const SDLoc &DL, bool foldBooleans = true);

    bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                           SDValue &CC) const;
    bool isOneUseSetCC(SDValue N) const;

    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                       unsigned HiOp);
    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
    SDValue CombineExtLoad(SDNode *N);
    SDValue combineRepeatedFPDivisors(SDNode *N);
    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
    SDValue BuildSDIV(SDNode *N);
    SDValue BuildSDIVPow2(SDNode *N);
    SDValue BuildUDIV(SDNode *N);
    SDValue BuildLogBase2(SDValue Op, const SDLoc &DL);
    SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags);
    SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags);
    SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags);
    SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags, bool Recip);
    SDValue buildSqrtNROneConst(SDValue Op, SDValue Est, unsigned Iterations,
                                SDNodeFlags *Flags, bool Reciprocal);
    SDValue buildSqrtNRTwoConst(SDValue Op, SDValue Est, unsigned Iterations,
                                SDNodeFlags *Flags, bool Reciprocal);
    SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                               bool DemandHighBits = true);
    SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
    SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
                              SDValue InnerPos, SDValue InnerNeg,
                              unsigned PosOpcode, unsigned NegOpcode,
                              const SDLoc &DL);
    SDNode *MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
    SDValue MatchLoadCombine(SDNode *N);
    SDValue ReduceLoadWidth(SDNode *N);
    SDValue ReduceLoadOpStoreWidth(SDNode *N);
    SDValue splitMergedValStore(StoreSDNode *ST);
    SDValue TransformFPLoadStorePair(SDNode *N);
    SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
    SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
    SDValue reduceBuildVecToShuffle(SDNode *N);
    SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                  ArrayRef<int> VectorMask, SDValue VecIn1,
                                  SDValue VecIn2, unsigned LeftIdx);

    SDValue GetDemandedBits(SDValue V, const APInt &Mask);

    /// Walk up chain skipping non-aliasing memory nodes,
    /// looking for aliasing nodes and adding them to the Aliases vector.
    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
                          SmallVectorImpl<SDValue> &Aliases);

    /// Return true if there is any possibility that the two addresses
    /// overlap.
    bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const;

    /// Walk up chain skipping non-aliasing memory nodes, looking for a better
    /// chain (aliasing node.)
    SDValue FindBetterChain(SDNode *N, SDValue Chain);

    /// Try to replace a store and any possibly adjacent stores on
    /// consecutive chains with better chains. Return true only if St is
    /// replaced.
    ///
    /// Notice that other chains may still be replaced even if the function
    /// returns false.
    bool findBetterNeighborChains(StoreSDNode *St);

    /// Match "(X shl/srl V1) & V2" where V2 may not be present.
    bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask);

    /// Holds a pointer to an LSBaseSDNode as well as information on where it
    /// is located in a sequence of memory operations connected by a chain.
    struct MemOpLink {
      MemOpLink(LSBaseSDNode *N, int64_t Offset, unsigned Seq)
          : MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) {}
      // Ptr to the mem node.
      LSBaseSDNode *MemNode;
      // Offset from the base ptr.
      int64_t OffsetFromBase;
      // What is the sequence number of this mem node.
      // Lowest mem operand in the DAG starts at zero.
      unsigned SequenceNum;
    };

    /// This is a helper function for visitMUL to check the profitability
    /// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
    /// MulNode is the original multiply, AddNode is (add x, c1),
    /// and ConstNode is c2.
    bool isMulAddWithConstProfitable(SDNode *MulNode,
                                     SDValue &AddNode,
                                     SDValue &ConstNode);

    /// This is a helper function for MergeStoresOfConstantsOrVecElts. Returns
    /// a constant build_vector of the stored constant values in Stores.
    SDValue getMergedConstantVectorStore(SelectionDAG &DAG, const SDLoc &SL,
                                         ArrayRef<MemOpLink> Stores,
                                         SmallVectorImpl<SDValue> &Chains,
                                         EVT Ty) const;

    /// This is a helper function for visitAND and visitZERO_EXTEND. Returns
    /// true if the (and (load x) c) pattern matches an extload. ExtVT returns
    /// the type of the loaded value to be extended. LoadedVT returns the type
    /// of the original loaded value. NarrowLoad returns whether the load
    /// would need to be narrowed in order to match.
    bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
                          EVT LoadResultTy, EVT &ExtVT, EVT &LoadedVT,
                          bool &NarrowLoad);

    /// This is a helper function for MergeConsecutiveStores. When the source
    /// elements of the consecutive stores are all constants or all extracted
    /// vector elements, try to merge them into one larger store.
    /// \return true if stores were merged (the merged stores always form a
    /// prefix of \p StoreNodes).
    bool MergeStoresOfConstantsOrVecElts(
        SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, unsigned NumStores,
        bool IsConstantSrc, bool UseVector);

    /// This is a helper function for MergeConsecutiveStores.
    /// Stores that may be merged are placed in StoreNodes.
    /// Loads that may alias with those stores are placed in AliasLoadNodes.
    void getStoreMergeAndAliasCandidates(
        StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes,
        SmallVectorImpl<LSBaseSDNode *> &AliasLoadNodes);

    /// Helper function for MergeConsecutiveStores.
    /// Checks if the candidate stores have indirect dependencies through
    /// their operands. \return true if it is safe to merge.
    bool checkMergeStoreCandidatesForDependencies(
        SmallVectorImpl<MemOpLink> &StoreNodes);

    /// Merge consecutive store operations into a wide store.
    /// This optimization uses wide integers or vectors when possible.
    /// \return true if stores were merged (the affected nodes are stored as
    /// a prefix in \p StoreNodes).
    bool MergeConsecutiveStores(StoreSDNode *N,
                                SmallVectorImpl<MemOpLink> &StoreNodes);

    /// \brief Try to transform a truncation where C is a constant:
    ///   (trunc (and X, C)) -> (and (trunc X), (trunc C))
    ///
    /// \p N needs to be a truncation and its first operand an AND. Other
    /// requirements are checked by the function (e.g. that trunc is
    /// single-use); if they are not met, an empty SDValue is returned.
    SDValue distributeTruncateThroughAnd(SDNode *N);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
        : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
          OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
      ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
    }

    /// Runs the dag combiner on all nodes in the work list
    void Run(CombineLevel AtLevel);

    SelectionDAG &getDAG() const { return DAG; }

    /// Returns a type large enough to hold any valid shift amount - before
    /// type legalization these can be huge.
    EVT getShiftAmountTy(EVT LHSTy) {
      assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
      if (LHSTy.isVector())
        return LHSTy;
      auto &DL = DAG.getDataLayout();
      return LegalTypes ? TLI.getScalarShiftAmountTy(DL, LHSTy)
                        : TLI.getPointerTy(DL);
    }

    /// This method returns true if we are running before type legalization
    /// or if the specified VT is legal.
    bool isTypeLegal(const EVT &VT) {
      if (!LegalTypes) return true;
      return TLI.isTypeLegal(VT);
    }

    /// Convenience wrapper around TargetLowering::getSetCCResultType
    EVT getSetCCResultType(EVT VT) const {
      return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                    VT);
    }
  };
} // end anonymous namespace
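
// Illustrative usage sketch (assumed caller, not part of this file): the
// combiner is typically constructed once per SelectionDAG and driven from the
// instruction-selection pipeline at a given combine level, roughly:
//
//   DAGCombiner DC(DAG, AA, OptLevel);
//   DC.Run(BeforeLegalizeTypes); // later also AfterLegalizeTypes, etc.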

namespace {
/// This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
class WorklistRemover : public SelectionDAG::DAGUpdateListener {
  DAGCombiner &DC;
public:
  explicit WorklistRemover(DAGCombiner &dc)
      : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}

  void NodeDeleted(SDNode *N, SDNode *E) override {
    DC.removeFromWorklist(N);
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
//  TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//

void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->AddToWorklist(N);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
}

void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

void DAGCombiner::deleteAndRecombine(SDNode *N) {
  removeFromWorklist(N);

  // If the operands of this node are only used by the node, they will now be
  // dead. Make sure to re-visit them and recursively delete dead nodes.
  for (const SDValue &Op : N->ops())
    // For an operand generating multiple values, one of the values may
    // become dead allowing further simplification (e.g. split index
    // arithmetic from an indexed load).
    if (Op->hasOneUse() || Op->getNumValues() > 1)
      AddToWorklist(Op.getNode());

  DAG.DeleteNode(N);
}

/// Return 1 if we can compute the negated form of the specified expression
/// for the same cost as the expression itself, or 2 if we can compute the
/// negated form more cheaply than the expression itself.
static char isNegatibleForFree(SDValue Op, bool LegalOperations,
                               const TargetLowering &TLI,
                               const TargetOptions *Options,
                               unsigned Depth = 0) {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return 2;

  // Don't allow anything with multiple uses.
  if (!Op.hasOneUse()) return 0;

  // Don't recurse exponentially.
  if (Depth > 6) return 0;

  switch (Op.getOpcode()) {
  default: return false;
  case ISD::ConstantFP: {
    if (!LegalOperations)
      return 1;

    // Don't invert constant FP values after legalization unless the target
    // says the negated constant is legal.
    EVT VT = Op.getValueType();
    return TLI.isOperationLegal(ISD::ConstantFP, VT) ||
           TLI.isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()),
                            VT);
  }
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
    if (!Options->UnsafeFPMath) return 0;

    // After operation legalization, it might not be legal to create new
    // FSUBs.
    if (LegalOperations &&
        !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType()))
      return 0;

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
                                    Options, Depth + 1))
      return V;
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
                              Depth + 1);
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!Options->NoSignedZerosFPMath &&
        !Op.getNode()->getFlags()->hasNoSignedZeros())
      return 0;

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return 1;

  case ISD::FMUL:
  case ISD::FDIV:
    if (Options->HonorSignDependentRoundingFPMath()) return 0;

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
                                    Options, Depth + 1))
      return V;

    return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
                              Depth + 1);

  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::FSIN:
    return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
                              Depth + 1);
  }
}
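
// Illustrative examples (not from the original source) of the tri-state
// result of isNegatibleForFree, for hypothetical nodes:
//
//   (fneg X)                         -> 2  the fneg simply disappears
//   (ConstantFP C), before legalize  -> 1  flipping the sign bit is free
//   (fadd A, B) without unsafe-fp    -> 0  no free negation is available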

/// If isNegatibleForFree returns a nonzero value, return the newly negated
/// expression.
static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                    bool LegalOperations, unsigned Depth = 0) {
  const TargetOptions &Options = DAG.getTarget().Options;
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);

  // Don't allow anything with multiple uses.
  assert(Op.hasOneUse() && "Unknown reuse!");

  assert(Depth <= 6 &&
         "GetNegatedExpression doesn't match isNegatibleForFree");

  const SDNodeFlags *Flags = Op.getNode()->getFlags();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown code");
  case ISD::ConstantFP: {
    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
    V.changeSign();
    return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType());
  }
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
    assert(Options.UnsafeFPMath);

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
                           DAG.getTargetLoweringInfo(), &Options, Depth+1))
      return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, Depth+1),
                         Op.getOperand(1), Flags);
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, Depth+1),
                       Op.getOperand(0), Flags);
  case ISD::FSUB:
    // fold (fneg (fsub 0, B)) -> B
    if (ConstantFPSDNode *N0CFP =
            dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
      if (N0CFP->isZero())
        return Op.getOperand(1);

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(0), Flags);

  case ISD::FMUL:
  case ISD::FDIV:
    assert(!Options.HonorSignDependentRoundingFPMath());

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
    if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
                           DAG.getTargetLoweringInfo(), &Options, Depth+1))
      return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, Depth+1),
                         Op.getOperand(1), Flags);

    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       Op.getOperand(0),
                       GetNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, Depth+1), Flags);

  case ISD::FP_EXTEND:
  case ISD::FSIN:
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, Depth+1));
  case ISD::FP_ROUND:
    return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, Depth+1),
                       Op.getOperand(1));
  }
}

// APInts must be the same size for most operations; this helper function
// zero-extends the shorter of the pair so that they match.
// We provide an Offset so that we can create bitwidths that won't overflow.
static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
  unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
  LHS = LHS.zextOrSelf(Bits);
  RHS = RHS.zextOrSelf(Bits);
}
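
// Illustrative usage of zeroExtendToMatch (hypothetical values, not from the
// original source):
//
//   APInt A(8, 0x80), B(16, 0x1234);
//   zeroExtendToMatch(A, B);    // both are now 16 bits wide
//   zeroExtendToMatch(A, B, 1); // both 17 bits: headroom for a carry/shift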

// Return true if this node is a setcc, or is a select_cc
// that selects between the target values used for true and false, making it
// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references
// to the appropriate nodes based on the type of node we are checking. This
// simplifies life a bit for the callers.
bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                                    SDValue &CC) const {
  if (N.getOpcode() == ISD::SETCC) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(2);
    return true;
  }

  if (N.getOpcode() != ISD::SELECT_CC ||
      !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
      !TLI.isConstFalseVal(N.getOperand(3).getNode()))
    return false;

  if (TLI.getBooleanContents(N.getValueType()) ==
      TargetLowering::UndefinedBooleanContent)
    return false;

  LHS = N.getOperand(0);
  RHS = N.getOperand(1);
  CC  = N.getOperand(4);
  return true;
}

/// Return true if this is a SetCC-equivalent operation with only one use.
/// If this is true, it allows the users to invert the operation for free when
/// it is profitable to do so.
bool DAGCombiner::isOneUseSetCC(SDValue N) const {
  SDValue N0, N1, N2;
  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
    return true;
  return false;
}

// \brief Returns the SDNode if it is a constant float BuildVector
// or constant float.
static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();
  return nullptr;
}

// Determines if it is a constant integer or a build vector of constant
// integers (and undefs).
// Do not permit build vector implicit truncation.
static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
  if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N))
    return !(Const->isOpaque() && NoOpaques);
  if (N.getOpcode() != ISD::BUILD_VECTOR)
    return false;
  unsigned BitWidth = N.getScalarValueSizeInBits();
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
    if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
        (Const->isOpaque() && NoOpaques))
      return false;
  }
  return true;
}

// Determines if it is a constant null integer or a splatted vector of a
// constant null integer (with no undefs).
// Build vector implicit truncation is not an issue for null values.
static bool isNullConstantOrNullSplatConstant(SDValue N) {
  if (ConstantSDNode *Splat = isConstOrConstSplat(N))
    return Splat->isNullValue();
  return false;
}

// Determines if it is a constant integer of one or a splatted vector of a
// constant integer of one (with no undefs).
// Do not permit build vector implicit truncation.
static bool isOneConstantOrOneSplatConstant(SDValue N) {
  unsigned BitWidth = N.getScalarValueSizeInBits();
  if (ConstantSDNode *Splat = isConstOrConstSplat(N))
    return Splat->isOne() && Splat->getAPIntValue().getBitWidth() == BitWidth;
  return false;
}
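
// Illustrative results (hypothetical nodes, not from the original source)
// for the constant predicates above:
//
//   isNullConstantOrNullSplatConstant(i32 0)          -> true
//   isNullConstantOrNullSplatConstant(v4i32 splat 0)  -> true
//   isOneConstantOrOneSplatConstant(v4i32 splat 1)    -> true
//   isOneConstantOrOneSplatConstant(v2i64 <1, 2>)     -> false (not a splat)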

// Determines if it is a constant integer of all ones or a splatted vector of
// a constant integer of all ones (with no undefs).
// Do not permit build vector implicit truncation.
static bool isAllOnesConstantOrAllOnesSplatConstant(SDValue N) {
  unsigned BitWidth = N.getScalarValueSizeInBits();
  if (ConstantSDNode *Splat = isConstOrConstSplat(N))
    return Splat->isAllOnesValue() &&
           Splat->getAPIntValue().getBitWidth() == BitWidth;
  return false;
}

// Determines if a BUILD_VECTOR is composed of all-constants possibly mixed
// with undefs.
static bool isAnyConstantBuildVector(const SDNode *N) {
  return ISD::isBuildVectorOfConstantSDNodes(N) ||
         ISD::isBuildVectorOfConstantFPSDNodes(N);
}

SDValue DAGCombiner::ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
                                    SDValue N1) {
  EVT VT = N0.getValueType();
  if (N0.getOpcode() == Opc) {
    if (SDNode *L =
            DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
      if (SDNode *R = DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
        // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
        if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, L, R))
          return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
        return SDValue();
      }
      if (N0.hasOneUse()) {
        // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff (op x, c1)
        // has one use
        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0),
                                     N1);
        if (!OpNode.getNode())
          return SDValue();
        AddToWorklist(OpNode.getNode());
        return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
      }
    }
  }

  if (N1.getOpcode() == Opc) {
    if (SDNode *R =
            DAG.isConstantIntBuildVectorOrConstantInt(N1.getOperand(1))) {
      if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
        // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
        if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, R, L))
          return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
        return SDValue();
      }
      if (N1.hasOneUse()) {
        // reassoc. (op x, (op y, c1)) -> (op (op x, y), c1) iff (op y, c1)
        // has one use
        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0,
                                     N1.getOperand(0));
        if (!OpNode.getNode())
          return SDValue();
        AddToWorklist(OpNode.getNode());
        return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
      }
    }
  }

  return SDValue();
}

SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                               bool AddTo) {
  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.1 ";
        N->dump(&DAG);
        dbgs() << "\nWith: ";
        To[0].getNode()->dump(&DAG);
        dbgs() << " and " << NumTo-1 << " other values\n");
  for (unsigned i = 0, e = NumTo; i != e; ++i)
    assert((!To[i].getNode() ||
            N->getValueType(i) == To[i].getValueType()) &&
           "Cannot combine value to value of different type!");

  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesWith(N, To);
  if (AddTo) {
    // Push the new nodes and any users onto the worklist
    for (unsigned i = 0, e = NumTo; i != e; ++i) {
      if (To[i].getNode()) {
        AddToWorklist(To[i].getNode());
        AddUsersToWorklist(To[i].getNode());
      }
    }
  }

  // Finally, if the node is now dead, remove it from the graph. The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (N->use_empty())
    deleteAndRecombine(N);
  return SDValue(N, 0);
}

void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  // Replace all uses. If any nodes become isomorphic to other nodes and
  // are deleted, make sure to remove them from our worklist.
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);

  // Push the new node and any (possibly new) users onto the worklist.
  AddToWorklist(TLO.New.getNode());
  AddUsersToWorklist(TLO.New.getNode());

  // Finally, if the node is now dead, remove it from the graph. The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (TLO.Old.getNode()->use_empty())
    deleteAndRecombine(TLO.Old.getNode());
}

/// Check the specified integer node value to see if it can be simplified or
/// if things it uses can be simplified by bit propagation. If so, return
/// true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
  TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
  APInt KnownZero, KnownOne;
  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    return false;

  // Revisit the node.
  AddToWorklist(Op.getNode());

  // Replace the old value with the new one.
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.2 ";
        TLO.Old.getNode()->dump(&DAG);
        dbgs() << "\nWith: ";
        TLO.New.getNode()->dump(&DAG);
        dbgs() << '\n');

  CommitTargetLoweringOpt(TLO);
  return true;
}

void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
  SDLoc DL(Load);
  EVT VT = Load->getValueType(0);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0));

  DEBUG(dbgs() << "\nReplacing.9 ";
        Load->dump(&DAG);
        dbgs() << "\nWith: ";
        Trunc.getNode()->dump(&DAG);
        dbgs() << '\n');
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
  deleteAndRecombine(Load);
  AddToWorklist(Trunc.getNode());
}
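
// Illustrative sketch (hypothetical mask, not from the original source): a
// caller that only needs the low byte of Op could drive the same machinery
// with a narrower demanded mask:
//
//   TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
//   APInt KnownZero, KnownOne;
//   APInt Demanded = APInt::getLowBitsSet(32, 8); // only the low 8 bits
//   if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
//     CommitTargetLoweringOpt(TLO); // commits TLO.Old -> TLO.New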

SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
  Replace = false;
  SDLoc DL(Op);
  if (ISD::isUNINDEXEDLoad(Op.getNode())) {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
        ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD
                                                         : ISD::EXTLOAD)
        : LD->getExtensionType();
    Replace = true;
    return DAG.getExtLoad(ExtType, DL, PVT,
                          LD->getChain(), LD->getBasePtr(),
                          MemVT, LD->getMemOperand());
  }

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::AssertSext:
    return DAG.getNode(ISD::AssertSext, DL, PVT,
                       SExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::AssertZext:
    return DAG.getNode(ISD::AssertZext, DL, PVT,
                       ZExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::Constant: {
    unsigned ExtOpc =
        Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, DL, PVT, Op);
  }
  }

  if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
    return SDValue();
  return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op);
}

SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
  if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
    return SDValue();
  EVT OldVT = Op.getValueType();
  SDLoc DL(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp,
                     DAG.getValueType(OldVT));
}

SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
  EVT OldVT = Op.getValueType();
  SDLoc DL(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getZeroExtendInReg(NewOp, DL, OldVT);
}

/// Promote the specified integer binary operation if the target indicates it
/// is beneficial. e.g. On x86, it's usually better to promote i16 operations
/// to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace0 = false;
    SDValue N0 = Op.getOperand(0);
    SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
    if (!NN0.getNode())
      return SDValue();

    bool Replace1 = false;
    SDValue N1 = Op.getOperand(1);
    SDValue NN1;
    if (N0 == N1)
      NN1 = NN0;
    else {
      NN1 = PromoteOperand(N1, PVT, Replace1);
      if (!NN1.getNode())
        return SDValue();
    }

    AddToWorklist(NN0.getNode());
    if (NN1.getNode())
      AddToWorklist(NN1.getNode());

    if (Replace0)
      ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
    if (Replace1)
      ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    SDLoc DL(Op);
    return DAG.getNode(ISD::TRUNCATE, DL, VT,
                       DAG.getNode(Opc, DL, PVT, NN0, NN1));
  }
  return SDValue();
}
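
// Illustrative example (x86-flavored, assumed target behavior): when i16 ADD
// is reported as undesirable, PromoteIntBinOp rewrites
//   (i16 (add a, b))
// into
//   (i16 (trunc (i32 (add (anyext a), (anyext b)))))
// and the truncate/extends typically fold away in later combines.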

/// Promote the specified integer shift operation if the target indicates it
/// is beneficial. e.g. On x86, it's usually better to promote i16 operations
/// to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace = false;
    SDValue N0 = Op.getOperand(0);
    if (Opc == ISD::SRA)
      N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
    else if (Opc == ISD::SRL)
      N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
    else
      N0 = PromoteOperand(N0, PVT, Replace);
    if (!N0.getNode())
      return SDValue();

    AddToWorklist(N0.getNode());
    if (Replace)
      ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    SDLoc DL(Op);
    return DAG.getNode(ISD::TRUNCATE, DL, VT,
                       DAG.getNode(Opc, DL, PVT, N0, Op.getOperand(1)));
  }
  return SDValue();
}

SDValue DAGCombiner::PromoteExtend(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");
    // fold (aext (aext x)) -> (aext x)
    // fold (aext (zext x)) -> (zext x)
    // fold (aext (sext x)) -> (sext x)
    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
  }
  return SDValue();
}

bool DAGCombiner::PromoteLoad(SDValue Op) {
  if (!LegalOperations)
    return false;

  if (!ISD::isUNINDEXEDLoad(Op.getNode()))
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return false;

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return false;

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    SDLoc DL(Op);
    SDNode *N = Op.getNode();
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
        ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD
                                                         : ISD::EXTLOAD)
        : LD->getExtensionType();
    SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT,
                                   LD->getChain(), LD->getBasePtr(),
                                   MemVT, LD->getMemOperand());
    SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);

    DEBUG(dbgs() << "\nPromoting ";
          N->dump(&DAG);
          dbgs() << "\nTo: ";
          Result.getNode()->dump(&DAG);
          dbgs() << '\n');
    WorklistRemover DeadNodes(*this);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
    deleteAndRecombine(N);
    AddToWorklist(Result.getNode());
    return true;
  }
  return false;
}

/// \brief Recursively delete a node which has no uses and any operands for
/// which it is the only use.
///
/// Note that this both deletes the nodes and removes them from the worklist.
/// It also adds any nodes which have had a user deleted to the worklist, as
/// they may now have only one use and be subject to other combines.
bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
  if (!N->use_empty())
    return false;

  SmallSetVector<SDNode *, 16> Nodes;
  Nodes.insert(N);
  do {
    N = Nodes.pop_back_val();
    if (!N)
      continue;

    if (N->use_empty()) {
      for (const SDValue &ChildN : N->op_values())
        Nodes.insert(ChildN.getNode());

      removeFromWorklist(N);
      DAG.DeleteNode(N);
    } else {
      AddToWorklist(N);
    }
  } while (!Nodes.empty());
  return true;
}

//===----------------------------------------------------------------------===//
//  Main DAG Combiner implementation
//===----------------------------------------------------------------------===//
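
// Illustrative note (not from the original source): Run keeps the root alive
// across replacements with a handle node, roughly:
//
//   HandleSDNode Dummy(DAG.getRoot()); // pins the root across RAUW calls
//   ... pop nodes, combine, replace ...
//   DAG.setRoot(Dummy.getValue());     // pick up whatever the root became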

void DAGCombiner::Run(CombineLevel AtLevel) {
  // set the instance variables, so that the various visit routines may use
  // them.
  Level = AtLevel;
  LegalOperations = Level >= AfterLegalizeVectorOps;
  LegalTypes = Level >= AfterLegalizeTypes;

  // Add all the dag nodes to the worklist.
  for (SDNode &Node : DAG.allnodes())
    AddToWorklist(&Node);

  // Create a dummy node (which is not added to allnodes), that adds a
  // reference to the root node, preventing it from being deleted, and
  // tracking any changes of the root.
  HandleSDNode Dummy(DAG.getRoot());

  // While the worklist isn't empty, find a node and try to combine it.
  while (!WorklistMap.empty()) {
    SDNode *N;
    // The Worklist holds the SDNodes in order, but it may contain null
    // entries.
    do {
      N = Worklist.pop_back_val();
    } while (!N);

    bool GoodWorklistEntry = WorklistMap.erase(N);
    (void)GoodWorklistEntry;
    assert(GoodWorklistEntry &&
           "Found a worklist entry without a corresponding map entry!");

    // If N has no uses, it is dead. Make sure to revisit all N's operands
    // once N is deleted from the DAG, since they too may now be dead or may
    // have a reduced number of uses, allowing other xforms.
    if (recursivelyDeleteUnusedNodes(N))
      continue;

    WorklistRemover DeadNodes(*this);

    // If this combine is running after legalizing the DAG, re-legalize any
    // nodes pulled off the worklist.
    if (Level == AfterLegalizeDAG) {
      SmallSetVector<SDNode *, 16> UpdatedNodes;
      bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);

      for (SDNode *LN : UpdatedNodes) {
        AddToWorklist(LN);
        AddUsersToWorklist(LN);
      }
      if (!NIsValid)
        continue;
    }

    DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));

    // Add any operands of the new node which have not yet been combined to
    // the worklist as well. Because the worklist uniques things already,
    // this won't repeatedly process the same operand.
    CombinedNodes.insert(N);
    for (const SDValue &ChildN : N->op_values())
      if (!CombinedNodes.count(ChildN.getNode()))
        AddToWorklist(ChildN.getNode());

    SDValue RV = combine(N);

    if (!RV.getNode())
      continue;

    ++NodesCombined;

    // If we get back the same node we passed in, rather than a new node or
    // zero, we know that the node must have defined multiple values and
    // CombineTo was used. Since CombineTo takes care of the worklist
    // mechanics for us, we have no work to do in this case.
    if (RV.getNode() == N)
      continue;

    assert(N->getOpcode() != ISD::DELETED_NODE &&
           RV.getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned new node!");

    DEBUG(dbgs() << " ... into: ";
          RV.getNode()->dump(&DAG));

    if (N->getNumValues() == RV.getNode()->getNumValues())
      DAG.ReplaceAllUsesWith(N, RV.getNode());
    else {
      assert(N->getValueType(0) == RV.getValueType() &&
             N->getNumValues() == 1 && "Type mismatch");
      SDValue OpV = RV;
      DAG.ReplaceAllUsesWith(N, &OpV);
    }

    // Push the new node and any users onto the worklist
    AddToWorklist(RV.getNode());
    AddUsersToWorklist(RV.getNode());

    // Finally, if the node is now dead, remove it from the graph. The node
    // may not be dead if the replacement process recursively simplified to
    // something else needing this node. This will also take care of adding
    // any operands which have lost a user to the worklist.
    recursivelyDeleteUnusedNodes(N);
  }

  // If the root changed (e.g. it was a dead load), update the root.
  DAG.setRoot(Dummy.getValue());
  DAG.RemoveDeadNodes();
}

SDValue DAGCombiner::visit(SDNode *N) {
  switch (N->getOpcode()) {
  default: break;
  case ISD::TokenFactor:        return visitTokenFactor(N);
  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
  case ISD::ADD:                return visitADD(N);
  case ISD::SUB:                return visitSUB(N);
  case ISD::ADDC:               return visitADDC(N);
  case ISD::SUBC:               return visitSUBC(N);
  case ISD::ADDE:               return visitADDE(N);
  case ISD::SUBE:               return visitSUBE(N);
  case ISD::MUL:                return visitMUL(N);
  case ISD::SDIV:               return visitSDIV(N);
  case ISD::UDIV:               return visitUDIV(N);
  case ISD::SREM:
  case ISD::UREM:               return visitREM(N);
  case ISD::MULHU:              return visitMULHU(N);
  case ISD::MULHS:              return visitMULHS(N);
  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
  case ISD::SMULO:              return visitSMULO(N);
  case ISD::UMULO:              return visitUMULO(N);
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:               return visitIMINMAX(N);
  case ISD::AND:                return visitAND(N);
  case ISD::OR:                 return visitOR(N);
  case ISD::XOR:                return visitXOR(N);
  case ISD::SHL:                return visitSHL(N);
  case ISD::SRA:                return visitSRA(N);
  case ISD::SRL:                return visitSRL(N);
  case ISD::ROTR:
  case ISD::ROTL:               return visitRotate(N);
  case ISD::BSWAP:              return visitBSWAP(N);
  case ISD::BITREVERSE:         return visitBITREVERSE(N);
  case ISD::CTLZ:               return visitCTLZ(N);
  case ISD::CTLZ_ZERO_UNDEF:    return visitCTLZ_ZERO_UNDEF(N);
  case ISD::CTTZ:               return visitCTTZ(N);
  case ISD::CTTZ_ZERO_UNDEF:    return visitCTTZ_ZERO_UNDEF(N);
  case ISD::CTPOP:              return visitCTPOP(N);
  case ISD::SELECT:             return visitSELECT(N);
  case ISD::VSELECT:            return visitVSELECT(N);
  case ISD::SELECT_CC:          return visitSELECT_CC(N);
  case ISD::SETCC:              return visitSETCC(N);
  case ISD::SETCCE:             return visitSETCCE(N);
  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
  case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
  case ISD::ZERO_EXTEND_VECTOR_INREG: return visitZERO_EXTEND_VECTOR_INREG(N);
  case ISD::TRUNCATE:           return visitTRUNCATE(N);
  case ISD::BITCAST:            return visitBITCAST(N);
  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
  case ISD::FADD:               return visitFADD(N);
  case ISD::FSUB:               return visitFSUB(N);
  case ISD::FMUL:               return visitFMUL(N);
  case ISD::FMA:                return visitFMA(N);
  case ISD::FDIV:               return visitFDIV(N);
  case ISD::FREM:               return visitFREM(N);
  case ISD::FSQRT:              return visitFSQRT(N);
  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
  case ISD::FP_ROUND:           return visitFP_ROUND(N);
  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
  case ISD::FNEG:               return visitFNEG(N);
  case ISD::FABS:               return visitFABS(N);
  case ISD::FFLOOR:             return visitFFLOOR(N);
  case ISD::FMINNUM:            return visitFMINNUM(N);
  case ISD::FMAXNUM:            return visitFMAXNUM(N);
  case ISD::FCEIL:              return visitFCEIL(N);
  case ISD::FTRUNC:             return visitFTRUNC(N);
  case ISD::BRCOND:             return visitBRCOND(N);
  case ISD::BR_CC:              return visitBR_CC(N);
  case ISD::LOAD:               return visitLOAD(N);
  case ISD::STORE:              return visitSTORE(N);
  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
  case ISD::EXTRACT_SUBVECTOR:  return visitEXTRACT_SUBVECTOR(N);
  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
  case ISD::SCALAR_TO_VECTOR:   return visitSCALAR_TO_VECTOR(N);
  case ISD::INSERT_SUBVECTOR:   return visitINSERT_SUBVECTOR(N);
  case ISD::MGATHER:            return visitMGATHER(N);
  case ISD::MLOAD:              return visitMLOAD(N);
  case ISD::MSCATTER:           return visitMSCATTER(N);
  case ISD::MSTORE:             return visitMSTORE(N);
  case ISD::FP_TO_FP16:         return visitFP_TO_FP16(N);
  case ISD::FP16_TO_FP:         return visitFP16_TO_FP(N);
  }
  return SDValue();
}

SDValue DAGCombiner::combine(SDNode *N) {
  SDValue RV = visit(N);

  // If nothing happened, try a target-specific DAG combine.
  if (!RV.getNode()) {
    assert(N->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned NULL!");

    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {

      // Expose the DAG combiner to the target combiner impls.
      TargetLowering::DAGCombinerInfo
        DagCombineInfo(DAG, Level, false, this);

      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
    }
  }

  // If nothing happened still, try promoting the operation.
  if (!RV.getNode()) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      RV = PromoteIntBinOp(SDValue(N, 0));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      RV = PromoteIntShiftOp(SDValue(N, 0));
      break;
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
      RV = PromoteExtend(SDValue(N, 0));
      break;
    case ISD::LOAD:
      if (PromoteLoad(SDValue(N, 0)))
        RV = SDValue(N, 0);
      break;
    }
  }

  // If N is a commutative binary node, try commuting it to enable more
  // sdisel CSE.
  if (!RV.getNode() && SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
      N->getNumValues() == 1) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);

    // Constant operands are canonicalized to RHS.
    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
      SDValue Ops[] = {N1, N0};
      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
                                            Ops, N->getFlags());
      if (CSENode)
        return SDValue(CSENode, 0);
    }
  }

  return RV;
}
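
// Illustrative note (not from the original source): the commute check above
// is cheap because getNodeIfExists only consults the DAG's CSE maps for an
// already-existing node with the swapped operands; it never creates a node,
// so nothing new is introduced when the lookup misses.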
1561 static SDValue getInputChainForNode(SDNode *N) {
1562   if (unsigned NumOps = N->getNumOperands()) {
1563     if (N->getOperand(0).getValueType() == MVT::Other)
1564       return N->getOperand(0);
1565     if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
1566       return N->getOperand(NumOps-1);
1567     for (unsigned i = 1; i < NumOps-1; ++i)
1568       if (N->getOperand(i).getValueType() == MVT::Other)
1569         return N->getOperand(i);
1570   }
1571   return SDValue();
1572 }
1573 
1574 SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
1575   // If N has two operands, where one has an input chain equal to the other,
1576   // the 'other' chain is redundant.
1577   if (N->getNumOperands() == 2) {
1578     if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
1579       return N->getOperand(0);
1580     if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
1581       return N->getOperand(1);
1582   }
1583 
1584   SmallVector<SDNode *, 8> TFs;   // List of token factors to visit.
1585   SmallVector<SDValue, 8> Ops;    // Ops for replacing token factor.
1586   SmallPtrSet<SDNode*, 16> SeenOps;
1587   bool Changed = false;           // If we should replace this token factor.
1588 
1589   // Start out with this token factor.
1590   TFs.push_back(N);
1591 
1592   // Iterate through token factors. The TFs list grows when new token factors
1593   // are encountered.
1594   for (unsigned i = 0; i < TFs.size(); ++i) {
1595     SDNode *TF = TFs[i];
1596 
1597     // Check each of the operands.
1598     for (const SDValue &Op : TF->op_values()) {
1599 
1600       switch (Op.getOpcode()) {
1601       case ISD::EntryToken:
1602         // Entry tokens don't need to be added to the list. They are
1603         // redundant.
1604         Changed = true;
1605         break;
1606 
1607       case ISD::TokenFactor:
1608         if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) {
1609           // Queue up for processing.
1610           TFs.push_back(Op.getNode());
1611           // Clean up in case the token factor is removed.
1612           AddToWorklist(Op.getNode());
1613           Changed = true;
1614           break;
1615         }
1616         LLVM_FALLTHROUGH;
1617 
1618       default:
1619         // Only add if it isn't already in the list.
1620         if (SeenOps.insert(Op.getNode()).second)
1621           Ops.push_back(Op);
1622         else
1623           Changed = true;
1624         break;
1625       }
1626     }
1627   }
1628 
1629   SDValue Result;
1630 
1631   // If we've changed things around, replace the token factor.
1632   if (Changed) {
1633     if (Ops.empty()) {
1634       // The entry token is the only possible outcome.
1635       Result = DAG.getEntryNode();
1636     } else {
1637       // New and improved token factor.
1638       Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
1639     }
1640 
1641     // Add users to worklist if AA is enabled, since it may introduce
1642     // a lot of new chained token factors while removing memory deps.
1643     bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
1644                                                     : DAG.getSubtarget().useAA();
1645     return CombineTo(N, Result, UseAA /*add to worklist*/);
1646   }
1647 
1648   return Result;
1649 }
1650 
1651 /// MERGE_VALUES can always be eliminated.
1652 SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
1653   WorklistRemover DeadNodes(*this);
1654   // Replacing results may cause a different MERGE_VALUES to suddenly
1655   // be CSE'd with N, and carry its uses with it. Iterate until no
1656   // uses remain, to ensure that the node can be safely deleted.
1657   // First add the users of this node to the work list so that they
1658   // can be tried again once they have new operands.
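  // Illustrative sketch: given
  //   v0, v1 = MERGE_VALUES a, b
  // each use of v0 is rewritten to a and each use of v1 to b. If that
  // replacement causes N to be CSE'd with another MERGE_VALUES node that
  // carries its own uses, the loop below simply runs again until no uses
  // remain.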
1659 AddUsersToWorklist(N); 1660 do { 1661 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 1662 DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i)); 1663 } while (!N->use_empty()); 1664 deleteAndRecombine(N); 1665 return SDValue(N, 0); // Return N so it doesn't get rechecked! 1666 } 1667 1668 /// If \p N is a ConstantSDNode with isOpaque() == false return it casted to a 1669 /// ConstantSDNode pointer else nullptr. 1670 static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) { 1671 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N); 1672 return Const != nullptr && !Const->isOpaque() ? Const : nullptr; 1673 } 1674 1675 SDValue DAGCombiner::visitADD(SDNode *N) { 1676 SDValue N0 = N->getOperand(0); 1677 SDValue N1 = N->getOperand(1); 1678 EVT VT = N0.getValueType(); 1679 SDLoc DL(N); 1680 1681 // fold vector ops 1682 if (VT.isVector()) { 1683 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 1684 return FoldedVOp; 1685 1686 // fold (add x, 0) -> x, vector edition 1687 if (ISD::isBuildVectorAllZeros(N1.getNode())) 1688 return N0; 1689 if (ISD::isBuildVectorAllZeros(N0.getNode())) 1690 return N1; 1691 } 1692 1693 // fold (add x, undef) -> undef 1694 if (N0.isUndef()) 1695 return N0; 1696 1697 if (N1.isUndef()) 1698 return N1; 1699 1700 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) { 1701 // canonicalize constant to RHS 1702 if (!DAG.isConstantIntBuildVectorOrConstantInt(N1)) 1703 return DAG.getNode(ISD::ADD, DL, VT, N1, N0); 1704 // fold (add c1, c2) -> c1+c2 1705 return DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N0.getNode(), 1706 N1.getNode()); 1707 } 1708 1709 // fold (add x, 0) -> x 1710 if (isNullConstant(N1)) 1711 return N0; 1712 1713 // fold ((c1-A)+c2) -> (c1+c2)-A 1714 if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) { 1715 if (N0.getOpcode() == ISD::SUB) 1716 if (isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) { 1717 return DAG.getNode(ISD::SUB, DL, VT, 1718 DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)), 1719 N0.getOperand(1)); 1720 } 1721 } 1722 1723 // reassociate add 1724 if (SDValue RADD = ReassociateOps(ISD::ADD, DL, N0, N1)) 1725 return RADD; 1726 1727 // fold ((0-A) + B) -> B-A 1728 if (N0.getOpcode() == ISD::SUB && 1729 isNullConstantOrNullSplatConstant(N0.getOperand(0))) 1730 return DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1)); 1731 1732 // fold (A + (0-B)) -> A-B 1733 if (N1.getOpcode() == ISD::SUB && 1734 isNullConstantOrNullSplatConstant(N1.getOperand(0))) 1735 return DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(1)); 1736 1737 // fold (A+(B-A)) -> B 1738 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1)) 1739 return N1.getOperand(0); 1740 1741 // fold ((B-A)+A) -> B 1742 if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1)) 1743 return N0.getOperand(0); 1744 1745 // fold (A+(B-(A+C))) to (B-C) 1746 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD && 1747 N0 == N1.getOperand(1).getOperand(0)) 1748 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0), 1749 N1.getOperand(1).getOperand(1)); 1750 1751 // fold (A+(B-(C+A))) to (B-C) 1752 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD && 1753 N0 == N1.getOperand(1).getOperand(1)) 1754 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0), 1755 N1.getOperand(1).getOperand(0)); 1756 1757 // fold (A+((B-A)+or-C)) to (B+or-C) 1758 if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) && 1759 N1.getOperand(0).getOpcode() == ISD::SUB && 1760 N0 == N1.getOperand(0).getOperand(1)) 1761 return 
DAG.getNode(N1.getOpcode(), DL, VT, N1.getOperand(0).getOperand(0), 1762 N1.getOperand(1)); 1763 1764 // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant 1765 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) { 1766 SDValue N00 = N0.getOperand(0); 1767 SDValue N01 = N0.getOperand(1); 1768 SDValue N10 = N1.getOperand(0); 1769 SDValue N11 = N1.getOperand(1); 1770 1771 if (isConstantOrConstantVector(N00) || isConstantOrConstantVector(N10)) 1772 return DAG.getNode(ISD::SUB, DL, VT, 1773 DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10), 1774 DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11)); 1775 } 1776 1777 if (SimplifyDemandedBits(SDValue(N, 0))) 1778 return SDValue(N, 0); 1779 1780 // fold (a+b) -> (a|b) iff a and b share no bits. 1781 if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) && 1782 VT.isInteger() && DAG.haveNoCommonBitsSet(N0, N1)) 1783 return DAG.getNode(ISD::OR, DL, VT, N0, N1); 1784 1785 // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n)) 1786 if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB && 1787 isNullConstantOrNullSplatConstant(N1.getOperand(0).getOperand(0))) 1788 return DAG.getNode(ISD::SUB, DL, VT, N0, 1789 DAG.getNode(ISD::SHL, DL, VT, 1790 N1.getOperand(0).getOperand(1), 1791 N1.getOperand(1))); 1792 if (N0.getOpcode() == ISD::SHL && N0.getOperand(0).getOpcode() == ISD::SUB && 1793 isNullConstantOrNullSplatConstant(N0.getOperand(0).getOperand(0))) 1794 return DAG.getNode(ISD::SUB, DL, VT, N1, 1795 DAG.getNode(ISD::SHL, DL, VT, 1796 N0.getOperand(0).getOperand(1), 1797 N0.getOperand(1))); 1798 1799 if (N1.getOpcode() == ISD::AND) { 1800 SDValue AndOp0 = N1.getOperand(0); 1801 unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0); 1802 unsigned DestBits = VT.getScalarSizeInBits(); 1803 1804 // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x)) 1805 // and similar xforms where the inner op is either ~0 or 0. 1806 if (NumSignBits == DestBits && 1807 isOneConstantOrOneSplatConstant(N1->getOperand(1))) 1808 return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0); 1809 } 1810 1811 // add (sext i1), X -> sub X, (zext i1) 1812 if (N0.getOpcode() == ISD::SIGN_EXTEND && 1813 N0.getOperand(0).getValueType() == MVT::i1 && 1814 !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) { 1815 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); 1816 return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt); 1817 } 1818 1819 // add X, (sextinreg Y i1) -> sub X, (and Y 1) 1820 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { 1821 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); 1822 if (TN->getVT() == MVT::i1) { 1823 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), 1824 DAG.getConstant(1, DL, VT)); 1825 return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt); 1826 } 1827 } 1828 1829 return SDValue(); 1830 } 1831 1832 SDValue DAGCombiner::visitADDC(SDNode *N) { 1833 SDValue N0 = N->getOperand(0); 1834 SDValue N1 = N->getOperand(1); 1835 EVT VT = N0.getValueType(); 1836 1837 // If the flag result is dead, turn this into an ADD. 1838 if (!N->hasAnyUseOfValue(1)) 1839 return CombineTo(N, DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N1), 1840 DAG.getNode(ISD::CARRY_FALSE, 1841 SDLoc(N), MVT::Glue)); 1842 1843 // canonicalize constant to RHS. 
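  // e.g. (addc 1, x) -> (addc x, 1), so the folds below only need to look
  // for a constant on the RHS.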
1844 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1845 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1846 if (N0C && !N1C) 1847 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N1, N0); 1848 1849 // fold (addc x, 0) -> x + no carry out 1850 if (isNullConstant(N1)) 1851 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, 1852 SDLoc(N), MVT::Glue)); 1853 1854 // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits. 1855 APInt LHSZero, LHSOne; 1856 APInt RHSZero, RHSOne; 1857 DAG.computeKnownBits(N0, LHSZero, LHSOne); 1858 1859 if (LHSZero.getBoolValue()) { 1860 DAG.computeKnownBits(N1, RHSZero, RHSOne); 1861 1862 // If all possibly-set bits on the LHS are clear on the RHS, return an OR. 1863 // If all possibly-set bits on the RHS are clear on the LHS, return an OR. 1864 if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero) 1865 return CombineTo(N, DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1), 1866 DAG.getNode(ISD::CARRY_FALSE, 1867 SDLoc(N), MVT::Glue)); 1868 } 1869 1870 return SDValue(); 1871 } 1872 1873 SDValue DAGCombiner::visitADDE(SDNode *N) { 1874 SDValue N0 = N->getOperand(0); 1875 SDValue N1 = N->getOperand(1); 1876 SDValue CarryIn = N->getOperand(2); 1877 1878 // canonicalize constant to RHS 1879 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1880 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1881 if (N0C && !N1C) 1882 return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(), 1883 N1, N0, CarryIn); 1884 1885 // fold (adde x, y, false) -> (addc x, y) 1886 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 1887 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1); 1888 1889 return SDValue(); 1890 } 1891 1892 // Since it may not be valid to emit a fold to zero for vector initializers 1893 // check if we can before folding. 1894 static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT, 1895 SelectionDAG &DAG, bool LegalOperations, 1896 bool LegalTypes) { 1897 if (!VT.isVector()) 1898 return DAG.getConstant(0, DL, VT); 1899 if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) 1900 return DAG.getConstant(0, DL, VT); 1901 return SDValue(); 1902 } 1903 1904 SDValue DAGCombiner::visitSUB(SDNode *N) { 1905 SDValue N0 = N->getOperand(0); 1906 SDValue N1 = N->getOperand(1); 1907 EVT VT = N0.getValueType(); 1908 SDLoc DL(N); 1909 1910 // fold vector ops 1911 if (VT.isVector()) { 1912 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 1913 return FoldedVOp; 1914 1915 // fold (sub x, 0) -> x, vector edition 1916 if (ISD::isBuildVectorAllZeros(N1.getNode())) 1917 return N0; 1918 } 1919 1920 // fold (sub x, x) -> 0 1921 // FIXME: Refactor this and xor and other similar operations together. 
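  // For vectors the zero may not be materializable: tryFoldToZero() only
  // builds the zero vector when BUILD_VECTOR is legal (or we are still
  // before operation legalization).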
1922 if (N0 == N1) 1923 return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations, LegalTypes); 1924 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 1925 DAG.isConstantIntBuildVectorOrConstantInt(N1)) { 1926 // fold (sub c1, c2) -> c1-c2 1927 return DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(), 1928 N1.getNode()); 1929 } 1930 1931 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1); 1932 1933 // fold (sub x, c) -> (add x, -c) 1934 if (N1C) { 1935 return DAG.getNode(ISD::ADD, DL, VT, N0, 1936 DAG.getConstant(-N1C->getAPIntValue(), DL, VT)); 1937 } 1938 1939 if (isNullConstantOrNullSplatConstant(N0)) { 1940 unsigned BitWidth = VT.getScalarSizeInBits(); 1941 // Right-shifting everything out but the sign bit followed by negation is 1942 // the same as flipping arithmetic/logical shift type without the negation: 1943 // -(X >>u 31) -> (X >>s 31) 1944 // -(X >>s 31) -> (X >>u 31) 1945 if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) { 1946 ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1)); 1947 if (ShiftAmt && ShiftAmt->getZExtValue() == BitWidth - 1) { 1948 auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA; 1949 if (!LegalOperations || TLI.isOperationLegal(NewSh, VT)) 1950 return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1)); 1951 } 1952 } 1953 1954 // 0 - X --> 0 if the sub is NUW. 1955 if (N->getFlags()->hasNoUnsignedWrap()) 1956 return N0; 1957 1958 if (DAG.MaskedValueIsZero(N1, ~APInt::getSignBit(BitWidth))) { 1959 // N1 is either 0 or the minimum signed value. If the sub is NSW, then 1960 // N1 must be 0 because negating the minimum signed value is undefined. 1961 if (N->getFlags()->hasNoSignedWrap()) 1962 return N0; 1963 1964 // 0 - X --> X if X is 0 or the minimum signed value. 1965 return N1; 1966 } 1967 } 1968 1969 // Canonicalize (sub -1, x) -> ~x, i.e. 
(xor x, -1) 1970 if (isAllOnesConstantOrAllOnesSplatConstant(N0)) 1971 return DAG.getNode(ISD::XOR, DL, VT, N1, N0); 1972 1973 // fold A-(A-B) -> B 1974 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0)) 1975 return N1.getOperand(1); 1976 1977 // fold (A+B)-A -> B 1978 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1) 1979 return N0.getOperand(1); 1980 1981 // fold (A+B)-B -> A 1982 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1) 1983 return N0.getOperand(0); 1984 1985 // fold C2-(A+C1) -> (C2-C1)-A 1986 if (N1.getOpcode() == ISD::ADD) { 1987 SDValue N11 = N1.getOperand(1); 1988 if (isConstantOrConstantVector(N0, /* NoOpaques */ true) && 1989 isConstantOrConstantVector(N11, /* NoOpaques */ true)) { 1990 SDValue NewC = DAG.getNode(ISD::SUB, DL, VT, N0, N11); 1991 return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0)); 1992 } 1993 } 1994 1995 // fold ((A+(B+or-C))-B) -> A+or-C 1996 if (N0.getOpcode() == ISD::ADD && 1997 (N0.getOperand(1).getOpcode() == ISD::SUB || 1998 N0.getOperand(1).getOpcode() == ISD::ADD) && 1999 N0.getOperand(1).getOperand(0) == N1) 2000 return DAG.getNode(N0.getOperand(1).getOpcode(), DL, VT, N0.getOperand(0), 2001 N0.getOperand(1).getOperand(1)); 2002 2003 // fold ((A+(C+B))-B) -> A+C 2004 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1).getOpcode() == ISD::ADD && 2005 N0.getOperand(1).getOperand(1) == N1) 2006 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), 2007 N0.getOperand(1).getOperand(0)); 2008 2009 // fold ((A-(B-C))-C) -> A-B 2010 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1).getOpcode() == ISD::SUB && 2011 N0.getOperand(1).getOperand(1) == N1) 2012 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), 2013 N0.getOperand(1).getOperand(0)); 2014 2015 // If either operand of a sub is undef, the result is undef 2016 if (N0.isUndef()) 2017 return N0; 2018 if (N1.isUndef()) 2019 return N1; 2020 2021 // If the relocation model supports it, consider symbol offsets. 2022 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) 2023 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) { 2024 // fold (sub Sym, c) -> Sym-c 2025 if (N1C && GA->getOpcode() == ISD::GlobalAddress) 2026 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT, 2027 GA->getOffset() - 2028 (uint64_t)N1C->getSExtValue()); 2029 // fold (sub Sym+c1, Sym+c2) -> c1-c2 2030 if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1)) 2031 if (GA->getGlobal() == GB->getGlobal()) 2032 return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(), 2033 DL, VT); 2034 } 2035 2036 // sub X, (sextinreg Y i1) -> add X, (and Y 1) 2037 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { 2038 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); 2039 if (TN->getVT() == MVT::i1) { 2040 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), 2041 DAG.getConstant(1, DL, VT)); 2042 return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt); 2043 } 2044 } 2045 2046 return SDValue(); 2047 } 2048 2049 SDValue DAGCombiner::visitSUBC(SDNode *N) { 2050 SDValue N0 = N->getOperand(0); 2051 SDValue N1 = N->getOperand(1); 2052 EVT VT = N0.getValueType(); 2053 SDLoc DL(N); 2054 2055 // If the flag result is dead, turn this into an SUB. 
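  // (The same pattern as visitADDC above: CombineTo() supplies CARRY_FALSE
  // for the dead flag value so that both results of N get a replacement.)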
2056 if (!N->hasAnyUseOfValue(1)) 2057 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1), 2058 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2059 2060 // fold (subc x, x) -> 0 + no borrow 2061 if (N0 == N1) 2062 return CombineTo(N, DAG.getConstant(0, DL, VT), 2063 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2064 2065 // fold (subc x, 0) -> x + no borrow 2066 if (isNullConstant(N1)) 2067 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2068 2069 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow 2070 if (isAllOnesConstant(N0)) 2071 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0), 2072 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2073 2074 return SDValue(); 2075 } 2076 2077 SDValue DAGCombiner::visitSUBE(SDNode *N) { 2078 SDValue N0 = N->getOperand(0); 2079 SDValue N1 = N->getOperand(1); 2080 SDValue CarryIn = N->getOperand(2); 2081 2082 // fold (sube x, y, false) -> (subc x, y) 2083 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 2084 return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1); 2085 2086 return SDValue(); 2087 } 2088 2089 SDValue DAGCombiner::visitMUL(SDNode *N) { 2090 SDValue N0 = N->getOperand(0); 2091 SDValue N1 = N->getOperand(1); 2092 EVT VT = N0.getValueType(); 2093 2094 // fold (mul x, undef) -> 0 2095 if (N0.isUndef() || N1.isUndef()) 2096 return DAG.getConstant(0, SDLoc(N), VT); 2097 2098 bool N0IsConst = false; 2099 bool N1IsConst = false; 2100 bool N1IsOpaqueConst = false; 2101 bool N0IsOpaqueConst = false; 2102 APInt ConstValue0, ConstValue1; 2103 // fold vector ops 2104 if (VT.isVector()) { 2105 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2106 return FoldedVOp; 2107 2108 N0IsConst = ISD::isConstantSplatVector(N0.getNode(), ConstValue0); 2109 N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1); 2110 } else { 2111 N0IsConst = isa<ConstantSDNode>(N0); 2112 if (N0IsConst) { 2113 ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue(); 2114 N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque(); 2115 } 2116 N1IsConst = isa<ConstantSDNode>(N1); 2117 if (N1IsConst) { 2118 ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue(); 2119 N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque(); 2120 } 2121 } 2122 2123 // fold (mul c1, c2) -> c1*c2 2124 if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst) 2125 return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT, 2126 N0.getNode(), N1.getNode()); 2127 2128 // canonicalize constant to RHS (vector doesn't have to splat) 2129 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 2130 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 2131 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0); 2132 // fold (mul x, 0) -> 0 2133 if (N1IsConst && ConstValue1 == 0) 2134 return N1; 2135 // We require a splat of the entire scalar bit width for non-contiguous 2136 // bit patterns. 
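  // e.g. for a v2i64 constant splat, isConstantSplatVector() may report the
  // repeating bit pattern in fewer than 64 bits; the shift-based folds
  // below only fire when the reported splat spans the whole lane.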
2137 bool IsFullSplat = 2138 ConstValue1.getBitWidth() == VT.getScalarSizeInBits(); 2139 // fold (mul x, 1) -> x 2140 if (N1IsConst && ConstValue1 == 1 && IsFullSplat) 2141 return N0; 2142 // fold (mul x, -1) -> 0-x 2143 if (N1IsConst && ConstValue1.isAllOnesValue()) { 2144 SDLoc DL(N); 2145 return DAG.getNode(ISD::SUB, DL, VT, 2146 DAG.getConstant(0, DL, VT), N0); 2147 } 2148 // fold (mul x, (1 << c)) -> x << c 2149 if (N1IsConst && !N1IsOpaqueConst && ConstValue1.isPowerOf2() && 2150 IsFullSplat) { 2151 SDLoc DL(N); 2152 return DAG.getNode(ISD::SHL, DL, VT, N0, 2153 DAG.getConstant(ConstValue1.logBase2(), DL, 2154 getShiftAmountTy(N0.getValueType()))); 2155 } 2156 // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c 2157 if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2() && 2158 IsFullSplat) { 2159 unsigned Log2Val = (-ConstValue1).logBase2(); 2160 SDLoc DL(N); 2161 // FIXME: If the input is something that is easily negated (e.g. a 2162 // single-use add), we should put the negate there. 2163 return DAG.getNode(ISD::SUB, DL, VT, 2164 DAG.getConstant(0, DL, VT), 2165 DAG.getNode(ISD::SHL, DL, VT, N0, 2166 DAG.getConstant(Log2Val, DL, 2167 getShiftAmountTy(N0.getValueType())))); 2168 } 2169 2170 // (mul (shl X, c1), c2) -> (mul X, c2 << c1) 2171 if (N0.getOpcode() == ISD::SHL && 2172 isConstantOrConstantVector(N1, /* NoOpaques */ true) && 2173 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) { 2174 SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, N1, N0.getOperand(1)); 2175 if (isConstantOrConstantVector(C3)) 2176 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), C3); 2177 } 2178 2179 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one 2180 // use. 2181 { 2182 SDValue Sh(nullptr, 0), Y(nullptr, 0); 2183 2184 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)). 2185 if (N0.getOpcode() == ISD::SHL && 2186 isConstantOrConstantVector(N0.getOperand(1)) && 2187 N0.getNode()->hasOneUse()) { 2188 Sh = N0; Y = N1; 2189 } else if (N1.getOpcode() == ISD::SHL && 2190 isConstantOrConstantVector(N1.getOperand(1)) && 2191 N1.getNode()->hasOneUse()) { 2192 Sh = N1; Y = N0; 2193 } 2194 2195 if (Sh.getNode()) { 2196 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, Sh.getOperand(0), Y); 2197 return DAG.getNode(ISD::SHL, SDLoc(N), VT, Mul, Sh.getOperand(1)); 2198 } 2199 } 2200 2201 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2) 2202 if (DAG.isConstantIntBuildVectorOrConstantInt(N1) && 2203 N0.getOpcode() == ISD::ADD && 2204 DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) && 2205 isMulAddWithConstProfitable(N, N0, N1)) 2206 return DAG.getNode(ISD::ADD, SDLoc(N), VT, 2207 DAG.getNode(ISD::MUL, SDLoc(N0), VT, 2208 N0.getOperand(0), N1), 2209 DAG.getNode(ISD::MUL, SDLoc(N1), VT, 2210 N0.getOperand(1), N1)); 2211 2212 // reassociate mul 2213 if (SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1)) 2214 return RMUL; 2215 2216 return SDValue(); 2217 } 2218 2219 /// Return true if divmod libcall is available. 2220 static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 2221 const TargetLowering &TLI) { 2222 RTLIB::Libcall LC; 2223 EVT NodeType = Node->getValueType(0); 2224 if (!NodeType.isSimple()) 2225 return false; 2226 switch (NodeType.getSimpleVT().SimpleTy) { 2227 default: return false; // No libcall for vector types. 2228 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2229 case MVT::i16: LC= isSigned ? 
RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2230 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2231 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2232 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2233 } 2234 2235 return TLI.getLibcallName(LC) != nullptr; 2236 } 2237 2238 /// Issue divrem if both quotient and remainder are needed. 2239 SDValue DAGCombiner::useDivRem(SDNode *Node) { 2240 if (Node->use_empty()) 2241 return SDValue(); // This is a dead node, leave it alone. 2242 2243 unsigned Opcode = Node->getOpcode(); 2244 bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM); 2245 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 2246 2247 // DivMod lib calls can still work on non-legal types if using lib-calls. 2248 EVT VT = Node->getValueType(0); 2249 if (VT.isVector() || !VT.isInteger()) 2250 return SDValue(); 2251 2252 if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT)) 2253 return SDValue(); 2254 2255 // If DIVREM is going to get expanded into a libcall, 2256 // but there is no libcall available, then don't combine. 2257 if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) && 2258 !isDivRemLibcallAvailable(Node, isSigned, TLI)) 2259 return SDValue(); 2260 2261 // If div is legal, it's better to do the normal expansion 2262 unsigned OtherOpcode = 0; 2263 if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) { 2264 OtherOpcode = isSigned ? ISD::SREM : ISD::UREM; 2265 if (TLI.isOperationLegalOrCustom(Opcode, VT)) 2266 return SDValue(); 2267 } else { 2268 OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV; 2269 if (TLI.isOperationLegalOrCustom(OtherOpcode, VT)) 2270 return SDValue(); 2271 } 2272 2273 SDValue Op0 = Node->getOperand(0); 2274 SDValue Op1 = Node->getOperand(1); 2275 SDValue combined; 2276 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2277 UE = Op0.getNode()->use_end(); UI != UE;) { 2278 SDNode *User = *UI++; 2279 if (User == Node || User->use_empty()) 2280 continue; 2281 // Convert the other matching node(s), too; 2282 // otherwise, the DIVREM may get target-legalized into something 2283 // target-specific that we won't be able to recognize. 
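    // e.g. if both (sdiv a, b) and (srem a, b) are live, one
    // (sdivrem a, b) node is built and both users are redirected onto its
    // two results, so the pair shares a single division.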
2284 unsigned UserOpc = User->getOpcode(); 2285 if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) && 2286 User->getOperand(0) == Op0 && 2287 User->getOperand(1) == Op1) { 2288 if (!combined) { 2289 if (UserOpc == OtherOpcode) { 2290 SDVTList VTs = DAG.getVTList(VT, VT); 2291 combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1); 2292 } else if (UserOpc == DivRemOpc) { 2293 combined = SDValue(User, 0); 2294 } else { 2295 assert(UserOpc == Opcode); 2296 continue; 2297 } 2298 } 2299 if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV) 2300 CombineTo(User, combined); 2301 else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM) 2302 CombineTo(User, combined.getValue(1)); 2303 } 2304 } 2305 return combined; 2306 } 2307 2308 SDValue DAGCombiner::visitSDIV(SDNode *N) { 2309 SDValue N0 = N->getOperand(0); 2310 SDValue N1 = N->getOperand(1); 2311 EVT VT = N->getValueType(0); 2312 2313 // fold vector ops 2314 if (VT.isVector()) 2315 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2316 return FoldedVOp; 2317 2318 SDLoc DL(N); 2319 2320 // fold (sdiv c1, c2) -> c1/c2 2321 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2322 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2323 if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque()) 2324 return DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, N0C, N1C); 2325 // fold (sdiv X, 1) -> X 2326 if (N1C && N1C->isOne()) 2327 return N0; 2328 // fold (sdiv X, -1) -> 0-X 2329 if (N1C && N1C->isAllOnesValue()) 2330 return DAG.getNode(ISD::SUB, DL, VT, 2331 DAG.getConstant(0, DL, VT), N0); 2332 2333 // If we know the sign bits of both operands are zero, strength reduce to a 2334 // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2 2335 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2336 return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1); 2337 2338 // fold (sdiv X, pow2) -> simple ops after legalize 2339 // FIXME: We check for the exact bit here because the generic lowering gives 2340 // better results in that case. The target-specific lowering should learn how 2341 // to handle exact sdivs efficiently. 2342 if (N1C && !N1C->isNullValue() && !N1C->isOpaque() && 2343 !cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact() && 2344 (N1C->getAPIntValue().isPowerOf2() || 2345 (-N1C->getAPIntValue()).isPowerOf2())) { 2346 // Target-specific implementation of sdiv x, pow2. 2347 if (SDValue Res = BuildSDIVPow2(N)) 2348 return Res; 2349 2350 unsigned lg2 = N1C->getAPIntValue().countTrailingZeros(); 2351 2352 // Splat the sign bit into the register 2353 SDValue SGN = 2354 DAG.getNode(ISD::SRA, DL, VT, N0, 2355 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, 2356 getShiftAmountTy(N0.getValueType()))); 2357 AddToWorklist(SGN.getNode()); 2358 2359 // Add (N0 < 0) ? abs2 - 1 : 0; 2360 SDValue SRL = 2361 DAG.getNode(ISD::SRL, DL, VT, SGN, 2362 DAG.getConstant(VT.getScalarSizeInBits() - lg2, DL, 2363 getShiftAmountTy(SGN.getValueType()))); 2364 SDValue ADD = DAG.getNode(ISD::ADD, DL, VT, N0, SRL); 2365 AddToWorklist(SRL.getNode()); 2366 AddToWorklist(ADD.getNode()); // Divide by pow2 2367 SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, ADD, 2368 DAG.getConstant(lg2, DL, 2369 getShiftAmountTy(ADD.getValueType()))); 2370 2371 // If we're dividing by a positive value, we're done. Otherwise, we must 2372 // negate the result. 
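    // Worked example for i32 (sdiv x, 4), lg2 == 2:
    //   SGN = sra x, 31    ; 0 or -1
    //   SRL = srl SGN, 30  ; 0 or 3, i.e. abs2 - 1
    //   ADD = add x, SRL   ; bias negative dividends toward zero
    //   SRA = sra ADD, 2   ; the quotient
    // For a negative divisor such as -4, the result is then negated below.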
2373     if (N1C->getAPIntValue().isNonNegative())
2374       return SRA;
2375 
2376     AddToWorklist(SRA.getNode());
2377     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
2378   }
2379 
2380   // If integer divide is expensive and we satisfy the requirements, emit an
2381   // alternate sequence. Targets may check function attributes for size/speed
2382   // trade-offs.
2383   AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2384   if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
2385     if (SDValue Op = BuildSDIV(N))
2386       return Op;
2387 
2388   // sdiv, srem -> sdivrem
2389   // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
2390   // true. Otherwise, we break the simplification logic in visitREM().
2391   if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
2392     if (SDValue DivRem = useDivRem(N))
2393       return DivRem;
2394 
2395   // undef / X -> 0
2396   if (N0.isUndef())
2397     return DAG.getConstant(0, DL, VT);
2398   // X / undef -> undef
2399   if (N1.isUndef())
2400     return N1;
2401 
2402   return SDValue();
2403 }
2404 
2405 SDValue DAGCombiner::visitUDIV(SDNode *N) {
2406   SDValue N0 = N->getOperand(0);
2407   SDValue N1 = N->getOperand(1);
2408   EVT VT = N->getValueType(0);
2409 
2410   // fold vector ops
2411   if (VT.isVector())
2412     if (SDValue FoldedVOp = SimplifyVBinOp(N))
2413       return FoldedVOp;
2414 
2415   SDLoc DL(N);
2416 
2417   // fold (udiv c1, c2) -> c1/c2
2418   ConstantSDNode *N0C = isConstOrConstSplat(N0);
2419   ConstantSDNode *N1C = isConstOrConstSplat(N1);
2420   if (N0C && N1C)
2421     if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT,
2422                                                     N0C, N1C))
2423       return Folded;
2424 
2425   // fold (udiv x, (1 << c)) -> x >>u c
2426   if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
2427       DAG.isKnownToBeAPowerOfTwo(N1)) {
2428     SDValue LogBase2 = BuildLogBase2(N1, DL);
2429     AddToWorklist(LogBase2.getNode());
2430 
2431     EVT ShiftVT = getShiftAmountTy(N0.getValueType());
2432     SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
2433     AddToWorklist(Trunc.getNode());
2434     return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
2435   }
2436 
2437   // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
2438   if (N1.getOpcode() == ISD::SHL) {
2439     SDValue N10 = N1.getOperand(0);
2440     if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) &&
2441         DAG.isKnownToBeAPowerOfTwo(N10)) {
2442       SDValue LogBase2 = BuildLogBase2(N10, DL);
2443       AddToWorklist(LogBase2.getNode());
2444 
2445       EVT ADDVT = N1.getOperand(1).getValueType();
2446       SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT);
2447       AddToWorklist(Trunc.getNode());
2448       SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc);
2449       AddToWorklist(Add.getNode());
2450       return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
2451     }
2452   }
2453 
2454   // fold (udiv x, c) -> alternate
2455   AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2456   if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
2457     if (SDValue Op = BuildUDIV(N))
2458       return Op;
2459 
2460   // udiv, urem -> udivrem
2461   // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
2462   // true. Otherwise, we break the simplification logic in visitREM().
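  // (visitREM() speculatively forms the matching division and relies on
  // combine() not turning it into a DIVREM in that case.)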
2463 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr)) 2464 if (SDValue DivRem = useDivRem(N)) 2465 return DivRem; 2466 2467 // undef / X -> 0 2468 if (N0.isUndef()) 2469 return DAG.getConstant(0, DL, VT); 2470 // X / undef -> undef 2471 if (N1.isUndef()) 2472 return N1; 2473 2474 return SDValue(); 2475 } 2476 2477 // handles ISD::SREM and ISD::UREM 2478 SDValue DAGCombiner::visitREM(SDNode *N) { 2479 unsigned Opcode = N->getOpcode(); 2480 SDValue N0 = N->getOperand(0); 2481 SDValue N1 = N->getOperand(1); 2482 EVT VT = N->getValueType(0); 2483 bool isSigned = (Opcode == ISD::SREM); 2484 SDLoc DL(N); 2485 2486 // fold (rem c1, c2) -> c1%c2 2487 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2488 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2489 if (N0C && N1C) 2490 if (SDValue Folded = DAG.FoldConstantArithmetic(Opcode, DL, VT, N0C, N1C)) 2491 return Folded; 2492 2493 if (isSigned) { 2494 // If we know the sign bits of both operands are zero, strength reduce to a 2495 // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15 2496 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2497 return DAG.getNode(ISD::UREM, DL, VT, N0, N1); 2498 } else { 2499 // fold (urem x, pow2) -> (and x, pow2-1) 2500 if (DAG.isKnownToBeAPowerOfTwo(N1)) { 2501 APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits()); 2502 SDValue Add = 2503 DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT)); 2504 AddToWorklist(Add.getNode()); 2505 return DAG.getNode(ISD::AND, DL, VT, N0, Add); 2506 } 2507 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)) 2508 if (N1.getOpcode() == ISD::SHL && 2509 DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) { 2510 APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits()); 2511 SDValue Add = 2512 DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT)); 2513 AddToWorklist(Add.getNode()); 2514 return DAG.getNode(ISD::AND, DL, VT, N0, Add); 2515 } 2516 } 2517 2518 AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes(); 2519 2520 // If X/C can be simplified by the division-by-constant logic, lower 2521 // X%C to the equivalent of X-X/C*C. 2522 // To avoid mangling nodes, this simplification requires that the combine() 2523 // call for the speculative DIV must not cause a DIVREM conversion. We guard 2524 // against this by skipping the simplification if isIntDivCheap(). When 2525 // div is not cheap, combine will not return a DIVREM. Regardless, 2526 // checking cheapness here makes sense since the simplification results in 2527 // fatter code. 2528 if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap(VT, Attr)) { 2529 unsigned DivOpcode = isSigned ? 
ISD::SDIV : ISD::UDIV; 2530 SDValue Div = DAG.getNode(DivOpcode, DL, VT, N0, N1); 2531 AddToWorklist(Div.getNode()); 2532 SDValue OptimizedDiv = combine(Div.getNode()); 2533 if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) { 2534 assert((OptimizedDiv.getOpcode() != ISD::UDIVREM) && 2535 (OptimizedDiv.getOpcode() != ISD::SDIVREM)); 2536 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1); 2537 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul); 2538 AddToWorklist(Mul.getNode()); 2539 return Sub; 2540 } 2541 } 2542 2543 // sdiv, srem -> sdivrem 2544 if (SDValue DivRem = useDivRem(N)) 2545 return DivRem.getValue(1); 2546 2547 // undef % X -> 0 2548 if (N0.isUndef()) 2549 return DAG.getConstant(0, DL, VT); 2550 // X % undef -> undef 2551 if (N1.isUndef()) 2552 return N1; 2553 2554 return SDValue(); 2555 } 2556 2557 SDValue DAGCombiner::visitMULHS(SDNode *N) { 2558 SDValue N0 = N->getOperand(0); 2559 SDValue N1 = N->getOperand(1); 2560 EVT VT = N->getValueType(0); 2561 SDLoc DL(N); 2562 2563 // fold (mulhs x, 0) -> 0 2564 if (isNullConstant(N1)) 2565 return N1; 2566 // fold (mulhs x, 1) -> (sra x, size(x)-1) 2567 if (isOneConstant(N1)) { 2568 SDLoc DL(N); 2569 return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0, 2570 DAG.getConstant(N0.getValueSizeInBits() - 1, DL, 2571 getShiftAmountTy(N0.getValueType()))); 2572 } 2573 // fold (mulhs x, undef) -> 0 2574 if (N0.isUndef() || N1.isUndef()) 2575 return DAG.getConstant(0, SDLoc(N), VT); 2576 2577 // If the type twice as wide is legal, transform the mulhs to a wider multiply 2578 // plus a shift. 2579 if (VT.isSimple() && !VT.isVector()) { 2580 MVT Simple = VT.getSimpleVT(); 2581 unsigned SimpleSize = Simple.getSizeInBits(); 2582 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2583 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2584 N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0); 2585 N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1); 2586 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2587 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2588 DAG.getConstant(SimpleSize, DL, 2589 getShiftAmountTy(N1.getValueType()))); 2590 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2591 } 2592 } 2593 2594 return SDValue(); 2595 } 2596 2597 SDValue DAGCombiner::visitMULHU(SDNode *N) { 2598 SDValue N0 = N->getOperand(0); 2599 SDValue N1 = N->getOperand(1); 2600 EVT VT = N->getValueType(0); 2601 SDLoc DL(N); 2602 2603 // fold (mulhu x, 0) -> 0 2604 if (isNullConstant(N1)) 2605 return N1; 2606 // fold (mulhu x, 1) -> 0 2607 if (isOneConstant(N1)) 2608 return DAG.getConstant(0, DL, N0.getValueType()); 2609 // fold (mulhu x, undef) -> 0 2610 if (N0.isUndef() || N1.isUndef()) 2611 return DAG.getConstant(0, DL, VT); 2612 2613 // If the type twice as wide is legal, transform the mulhu to a wider multiply 2614 // plus a shift. 
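  // e.g. i32 (mulhu x, y) with a legal i64 MUL becomes
  //   (trunc i32 (srl (mul (zext i64 x), (zext i64 y)), 32))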
2615   if (VT.isSimple() && !VT.isVector()) {
2616     MVT Simple = VT.getSimpleVT();
2617     unsigned SimpleSize = Simple.getSizeInBits();
2618     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2619     if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2620       N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
2621       N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
2622       N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
2623       N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
2624                        DAG.getConstant(SimpleSize, DL,
2625                                        getShiftAmountTy(N1.getValueType())));
2626       return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
2627     }
2628   }
2629 
2630   return SDValue();
2631 }
2632 
2633 /// Perform optimizations common to nodes that compute two values. LoOp and HiOp
2634 /// give the opcodes for the two computations that are being performed. Return
2635 /// the replacement value if a simplification was made.
2636 SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
2637                                                 unsigned HiOp) {
2638   // If the high half is not needed, just compute the low half.
2639   bool HiExists = N->hasAnyUseOfValue(1);
2640   if (!HiExists &&
2641       (!LegalOperations ||
2642        TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
2643     SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
2644     return CombineTo(N, Res, Res);
2645   }
2646 
2647   // If the low half is not needed, just compute the high half.
2648   bool LoExists = N->hasAnyUseOfValue(0);
2649   if (!LoExists &&
2650       (!LegalOperations ||
2651        TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
2652     SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
2653     return CombineTo(N, Res, Res);
2654   }
2655 
2656   // If both halves are used, leave the node as it is.
2657   if (LoExists && HiExists)
2658     return SDValue();
2659 
2660   // If the two computed results can be simplified separately, separate them.
2661   if (LoExists) {
2662     SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
2663     AddToWorklist(Lo.getNode());
2664     SDValue LoOpt = combine(Lo.getNode());
2665     if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
2666         (!LegalOperations ||
2667          TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
2668       return CombineTo(N, LoOpt, LoOpt);
2669   }
2670 
2671   if (HiExists) {
2672     SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
2673     AddToWorklist(Hi.getNode());
2674     SDValue HiOpt = combine(Hi.getNode());
2675     if (HiOpt.getNode() && HiOpt != Hi &&
2676         (!LegalOperations ||
2677          TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
2678       return CombineTo(N, HiOpt, HiOpt);
2679   }
2680 
2681   return SDValue();
2682 }
2683 
2684 SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
2685   if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
2686     return Res;
2687 
2688   EVT VT = N->getValueType(0);
2689   SDLoc DL(N);
2690 
2691   // If the type twice as wide is legal, transform the smul_lohi to a wider
2692   // multiply plus a shift.
2693   if (VT.isSimple() && !VT.isVector()) {
2694     MVT Simple = VT.getSimpleVT();
2695     unsigned SimpleSize = Simple.getSizeInBits();
2696     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2697     if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2698       SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
2699       SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
2700       Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2701       // Compute the high part (result value 1).
2702       Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2703                        DAG.getConstant(SimpleSize, DL,
2704                                        getShiftAmountTy(Lo.getValueType())));
2705       Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2706       // Compute the low part (result value 0).
2707       Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2708       return CombineTo(N, Lo, Hi);
2709     }
2710   }
2711 
2712   return SDValue();
2713 }
2714 
2715 SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
2716   if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
2717     return Res;
2718 
2719   EVT VT = N->getValueType(0);
2720   SDLoc DL(N);
2721 
2722   // If the type twice as wide is legal, transform the umul_lohi to a wider
2723   // multiply plus a shift.
2724   if (VT.isSimple() && !VT.isVector()) {
2725     MVT Simple = VT.getSimpleVT();
2726     unsigned SimpleSize = Simple.getSizeInBits();
2727     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
2728     if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
2729       SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
2730       SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
2731       Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
2732       // Compute the high part (result value 1).
2733       Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
2734                        DAG.getConstant(SimpleSize, DL,
2735                                        getShiftAmountTy(Lo.getValueType())));
2736       Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
2737       // Compute the low part (result value 0).
2738       Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
2739       return CombineTo(N, Lo, Hi);
2740     }
2741   }
2742 
2743   return SDValue();
2744 }
2745 
2746 SDValue DAGCombiner::visitSMULO(SDNode *N) {
2747   // (smulo x, 2) -> (saddo x, x)
2748   if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2749     if (C2->getAPIntValue() == 2)
2750       return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(),
2751                          N->getOperand(0), N->getOperand(0));
2752 
2753   return SDValue();
2754 }
2755 
2756 SDValue DAGCombiner::visitUMULO(SDNode *N) {
2757   // (umulo x, 2) -> (uaddo x, x)
2758   if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2759     if (C2->getAPIntValue() == 2)
2760       return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(),
2761                          N->getOperand(0), N->getOperand(0));
2762 
2763   return SDValue();
2764 }
2765 
2766 SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
2767   SDValue N0 = N->getOperand(0);
2768   SDValue N1 = N->getOperand(1);
2769   EVT VT = N0.getValueType();
2770 
2771   // fold vector ops
2772   if (VT.isVector())
2773     if (SDValue FoldedVOp = SimplifyVBinOp(N))
2774       return FoldedVOp;
2775 
2776   // fold (minmax c1, c2) -> c1 minmax c2
2777   ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
2778   ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
2779   if (N0C && N1C)
2780     return DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, N0C, N1C);
2781 
2782   // canonicalize constant to RHS
2783   if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
2784       !DAG.isConstantIntBuildVectorOrConstantInt(N1))
2785     return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
2786 
2787   return SDValue();
2788 }
2789 
2790 /// If this is a binary operator with two operands of the same opcode, try to
2791 /// simplify it.
2792 SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
2793   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2794   EVT VT = N0.getValueType();
2795   assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
2796 
2797   // Bail early if none of these transforms apply.
2798   if (N0.getNumOperands() == 0) return SDValue();
2799 
2800   // For each of OP in AND/OR/XOR:
2801   // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
2802   // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
2803   // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
2804   // fold (OP (bswap x), (bswap y)) -> (bswap (OP x, y))
2805   // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
2806   //
2807   // Do not sink a logical op inside of a vector extend, since it may combine
2808   // into a vsetcc.
2809   EVT Op0VT = N0.getOperand(0).getValueType();
2810   if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
2811        N0.getOpcode() == ISD::SIGN_EXTEND ||
2812        N0.getOpcode() == ISD::BSWAP ||
2813        // Avoid infinite looping with PromoteIntBinOp.
2814        (N0.getOpcode() == ISD::ANY_EXTEND &&
2815         (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
2816        (N0.getOpcode() == ISD::TRUNCATE &&
2817         (!TLI.isZExtFree(VT, Op0VT) ||
2818          !TLI.isTruncateFree(Op0VT, VT)) &&
2819         TLI.isTypeLegal(Op0VT))) &&
2820       !VT.isVector() &&
2821       Op0VT == N1.getOperand(0).getValueType() &&
2822       (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
2823     SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
2824                                  N0.getOperand(0).getValueType(),
2825                                  N0.getOperand(0), N1.getOperand(0));
2826     AddToWorklist(ORNode.getNode());
2827     return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode);
2828   }
2829 
2830   // For each of OP in SHL/SRL/SRA/AND...
2831   // fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
2832   // fold (or  (OP x, z), (OP y, z)) -> (OP (or  x, y), z)
2833   // fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
2834   if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
2835        N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
2836       N0.getOperand(1) == N1.getOperand(1)) {
2837     SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
2838                                  N0.getOperand(0).getValueType(),
2839                                  N0.getOperand(0), N1.getOperand(0));
2840     AddToWorklist(ORNode.getNode());
2841     return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
2842                        ORNode, N0.getOperand(1));
2843   }
2844 
2845   // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
2846   // Only perform this optimization up until type legalization, before
2847   // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
2848   // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
2849   // we don't want to undo this promotion.
2850   // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
2851   // on scalars.
2852   if ((N0.getOpcode() == ISD::BITCAST ||
2853        N0.getOpcode() == ISD::SCALAR_TO_VECTOR) &&
2854       Level <= AfterLegalizeTypes) {
2855     SDValue In0 = N0.getOperand(0);
2856     SDValue In1 = N1.getOperand(0);
2857     EVT In0Ty = In0.getValueType();
2858     EVT In1Ty = In1.getValueType();
2859     SDLoc DL(N);
2860     // If both incoming values are integers, and the original types are the
2861     // same.
2862     if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
2863       SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
2864       SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
2865       AddToWorklist(Op.getNode());
2866       return BC;
2867     }
2868   }
2869 
2870   // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
2871   // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
2872   // If both shuffles use the same mask, and both shuffle within a single
2873   // vector, then it is worthwhile to move the swizzle after the operation.
2874 // The type-legalizer generates this pattern when loading illegal 2875 // vector types from memory. In many cases this allows additional shuffle 2876 // optimizations. 2877 // There are other cases where moving the shuffle after the xor/and/or 2878 // is profitable even if shuffles don't perform a swizzle. 2879 // If both shuffles use the same mask, and both shuffles have the same first 2880 // or second operand, then it might still be profitable to move the shuffle 2881 // after the xor/and/or operation. 2882 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) { 2883 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0); 2884 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1); 2885 2886 assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() && 2887 "Inputs to shuffles are not the same type"); 2888 2889 // Check that both shuffles use the same mask. The masks are known to be of 2890 // the same length because the result vector type is the same. 2891 // Check also that shuffles have only one use to avoid introducing extra 2892 // instructions. 2893 if (SVN0->hasOneUse() && SVN1->hasOneUse() && 2894 SVN0->getMask().equals(SVN1->getMask())) { 2895 SDValue ShOp = N0->getOperand(1); 2896 2897 // Don't try to fold this node if it requires introducing a 2898 // build vector of all zeros that might be illegal at this stage. 2899 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) { 2900 if (!LegalTypes) 2901 ShOp = DAG.getConstant(0, SDLoc(N), VT); 2902 else 2903 ShOp = SDValue(); 2904 } 2905 2906 // (AND (shuf (A, C), shuf (B, C)) -> shuf (AND (A, B), C) 2907 // (OR (shuf (A, C), shuf (B, C)) -> shuf (OR (A, B), C) 2908 // (XOR (shuf (A, C), shuf (B, C)) -> shuf (XOR (A, B), V_0) 2909 if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) { 2910 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 2911 N0->getOperand(0), N1->getOperand(0)); 2912 AddToWorklist(NewNode.getNode()); 2913 return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp, 2914 SVN0->getMask()); 2915 } 2916 2917 // Don't try to fold this node if it requires introducing a 2918 // build vector of all zeros that might be illegal at this stage. 2919 ShOp = N0->getOperand(0); 2920 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) { 2921 if (!LegalTypes) 2922 ShOp = DAG.getConstant(0, SDLoc(N), VT); 2923 else 2924 ShOp = SDValue(); 2925 } 2926 2927 // (AND (shuf (C, A), shuf (C, B)) -> shuf (C, AND (A, B)) 2928 // (OR (shuf (C, A), shuf (C, B)) -> shuf (C, OR (A, B)) 2929 // (XOR (shuf (C, A), shuf (C, B)) -> shuf (V_0, XOR (A, B)) 2930 if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) { 2931 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 2932 N0->getOperand(1), N1->getOperand(1)); 2933 AddToWorklist(NewNode.getNode()); 2934 return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode, 2935 SVN0->getMask()); 2936 } 2937 } 2938 } 2939 2940 return SDValue(); 2941 } 2942 2943 /// This contains all DAGCombine rules which reduce two values combined by 2944 /// an And operation to a single value. This makes them reusable in the context 2945 /// of visitSELECT(). Rules involving constants are not included as 2946 /// visitSELECT() already handles those cases. 
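/// For example, (and (seteq X, 0), (seteq Y, 0)) can be folded to
/// (seteq (or X, Y), 0) here whether the AND came from an actual AND node
/// or from a SELECT being simplified.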
2947 SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, 2948 SDNode *LocReference) { 2949 EVT VT = N1.getValueType(); 2950 2951 // fold (and x, undef) -> 0 2952 if (N0.isUndef() || N1.isUndef()) 2953 return DAG.getConstant(0, SDLoc(LocReference), VT); 2954 // fold (and (setcc x), (setcc y)) -> (setcc (and x, y)) 2955 SDValue LL, LR, RL, RR, CC0, CC1; 2956 if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){ 2957 ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get(); 2958 ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); 2959 2960 if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && 2961 LL.getValueType().isInteger()) { 2962 // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0) 2963 if (isNullConstant(LR) && Op1 == ISD::SETEQ) { 2964 EVT CCVT = getSetCCResultType(LR.getValueType()); 2965 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 2966 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0), 2967 LR.getValueType(), LL, RL); 2968 AddToWorklist(ORNode.getNode()); 2969 return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1); 2970 } 2971 } 2972 if (isAllOnesConstant(LR)) { 2973 // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1) 2974 if (Op1 == ISD::SETEQ) { 2975 EVT CCVT = getSetCCResultType(LR.getValueType()); 2976 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 2977 SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0), 2978 LR.getValueType(), LL, RL); 2979 AddToWorklist(ANDNode.getNode()); 2980 return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1); 2981 } 2982 } 2983 // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1) 2984 if (Op1 == ISD::SETGT) { 2985 EVT CCVT = getSetCCResultType(LR.getValueType()); 2986 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 2987 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0), 2988 LR.getValueType(), LL, RL); 2989 AddToWorklist(ORNode.getNode()); 2990 return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1); 2991 } 2992 } 2993 } 2994 } 2995 // Simplify (and (setne X, 0), (setne X, -1)) -> (setuge (add X, 1), 2) 2996 if (LL == RL && isa<ConstantSDNode>(LR) && isa<ConstantSDNode>(RR) && 2997 Op0 == Op1 && LL.getValueType().isInteger() && 2998 Op0 == ISD::SETNE && ((isNullConstant(LR) && isAllOnesConstant(RR)) || 2999 (isAllOnesConstant(LR) && isNullConstant(RR)))) { 3000 EVT CCVT = getSetCCResultType(LL.getValueType()); 3001 if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) { 3002 SDLoc DL(N0); 3003 SDValue ADDNode = DAG.getNode(ISD::ADD, DL, LL.getValueType(), 3004 LL, DAG.getConstant(1, DL, 3005 LL.getValueType())); 3006 AddToWorklist(ADDNode.getNode()); 3007 return DAG.getSetCC(SDLoc(LocReference), VT, ADDNode, 3008 DAG.getConstant(2, DL, LL.getValueType()), 3009 ISD::SETUGE); 3010 } 3011 } 3012 // canonicalize equivalent to ll == rl 3013 if (LL == RR && LR == RL) { 3014 Op1 = ISD::getSetCCSwappedOperands(Op1); 3015 std::swap(RL, RR); 3016 } 3017 if (LL == RL && LR == RR) { 3018 bool isInteger = LL.getValueType().isInteger(); 3019 ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger); 3020 if (Result != ISD::SETCC_INVALID && 3021 (!LegalOperations || 3022 (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) && 3023 TLI.isOperationLegal(ISD::SETCC, LL.getValueType())))) { 3024 EVT CCVT = getSetCCResultType(LL.getValueType()); 3025 if (N0.getValueType() == CCVT || 3026 (!LegalOperations && N0.getValueType() == MVT::i1)) 3027 return DAG.getSetCC(SDLoc(LocReference), N0.getValueType(), 3028 LL, LR, Result); 3029 } 3030 } 
3031 } 3032 3033 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && 3034 VT.getSizeInBits() <= 64) { 3035 if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3036 APInt ADDC = ADDI->getAPIntValue(); 3037 if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 3038 // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal 3039 // immediate for an add, but it is legal if its top c2 bits are set, 3040 // transform the ADD so the immediate doesn't need to be materialized 3041 // in a register. 3042 if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { 3043 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 3044 SRLI->getZExtValue()); 3045 if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) { 3046 ADDC |= Mask; 3047 if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 3048 SDLoc DL(N0); 3049 SDValue NewAdd = 3050 DAG.getNode(ISD::ADD, DL, VT, 3051 N0.getOperand(0), DAG.getConstant(ADDC, DL, VT)); 3052 CombineTo(N0.getNode(), NewAdd); 3053 // Return N so it doesn't get rechecked! 3054 return SDValue(LocReference, 0); 3055 } 3056 } 3057 } 3058 } 3059 } 3060 } 3061 3062 // Reduce bit extract of low half of an integer to the narrower type. 3063 // (and (srl i64:x, K), KMask) -> 3064 // (i64 zero_extend (and (srl (i32 (trunc i64:x)), K)), KMask) 3065 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { 3066 if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) { 3067 if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3068 unsigned Size = VT.getSizeInBits(); 3069 const APInt &AndMask = CAnd->getAPIntValue(); 3070 unsigned ShiftBits = CShift->getZExtValue(); 3071 3072 // Bail out, this node will probably disappear anyway. 3073 if (ShiftBits == 0) 3074 return SDValue(); 3075 3076 unsigned MaskBits = AndMask.countTrailingOnes(); 3077 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2); 3078 3079 if (APIntOps::isMask(AndMask) && 3080 // Required bits must not span the two halves of the integer and 3081 // must fit in the half size type. 3082 (ShiftBits + MaskBits <= Size / 2) && 3083 TLI.isNarrowingProfitable(VT, HalfVT) && 3084 TLI.isTypeDesirableForOp(ISD::AND, HalfVT) && 3085 TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) && 3086 TLI.isTruncateFree(VT, HalfVT) && 3087 TLI.isZExtFree(HalfVT, VT)) { 3088 // The isNarrowingProfitable is to avoid regressions on PPC and 3089 // AArch64 which match a few 64-bit bit insert / bit extract patterns 3090 // on downstream users of this. Those patterns could probably be 3091 // extended to handle extensions mixed in. 3092 3093 SDValue SL(N0); 3094 assert(MaskBits <= Size); 3095 3096 // Extracting the highest bit of the low half. 
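          // e.g. for i64: (and (srl x, 8), 0xFFFF)
          //   -> (zext i32 (and (srl (trunc i32 x), 8), 0xFFFF))
          // since ShiftBits + MaskBits == 24 fits in the low 32 bits.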
3097 EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout()); 3098 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT, 3099 N0.getOperand(0)); 3100 3101 SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT); 3102 SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT); 3103 SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK); 3104 SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask); 3105 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And); 3106 } 3107 } 3108 } 3109 } 3110 3111 return SDValue(); 3112 } 3113 3114 bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN, 3115 EVT LoadResultTy, EVT &ExtVT, EVT &LoadedVT, 3116 bool &NarrowLoad) { 3117 uint32_t ActiveBits = AndC->getAPIntValue().getActiveBits(); 3118 3119 if (ActiveBits == 0 || !APIntOps::isMask(ActiveBits, AndC->getAPIntValue())) 3120 return false; 3121 3122 ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); 3123 LoadedVT = LoadN->getMemoryVT(); 3124 3125 if (ExtVT == LoadedVT && 3126 (!LegalOperations || 3127 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) { 3128 // ZEXTLOAD will match without needing to change the size of the value being 3129 // loaded. 3130 NarrowLoad = false; 3131 return true; 3132 } 3133 3134 // Do not change the width of a volatile load. 3135 if (LoadN->isVolatile()) 3136 return false; 3137 3138 // Do not generate loads of non-round integer types since these can 3139 // be expensive (and would be wrong if the type is not byte sized). 3140 if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound()) 3141 return false; 3142 3143 if (LegalOperations && 3144 !TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT)) 3145 return false; 3146 3147 if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT)) 3148 return false; 3149 3150 NarrowLoad = true; 3151 return true; 3152 } 3153 3154 SDValue DAGCombiner::visitAND(SDNode *N) { 3155 SDValue N0 = N->getOperand(0); 3156 SDValue N1 = N->getOperand(1); 3157 EVT VT = N1.getValueType(); 3158 3159 // x & x --> x 3160 if (N0 == N1) 3161 return N0; 3162 3163 // fold vector ops 3164 if (VT.isVector()) { 3165 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 3166 return FoldedVOp; 3167 3168 // fold (and x, 0) -> 0, vector edition 3169 if (ISD::isBuildVectorAllZeros(N0.getNode())) 3170 // do not return N0, because undef node may exist in N0 3171 return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()), 3172 SDLoc(N), N0.getValueType()); 3173 if (ISD::isBuildVectorAllZeros(N1.getNode())) 3174 // do not return N1, because undef node may exist in N1 3175 return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()), 3176 SDLoc(N), N1.getValueType()); 3177 3178 // fold (and x, -1) -> x, vector edition 3179 if (ISD::isBuildVectorAllOnes(N0.getNode())) 3180 return N1; 3181 if (ISD::isBuildVectorAllOnes(N1.getNode())) 3182 return N0; 3183 } 3184 3185 // fold (and c1, c2) -> c1&c2 3186 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 3187 ConstantSDNode *N1C = isConstOrConstSplat(N1); 3188 if (N0C && N1C && !N1C->isOpaque()) 3189 return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C); 3190 // canonicalize constant to RHS 3191 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 3192 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 3193 return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0); 3194 // fold (and x, -1) -> x 3195 if (isAllOnesConstant(N1)) 3196 return N0; 3197 // if (and x, c) is known to be zero, return 0 3198 unsigned BitWidth = VT.getScalarSizeInBits(); 3199 if 
(N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 3200 APInt::getAllOnesValue(BitWidth))) 3201 return DAG.getConstant(0, SDLoc(N), VT); 3202 // reassociate and 3203 if (SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1)) 3204 return RAND; 3205 // fold (and (or x, C), D) -> D if (C & D) == D 3206 if (N1C && N0.getOpcode() == ISD::OR) 3207 if (ConstantSDNode *ORI = isConstOrConstSplat(N0.getOperand(1))) 3208 if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue()) 3209 return N1; 3210 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. 3211 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 3212 SDValue N0Op0 = N0.getOperand(0); 3213 APInt Mask = ~N1C->getAPIntValue(); 3214 Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits()); 3215 if (DAG.MaskedValueIsZero(N0Op0, Mask)) { 3216 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), 3217 N0.getValueType(), N0Op0); 3218 3219 // Replace uses of the AND with uses of the Zero extend node. 3220 CombineTo(N, Zext); 3221 3222 // We actually want to replace all uses of the any_extend with the 3223 // zero_extend, to avoid duplicating things. This will later cause this 3224 // AND to be folded. 3225 CombineTo(N0.getNode(), Zext); 3226 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3227 } 3228 } 3229 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) -> 3230 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must 3231 // already be zero by virtue of the width of the base type of the load. 3232 // 3233 // the 'X' node here can either be nothing or an extract_vector_elt to catch 3234 // more cases. 3235 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 3236 N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() && 3237 N0.getOperand(0).getOpcode() == ISD::LOAD && 3238 N0.getOperand(0).getResNo() == 0) || 3239 (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) { 3240 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ? 3241 N0 : N0.getOperand(0) ); 3242 3243 // Get the constant (if applicable) the zero'th operand is being ANDed with. 3244 // This can be a pure constant or a vector splat, in which case we treat the 3245 // vector as a scalar and use the splat value. 3246 APInt Constant = APInt::getNullValue(1); 3247 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 3248 Constant = C->getAPIntValue(); 3249 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { 3250 APInt SplatValue, SplatUndef; 3251 unsigned SplatBitSize; 3252 bool HasAnyUndefs; 3253 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef, 3254 SplatBitSize, HasAnyUndefs); 3255 if (IsSplat) { 3256 // Undef bits can contribute to a possible optimisation if set, so 3257 // set them. 3258 SplatValue |= SplatUndef; 3259 3260 // The splat value may be something like "0x00FFFFFF", which means 0 for 3261 // the first vector value and FF for the rest, repeating. We need a mask 3262 // that will apply equally to all members of the vector, so AND all the 3263 // lanes of the constant together. 3264 EVT VT = Vector->getValueType(0); 3265 unsigned BitWidth = VT.getScalarSizeInBits(); 3266 3267 // If the splat value has been compressed to a bitlength lower 3268 // than the size of the vector lane, we need to re-expand it to 3269 // the lane size. 
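// A worked example may help here (values illustrative): suppose
// isConstantSplat() compressed a v4i32 all-lanes splat down to the 8-bit
// pattern 0xAB. The expansion loop below then doubles the pattern until it
// fills the 32-bit lane: 0xAB -> 0xABAB -> 0xABABABAB.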
3270 if (BitWidth > SplatBitSize) 3271 for (SplatValue = SplatValue.zextOrTrunc(BitWidth); 3272 SplatBitSize < BitWidth; 3273 SplatBitSize = SplatBitSize * 2) 3274 SplatValue |= SplatValue.shl(SplatBitSize); 3275 3276 // Make sure that variable 'Constant' is only set if 'SplatBitSize' is a 3277 // multiple of 'BitWidth'. Otherwise, we could propagate a wrong value. 3278 if (SplatBitSize % BitWidth == 0) { 3279 Constant = APInt::getAllOnesValue(BitWidth); 3280 for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i) 3281 Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth); 3282 } 3283 } 3284 } 3285 3286 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is 3287 // actually legal and isn't going to get expanded, else this is a false 3288 // optimisation. 3289 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD, 3290 Load->getValueType(0), 3291 Load->getMemoryVT()); 3292 3293 // Resize the constant to the same size as the original memory access before 3294 // extension. If it is still the AllOnesValue then this AND is completely 3295 // unneeded. 3296 Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits()); 3297 3298 bool B; 3299 switch (Load->getExtensionType()) { 3300 default: B = false; break; 3301 case ISD::EXTLOAD: B = CanZextLoadProfitably; break; 3302 case ISD::ZEXTLOAD: 3303 case ISD::NON_EXTLOAD: B = true; break; 3304 } 3305 3306 if (B && Constant.isAllOnesValue()) { 3307 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to 3308 // preserve semantics once we get rid of the AND. 3309 SDValue NewLoad(Load, 0); 3310 if (Load->getExtensionType() == ISD::EXTLOAD) { 3311 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD, 3312 Load->getValueType(0), SDLoc(Load), 3313 Load->getChain(), Load->getBasePtr(), 3314 Load->getOffset(), Load->getMemoryVT(), 3315 Load->getMemOperand()); 3316 // Replace uses of the EXTLOAD with the new ZEXTLOAD. 3317 if (Load->getNumValues() == 3) { 3318 // PRE/POST_INC loads have 3 values. 3319 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1), 3320 NewLoad.getValue(2) }; 3321 CombineTo(Load, To, 3, true); 3322 } else { 3323 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1)); 3324 } 3325 } 3326 3327 // Fold the AND away, taking care not to fold to the old load node if we 3328 // replaced it. 3329 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0); 3330 3331 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3332 } 3333 } 3334 3335 // fold (and (load x), 255) -> (zextload x, i8) 3336 // fold (and (extload x, i16), 255) -> (zextload x, i8) 3337 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8) 3338 if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD || 3339 (N0.getOpcode() == ISD::ANY_EXTEND && 3340 N0.getOperand(0).getOpcode() == ISD::LOAD))) { 3341 bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND; 3342 LoadSDNode *LN0 = HasAnyExt 3343 ? cast<LoadSDNode>(N0.getOperand(0)) 3344 : cast<LoadSDNode>(N0); 3345 if (LN0->getExtensionType() != ISD::SEXTLOAD && 3346 LN0->isUnindexed() && N0.hasOneUse() && SDValue(LN0, 0).hasOneUse()) { 3347 auto NarrowLoad = false; 3348 EVT LoadResultTy = HasAnyExt ? 
LN0->getValueType(0) : VT; 3349 EVT ExtVT, LoadedVT; 3350 if (isAndLoadExtLoad(N1C, LN0, LoadResultTy, ExtVT, LoadedVT, 3351 NarrowLoad)) { 3352 if (!NarrowLoad) { 3353 SDValue NewLoad = 3354 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, 3355 LN0->getChain(), LN0->getBasePtr(), ExtVT, 3356 LN0->getMemOperand()); 3357 AddToWorklist(N); 3358 CombineTo(LN0, NewLoad, NewLoad.getValue(1)); 3359 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3360 } else { 3361 EVT PtrType = LN0->getOperand(1).getValueType(); 3362 3363 unsigned Alignment = LN0->getAlignment(); 3364 SDValue NewPtr = LN0->getBasePtr(); 3365 3366 // For big endian targets, we need to add an offset to the pointer 3367 // to load the correct bytes. For little endian systems, we merely 3368 // need to read fewer bytes from the same pointer. 3369 if (DAG.getDataLayout().isBigEndian()) { 3370 unsigned LVTStoreBytes = LoadedVT.getStoreSize(); 3371 unsigned EVTStoreBytes = ExtVT.getStoreSize(); 3372 unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; 3373 SDLoc DL(LN0); 3374 NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, 3375 NewPtr, DAG.getConstant(PtrOff, DL, PtrType)); 3376 Alignment = MinAlign(Alignment, PtrOff); 3377 } 3378 3379 AddToWorklist(NewPtr.getNode()); 3380 3381 SDValue Load = DAG.getExtLoad( 3382 ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, LN0->getChain(), NewPtr, 3383 LN0->getPointerInfo(), ExtVT, Alignment, 3384 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 3385 AddToWorklist(N); 3386 CombineTo(LN0, Load, Load.getValue(1)); 3387 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3388 } 3389 } 3390 } 3391 } 3392 3393 if (SDValue Combined = visitANDLike(N0, N1, N)) 3394 return Combined; 3395 3396 // Simplify: (and (op x...), (op y...)) -> (op (and x, y)) 3397 if (N0.getOpcode() == N1.getOpcode()) 3398 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N)) 3399 return Tmp; 3400 3401 // Masking the negated extension of a boolean is just the zero-extended 3402 // boolean: 3403 // and (sub 0, zext(bool X)), 1 --> zext(bool X) 3404 // and (sub 0, sext(bool X)), 1 --> zext(bool X) 3405 // 3406 // Note: the SimplifyDemandedBits fold below can make an information-losing 3407 // transform, and then we have no way to find this better fold. 3408 if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) { 3409 ConstantSDNode *SubLHS = isConstOrConstSplat(N0.getOperand(0)); 3410 SDValue SubRHS = N0.getOperand(1); 3411 if (SubLHS && SubLHS->isNullValue()) { 3412 if (SubRHS.getOpcode() == ISD::ZERO_EXTEND && 3413 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1) 3414 return SubRHS; 3415 if (SubRHS.getOpcode() == ISD::SIGN_EXTEND && 3416 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1) 3417 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, SubRHS.getOperand(0)); 3418 } 3419 } 3420 3421 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) 3422 // fold (and (sra)) -> (and (srl)) when possible. 3423 if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0))) 3424 return SDValue(N, 0); 3425 3426 // fold (zext_inreg (extload x)) -> (zextload x) 3427 if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) { 3428 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 3429 EVT MemVT = LN0->getMemoryVT(); 3430 // If we zero all the possible extended bits, then we can turn this into 3431 // a zextload if we are running before legalize or the operation is legal. 
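// Illustrative example: with N0 = (extload i8 -> i32) and N1 = 0x000000FF,
// the AND already clears bits 8..31, which is exactly what a zextload from
// i8 guarantees, so the extending load can be rewritten as a zero-extending
// one and the masking becomes redundant.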
3432 unsigned BitWidth = N1.getScalarValueSizeInBits(); 3433 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 3434 BitWidth - MemVT.getScalarSizeInBits())) && 3435 ((!LegalOperations && !LN0->isVolatile()) || 3436 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) { 3437 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 3438 LN0->getChain(), LN0->getBasePtr(), 3439 MemVT, LN0->getMemOperand()); 3440 AddToWorklist(N); 3441 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 3442 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3443 } 3444 } 3445 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use 3446 if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 3447 N0.hasOneUse()) { 3448 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 3449 EVT MemVT = LN0->getMemoryVT(); 3450 // If we zero all the possible extended bits, then we can turn this into 3451 // a zextload if we are running before legalize or the operation is legal. 3452 unsigned BitWidth = N1.getScalarValueSizeInBits(); 3453 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 3454 BitWidth - MemVT.getScalarSizeInBits())) && 3455 ((!LegalOperations && !LN0->isVolatile()) || 3456 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) { 3457 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 3458 LN0->getChain(), LN0->getBasePtr(), 3459 MemVT, LN0->getMemOperand()); 3460 AddToWorklist(N); 3461 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 3462 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3463 } 3464 } 3465 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const) 3466 if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) { 3467 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 3468 N0.getOperand(1), false)) 3469 return BSwap; 3470 } 3471 3472 return SDValue(); 3473 } 3474 3475 /// Match (a >> 8) | (a << 8) as (bswap a) >> 16. 
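/// A concrete i32 instance of the pattern (constants illustrative):
///   ((a & 0xff00) >> 8) | ((a & 0xff) << 8)
/// swaps the two bytes of the low halfword and leaves the upper halfword
/// zero, which matches (bswap a) >> 16: the SRL moves the byte-reversed
/// high half down and fills the top 16 bits with zeroes.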
3476 SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
3477                                         bool DemandHighBits) {
3478   if (!LegalOperations)
3479     return SDValue();
3480
3481   EVT VT = N->getValueType(0);
3482   if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
3483     return SDValue();
3484   if (!TLI.isOperationLegal(ISD::BSWAP, VT))
3485     return SDValue();
3486
3487   // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
3488   bool LookPassAnd0 = false;
3489   bool LookPassAnd1 = false;
3490   if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
3491     std::swap(N0, N1);
3492   if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
3493     std::swap(N0, N1);
3494   if (N0.getOpcode() == ISD::AND) {
3495     if (!N0.getNode()->hasOneUse())
3496       return SDValue();
3497     ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3498     if (!N01C || N01C->getZExtValue() != 0xFF00)
3499       return SDValue();
3500     N0 = N0.getOperand(0);
3501     LookPassAnd0 = true;
3502   }
3503
3504   if (N1.getOpcode() == ISD::AND) {
3505     if (!N1.getNode()->hasOneUse())
3506       return SDValue();
3507     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
3508     if (!N11C || N11C->getZExtValue() != 0xFF)
3509       return SDValue();
3510     N1 = N1.getOperand(0);
3511     LookPassAnd1 = true;
3512   }
3513
3514   if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
3515     std::swap(N0, N1);
3516   if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
3517     return SDValue();
3518   if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse())
3519     return SDValue();
3520
3521   ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3522   ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
3523   if (!N01C || !N11C)
3524     return SDValue();
3525   if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
3526     return SDValue();
3527
3528   // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
3529   SDValue N00 = N0->getOperand(0);
3530   if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
3531     if (!N00.getNode()->hasOneUse())
3532       return SDValue();
3533     ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
3534     if (!N001C || N001C->getZExtValue() != 0xFF)
3535       return SDValue();
3536     N00 = N00.getOperand(0);
3537     LookPassAnd0 = true;
3538   }
3539
3540   SDValue N10 = N1->getOperand(0);
3541   if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
3542     if (!N10.getNode()->hasOneUse())
3543       return SDValue();
3544     ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
3545     if (!N101C || N101C->getZExtValue() != 0xFF00)
3546       return SDValue();
3547     N10 = N10.getOperand(0);
3548     LookPassAnd1 = true;
3549   }
3550
3551   if (N00 != N10)
3552     return SDValue();
3553
3554   // Make sure everything beyond the low halfword gets set to zero since the SRL
3555   // 16 will clear the top bits.
3556   unsigned OpSizeInBits = VT.getSizeInBits();
3557   if (DemandHighBits && OpSizeInBits > 16) {
3558     // If the left-shift isn't masked out then the only way this is a bswap is
3559     // if all bits beyond the low 8 are 0. In that case the entire pattern
3560     // reduces to a left shift anyway: leave it for other parts of the combiner.
3561     if (!LookPassAnd0)
3562       return SDValue();
3563
3564     // However, if the right shift isn't masked out then it might be because
3565     // it's not needed. See if we can spot that too.
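// For instance (illustrative), if N10 is already known to be zero in all
// bits above bit 15 (say it was produced by a zero-extend from i16), the
// 0xff mask on the shifted-right half adds nothing and may have been
// folded away earlier.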
3566 if (!LookPassAnd1 && 3567 !DAG.MaskedValueIsZero( 3568 N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16))) 3569 return SDValue(); 3570 } 3571 3572 SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00); 3573 if (OpSizeInBits > 16) { 3574 SDLoc DL(N); 3575 Res = DAG.getNode(ISD::SRL, DL, VT, Res, 3576 DAG.getConstant(OpSizeInBits - 16, DL, 3577 getShiftAmountTy(VT))); 3578 } 3579 return Res; 3580 } 3581 3582 /// Return true if the specified node is an element that makes up a 32-bit 3583 /// packed halfword byteswap. 3584 /// ((x & 0x000000ff) << 8) | 3585 /// ((x & 0x0000ff00) >> 8) | 3586 /// ((x & 0x00ff0000) << 8) | 3587 /// ((x & 0xff000000) >> 8) 3588 static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) { 3589 if (!N.getNode()->hasOneUse()) 3590 return false; 3591 3592 unsigned Opc = N.getOpcode(); 3593 if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL) 3594 return false; 3595 3596 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3597 if (!N1C) 3598 return false; 3599 3600 unsigned Num; 3601 switch (N1C->getZExtValue()) { 3602 default: 3603 return false; 3604 case 0xFF: Num = 0; break; 3605 case 0xFF00: Num = 1; break; 3606 case 0xFF0000: Num = 2; break; 3607 case 0xFF000000: Num = 3; break; 3608 } 3609 3610 // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00). 3611 SDValue N0 = N.getOperand(0); 3612 if (Opc == ISD::AND) { 3613 if (Num == 0 || Num == 2) { 3614 // (x >> 8) & 0xff 3615 // (x >> 8) & 0xff0000 3616 if (N0.getOpcode() != ISD::SRL) 3617 return false; 3618 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3619 if (!C || C->getZExtValue() != 8) 3620 return false; 3621 } else { 3622 // (x << 8) & 0xff00 3623 // (x << 8) & 0xff000000 3624 if (N0.getOpcode() != ISD::SHL) 3625 return false; 3626 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3627 if (!C || C->getZExtValue() != 8) 3628 return false; 3629 } 3630 } else if (Opc == ISD::SHL) { 3631 // (x & 0xff) << 8 3632 // (x & 0xff0000) << 8 3633 if (Num != 0 && Num != 2) 3634 return false; 3635 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3636 if (!C || C->getZExtValue() != 8) 3637 return false; 3638 } else { // Opc == ISD::SRL 3639 // (x & 0xff00) >> 8 3640 // (x & 0xff000000) >> 8 3641 if (Num != 1 && Num != 3) 3642 return false; 3643 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3644 if (!C || C->getZExtValue() != 8) 3645 return false; 3646 } 3647 3648 if (Parts[Num]) 3649 return false; 3650 3651 Parts[Num] = N0.getOperand(0).getNode(); 3652 return true; 3653 } 3654 3655 /// Match a 32-bit packed halfword bswap. 
That is 3656 /// ((x & 0x000000ff) << 8) | 3657 /// ((x & 0x0000ff00) >> 8) | 3658 /// ((x & 0x00ff0000) << 8) | 3659 /// ((x & 0xff000000) >> 8) 3660 /// => (rotl (bswap x), 16) 3661 SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) { 3662 if (!LegalOperations) 3663 return SDValue(); 3664 3665 EVT VT = N->getValueType(0); 3666 if (VT != MVT::i32) 3667 return SDValue(); 3668 if (!TLI.isOperationLegal(ISD::BSWAP, VT)) 3669 return SDValue(); 3670 3671 // Look for either 3672 // (or (or (and), (and)), (or (and), (and))) 3673 // (or (or (or (and), (and)), (and)), (and)) 3674 if (N0.getOpcode() != ISD::OR) 3675 return SDValue(); 3676 SDValue N00 = N0.getOperand(0); 3677 SDValue N01 = N0.getOperand(1); 3678 SDNode *Parts[4] = {}; 3679 3680 if (N1.getOpcode() == ISD::OR && 3681 N00.getNumOperands() == 2 && N01.getNumOperands() == 2) { 3682 // (or (or (and), (and)), (or (and), (and))) 3683 SDValue N000 = N00.getOperand(0); 3684 if (!isBSwapHWordElement(N000, Parts)) 3685 return SDValue(); 3686 3687 SDValue N001 = N00.getOperand(1); 3688 if (!isBSwapHWordElement(N001, Parts)) 3689 return SDValue(); 3690 SDValue N010 = N01.getOperand(0); 3691 if (!isBSwapHWordElement(N010, Parts)) 3692 return SDValue(); 3693 SDValue N011 = N01.getOperand(1); 3694 if (!isBSwapHWordElement(N011, Parts)) 3695 return SDValue(); 3696 } else { 3697 // (or (or (or (and), (and)), (and)), (and)) 3698 if (!isBSwapHWordElement(N1, Parts)) 3699 return SDValue(); 3700 if (!isBSwapHWordElement(N01, Parts)) 3701 return SDValue(); 3702 if (N00.getOpcode() != ISD::OR) 3703 return SDValue(); 3704 SDValue N000 = N00.getOperand(0); 3705 if (!isBSwapHWordElement(N000, Parts)) 3706 return SDValue(); 3707 SDValue N001 = N00.getOperand(1); 3708 if (!isBSwapHWordElement(N001, Parts)) 3709 return SDValue(); 3710 } 3711 3712 // Make sure the parts are all coming from the same node. 3713 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3]) 3714 return SDValue(); 3715 3716 SDLoc DL(N); 3717 SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, 3718 SDValue(Parts[0], 0)); 3719 3720 // Result of the bswap should be rotated by 16. If it's not legal, then 3721 // do (x << 16) | (x >> 16). 3722 SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT)); 3723 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT)) 3724 return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt); 3725 if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) 3726 return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt); 3727 return DAG.getNode(ISD::OR, DL, VT, 3728 DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt), 3729 DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt)); 3730 } 3731 3732 /// This contains all DAGCombine rules which reduce two values combined by 3733 /// an Or operation to a single value \see visitANDLike(). 3734 SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *LocReference) { 3735 EVT VT = N1.getValueType(); 3736 // fold (or x, undef) -> -1 3737 if (!LegalOperations && 3738 (N0.isUndef() || N1.isUndef())) { 3739 EVT EltVT = VT.isVector() ? 
VT.getVectorElementType() : VT;
3740     return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
3741                            SDLoc(LocReference), VT);
3742   }
3743   // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
3744   SDValue LL, LR, RL, RR, CC0, CC1;
3745   if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
3746     ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
3747     ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
3748
3749     if (LR == RR && Op0 == Op1 && LL.getValueType().isInteger()) {
3750       // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
3751       // fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
3752       if (isNullConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
3753         EVT CCVT = getSetCCResultType(LR.getValueType());
3754         if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) {
3755           SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(LR),
3756                                        LR.getValueType(), LL, RL);
3757           AddToWorklist(ORNode.getNode());
3758           return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
3759         }
3760       }
3761       // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
3762       // fold (or (setgt X, -1), (setgt Y, -1)) -> (setgt (and X, Y), -1)
3763       if (isAllOnesConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
3764         EVT CCVT = getSetCCResultType(LR.getValueType());
3765         if (VT == CCVT || (!LegalOperations && VT == MVT::i1)) {
3766           SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(LR),
3767                                         LR.getValueType(), LL, RL);
3768           AddToWorklist(ANDNode.getNode());
3769           return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1);
3770         }
3771       }
3772     }
3773     // canonicalize equivalent to ll == rl
3774     if (LL == RR && LR == RL) {
3775       Op1 = ISD::getSetCCSwappedOperands(Op1);
3776       std::swap(RL, RR);
3777     }
3778     if (LL == RL && LR == RR) {
3779       bool isInteger = LL.getValueType().isInteger();
3780       ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
3781       if (Result != ISD::SETCC_INVALID &&
3782           (!LegalOperations ||
3783            (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) &&
3784             TLI.isOperationLegal(ISD::SETCC, LL.getValueType())))) {
3785         EVT CCVT = getSetCCResultType(LL.getValueType());
3786         if (N0.getValueType() == CCVT ||
3787             (!LegalOperations && N0.getValueType() == MVT::i1))
3788           return DAG.getSetCC(SDLoc(LocReference), N0.getValueType(),
3789                               LL, LR, Result);
3790       }
3791     }
3792   }
3793
3794   // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
3795   if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
3796       // Don't increase # computations.
3797       (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
3798     // We can only do this xform if we know that bits from X that are set in C2
3799     // but not in C1 are already zero. Likewise for Y.
3800     if (const ConstantSDNode *N0O1C =
3801         getAsNonOpaqueConstant(N0.getOperand(1))) {
3802       if (const ConstantSDNode *N1O1C =
3803           getAsNonOpaqueConstant(N1.getOperand(1))) {
3806         const APInt &LHSMask = N0O1C->getAPIntValue();
3807         const APInt &RHSMask = N1O1C->getAPIntValue();
3808
3809         if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
3810             DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
3811           SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
3812                                   N0.getOperand(0), N1.getOperand(0));
3813           SDLoc DL(LocReference);
3814           return DAG.getNode(ISD::AND, DL, VT, X,
3815                              DAG.getConstant(LHSMask | RHSMask, DL, VT));
3816         }
3817       }
3818     }
3819   }
3820
3821   // (or (and X, M), (and X, N)) -> (and X, (or M, N))
3822   if (N0.getOpcode() == ISD::AND &&
3823       N1.getOpcode() == ISD::AND &&
3824       N0.getOperand(0) == N1.getOperand(0) &&
3825       // Don't increase # computations.
3826       (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
3827     SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
3828                             N0.getOperand(1), N1.getOperand(1));
3829     return DAG.getNode(ISD::AND, SDLoc(LocReference), VT, N0.getOperand(0), X);
3830   }
3831
3832   return SDValue();
3833 }
3834
3835 SDValue DAGCombiner::visitOR(SDNode *N) {
3836   SDValue N0 = N->getOperand(0);
3837   SDValue N1 = N->getOperand(1);
3838   EVT VT = N1.getValueType();
3839
3840   // x | x --> x
3841   if (N0 == N1)
3842     return N0;
3843
3844   // fold vector ops
3845   if (VT.isVector()) {
3846     if (SDValue FoldedVOp = SimplifyVBinOp(N))
3847       return FoldedVOp;
3848
3849     // fold (or x, 0) -> x, vector edition
3850     if (ISD::isBuildVectorAllZeros(N0.getNode()))
3851       return N1;
3852     if (ISD::isBuildVectorAllZeros(N1.getNode()))
3853       return N0;
3854
3855     // fold (or x, -1) -> -1, vector edition
3856     if (ISD::isBuildVectorAllOnes(N0.getNode()))
3857       // do not return N0, because undef node may exist in N0
3858       return DAG.getConstant(
3859           APInt::getAllOnesValue(N0.getScalarValueSizeInBits()), SDLoc(N),
3860           N0.getValueType());
3861     if (ISD::isBuildVectorAllOnes(N1.getNode()))
3862       // do not return N1, because undef node may exist in N1
3863       return DAG.getConstant(
3864           APInt::getAllOnesValue(N1.getScalarValueSizeInBits()), SDLoc(N),
3865           N1.getValueType());
3866
3867     // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask)
3868     // Do this only if the resulting shuffle is legal.
3869     if (isa<ShuffleVectorSDNode>(N0) &&
3870         isa<ShuffleVectorSDNode>(N1) &&
3871         // Avoid folding a node with illegal type.
3872         TLI.isTypeLegal(VT)) {
3873       bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode());
3874       bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode());
3875       bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
3876       bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode());
3877       // Ensure both shuffles have a zero input.
3878       if ((ZeroN00 || ZeroN01) && (ZeroN10 || ZeroN11)) {
3879         assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!");
3880         assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!");
3881         const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
3882         const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
3883         bool CanFold = true;
3884         int NumElts = VT.getVectorNumElements();
3885         SmallVector<int, 4> Mask(NumElts);
3886
3887         for (int i = 0; i != NumElts; ++i) {
3888           int M0 = SV0->getMaskElt(i);
3889           int M1 = SV1->getMaskElt(i);
3890
3891           // Determine if either index is pointing to a zero vector.
3892           bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts));
3893           bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts));
3894
3895           // If one element is zero and the other side is undef, keep undef.
3896           // This also handles the case that both are undef.
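// E.g. (illustrative) if M0 is -1 (undef) and M1 selects the zero vector,
// the lane is (or undef, 0), for which undef is a valid result.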
3897           if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) {
3898             Mask[i] = -1;
3899             continue;
3900           }
3901
3902           // Make sure only one of the elements is zero.
3903           if (M0Zero == M1Zero) {
3904             CanFold = false;
3905             break;
3906           }
3907
3908           assert((M0 >= 0 || M1 >= 0) && "Undef index!");
3909
3910           // We have a zero and non-zero element. If the non-zero came from
3911           // SV0 make the index a LHS index. If it came from SV1, make it
3912           // a RHS index. We need to mod by NumElts because we don't care
3913           // which operand it came from in the original shuffles.
3914           Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts;
3915         }
3916
3917         if (CanFold) {
3918           SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0);
3919           SDValue NewRHS = ZeroN10 ? N1.getOperand(1) : N1.getOperand(0);
3920
3921           bool LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
3922           if (!LegalMask) {
3923             std::swap(NewLHS, NewRHS);
3924             ShuffleVectorSDNode::commuteMask(Mask);
3925             LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
3926           }
3927
3928           if (LegalMask)
3929             return DAG.getVectorShuffle(VT, SDLoc(N), NewLHS, NewRHS, Mask);
3930         }
3931       }
3932     }
3933   }
3934
3935   // fold (or c1, c2) -> c1|c2
3936   ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
3937   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3938   if (N0C && N1C && !N1C->isOpaque())
3939     return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C);
3940   // canonicalize constant to RHS
3941   if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
3942      !DAG.isConstantIntBuildVectorOrConstantInt(N1))
3943     return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
3944   // fold (or x, 0) -> x
3945   if (isNullConstant(N1))
3946     return N0;
3947   // fold (or x, -1) -> -1
3948   if (isAllOnesConstant(N1))
3949     return N1;
3950   // fold (or x, c) -> c iff (x & ~c) == 0
3951   if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
3952     return N1;
3953
3954   if (SDValue Combined = visitORLike(N0, N1, N))
3955     return Combined;
3956
3957   // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + srl 16)
3958   if (SDValue BSwap = MatchBSwapHWord(N, N0, N1))
3959     return BSwap;
3960   if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1))
3961     return BSwap;
3962
3963   // reassociate or
3964   if (SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1))
3965     return ROR;
3966   // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
3967   // iff (c1 & c2) != 0.
3968   if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
3969       isa<ConstantSDNode>(N0.getOperand(1))) {
3970     ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
3971     if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) {
3972       if (SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT,
3973                                                    N1C, C1))
3974         return DAG.getNode(
3975             ISD::AND, SDLoc(N), VT,
3976             DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1), COR);
3977       return SDValue();
3978     }
3979   }
3980   // Simplify: (or (op x...), (op y...)) -> (op (or x, y))
3981   if (N0.getOpcode() == N1.getOpcode())
3982     if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
3983       return Tmp;
3984
3985   // See if this is some rotate idiom.
3986   if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
3987     return SDValue(Rot, 0);
3988
3989   if (SDValue Load = MatchLoadCombine(N))
3990     return Load;
3991
3992   // Simplify the operands using demanded-bits information.
3993   if (!VT.isVector() &&
3994       SimplifyDemandedBits(SDValue(N, 0)))
3995     return SDValue(N, 0);
3996
3997   return SDValue();
3998 }
3999
4000 /// Match "(X shl/srl V1) & V2" where V2 may not be present.
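/// E.g. both (srl x, y) and (and (shl x, y), 0xffff0000) match here
/// (constants illustrative): the shift is returned in Shift and, in the
/// second form, the mask constant is returned in Mask.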
4001 bool DAGCombiner::MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
4002   if (Op.getOpcode() == ISD::AND) {
4003     if (DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) {
4004       Mask = Op.getOperand(1);
4005       Op = Op.getOperand(0);
4006     } else {
4007       return false;
4008     }
4009   }
4010
4011   if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
4012     Shift = Op;
4013     return true;
4014   }
4015
4016   return false;
4017 }
4018
4019 // Return true if we can prove that, whenever Neg and Pos are both in the
4020 // range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos). This means that
4021 // for two opposing shifts shift1 and shift2 and a value X with EltSize bits:
4022 //
4023 //   (or (shift1 X, Neg), (shift2 X, Pos))
4024 //
4025 // reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
4026 // in direction shift1 by Neg. The range [0, EltSize) means that we only need
4027 // to consider shift amounts with defined behavior.
4028 static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize) {
4029   // If EltSize is a power of 2 then:
4030   //
4031   //  (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
4032   //  (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize).
4033   //
4034   // So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check
4035   // for the stronger condition:
4036   //
4037   //     Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1)    [A]
4038   //
4039   // for all Neg and Pos. Since Neg & (EltSize - 1) == Neg' & (EltSize - 1)
4040   // we can just replace Neg with Neg' for the rest of the function.
4041   //
4042   // In other cases we check for the even stronger condition:
4043   //
4044   //     Neg == EltSize - Pos                                    [B]
4045   //
4046   // for all Neg and Pos. Note that the (or ...) then invokes undefined
4047   // behavior if Pos == 0 (and consequently Neg == EltSize).
4048   //
4049   // We could actually use [A] whenever EltSize is a power of 2, but the
4050   // only extra cases that it would match are those uninteresting ones
4051   // where Neg and Pos are never in range at the same time. E.g. for
4052   // EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
4053   // as well as (sub 32, Pos), but:
4054   //
4055   //     (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
4056   //
4057   // always invokes undefined behavior for 32-bit X.
4058   //
4059   // Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise.
4060   unsigned MaskLoBits = 0;
4061   if (Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
4062     if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
4063       if (NegC->getAPIntValue() == EltSize - 1) {
4064         Neg = Neg.getOperand(0);
4065         MaskLoBits = Log2_64(EltSize);
4066       }
4067     }
4068   }
4069
4070   // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
4071   if (Neg.getOpcode() != ISD::SUB)
4072     return false;
4073   ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0));
4074   if (!NegC)
4075     return false;
4076   SDValue NegOp1 = Neg.getOperand(1);
4077
4078   // On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with
4079   // Pos'. The truncation is redundant for the purpose of the equality.
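// Illustrative case for EltSize == 32: given Neg == (and (sub 32, y), 31)
// and Pos == (and y, 31), Neg is unwrapped to (sub 32, y) above, Pos is
// unwrapped to y here, and the checks below reduce to 32 & 31 == 0, which
// proves the pair is a rotate by y.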
4080 if (MaskLoBits && Pos.getOpcode() == ISD::AND) 4081 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) 4082 if (PosC->getAPIntValue() == EltSize - 1) 4083 Pos = Pos.getOperand(0); 4084 4085 // The condition we need is now: 4086 // 4087 // (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask 4088 // 4089 // If NegOp1 == Pos then we need: 4090 // 4091 // EltSize & Mask == NegC & Mask 4092 // 4093 // (because "x & Mask" is a truncation and distributes through subtraction). 4094 APInt Width; 4095 if (Pos == NegOp1) 4096 Width = NegC->getAPIntValue(); 4097 4098 // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC. 4099 // Then the condition we want to prove becomes: 4100 // 4101 // (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask 4102 // 4103 // which, again because "x & Mask" is a truncation, becomes: 4104 // 4105 // NegC & Mask == (EltSize - PosC) & Mask 4106 // EltSize & Mask == (NegC + PosC) & Mask 4107 else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) { 4108 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) 4109 Width = PosC->getAPIntValue() + NegC->getAPIntValue(); 4110 else 4111 return false; 4112 } else 4113 return false; 4114 4115 // Now we just need to check that EltSize & Mask == Width & Mask. 4116 if (MaskLoBits) 4117 // EltSize & Mask is 0 since Mask is EltSize - 1. 4118 return Width.getLoBits(MaskLoBits) == 0; 4119 return Width == EltSize; 4120 } 4121 4122 // A subroutine of MatchRotate used once we have found an OR of two opposite 4123 // shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces 4124 // to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the 4125 // former being preferred if supported. InnerPos and InnerNeg are Pos and 4126 // Neg with outer conversions stripped away. 4127 SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos, 4128 SDValue Neg, SDValue InnerPos, 4129 SDValue InnerNeg, unsigned PosOpcode, 4130 unsigned NegOpcode, const SDLoc &DL) { 4131 // fold (or (shl x, (*ext y)), 4132 // (srl x, (*ext (sub 32, y)))) -> 4133 // (rotl x, y) or (rotr x, (sub 32, y)) 4134 // 4135 // fold (or (shl x, (*ext (sub 32, y))), 4136 // (srl x, (*ext y))) -> 4137 // (rotr x, y) or (rotl x, (sub 32, y)) 4138 EVT VT = Shifted.getValueType(); 4139 if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits())) { 4140 bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT); 4141 return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted, 4142 HasPos ? Pos : Neg).getNode(); 4143 } 4144 4145 return nullptr; 4146 } 4147 4148 // MatchRotate - Handle an 'or' of two operands. If this is one of the many 4149 // idioms for rotate, and if the target supports rotation instructions, generate 4150 // a rot[lr]. 4151 SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) { 4152 // Must be a legal type. Expanded 'n promoted things won't work with rotates. 4153 EVT VT = LHS.getValueType(); 4154 if (!TLI.isTypeLegal(VT)) return nullptr; 4155 4156 // The target must have at least one rotate flavor. 4157 bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT); 4158 bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT); 4159 if (!HasROTL && !HasROTR) return nullptr; 4160 4161 // Match "(X shl/srl V1) & V2" where V2 may not be present. 4162 SDValue LHSShift; // The shift. 4163 SDValue LHSMask; // AND value if any. 4164 if (!MatchRotateHalf(LHS, LHSShift, LHSMask)) 4165 return nullptr; // Not part of a rotate. 
4166 4167 SDValue RHSShift; // The shift. 4168 SDValue RHSMask; // AND value if any. 4169 if (!MatchRotateHalf(RHS, RHSShift, RHSMask)) 4170 return nullptr; // Not part of a rotate. 4171 4172 if (LHSShift.getOperand(0) != RHSShift.getOperand(0)) 4173 return nullptr; // Not shifting the same value. 4174 4175 if (LHSShift.getOpcode() == RHSShift.getOpcode()) 4176 return nullptr; // Shifts must disagree. 4177 4178 // Canonicalize shl to left side in a shl/srl pair. 4179 if (RHSShift.getOpcode() == ISD::SHL) { 4180 std::swap(LHS, RHS); 4181 std::swap(LHSShift, RHSShift); 4182 std::swap(LHSMask, RHSMask); 4183 } 4184 4185 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 4186 SDValue LHSShiftArg = LHSShift.getOperand(0); 4187 SDValue LHSShiftAmt = LHSShift.getOperand(1); 4188 SDValue RHSShiftArg = RHSShift.getOperand(0); 4189 SDValue RHSShiftAmt = RHSShift.getOperand(1); 4190 4191 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1) 4192 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2) 4193 if (isConstOrConstSplat(LHSShiftAmt) && isConstOrConstSplat(RHSShiftAmt)) { 4194 uint64_t LShVal = isConstOrConstSplat(LHSShiftAmt)->getZExtValue(); 4195 uint64_t RShVal = isConstOrConstSplat(RHSShiftAmt)->getZExtValue(); 4196 if ((LShVal + RShVal) != EltSizeInBits) 4197 return nullptr; 4198 4199 SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, 4200 LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt); 4201 4202 // If there is an AND of either shifted operand, apply it to the result. 4203 if (LHSMask.getNode() || RHSMask.getNode()) { 4204 APInt AllBits = APInt::getAllOnesValue(EltSizeInBits); 4205 SDValue Mask = DAG.getConstant(AllBits, DL, VT); 4206 4207 if (LHSMask.getNode()) { 4208 APInt RHSBits = APInt::getLowBitsSet(EltSizeInBits, LShVal); 4209 Mask = DAG.getNode(ISD::AND, DL, VT, Mask, 4210 DAG.getNode(ISD::OR, DL, VT, LHSMask, 4211 DAG.getConstant(RHSBits, DL, VT))); 4212 } 4213 if (RHSMask.getNode()) { 4214 APInt LHSBits = APInt::getHighBitsSet(EltSizeInBits, RShVal); 4215 Mask = DAG.getNode(ISD::AND, DL, VT, Mask, 4216 DAG.getNode(ISD::OR, DL, VT, RHSMask, 4217 DAG.getConstant(LHSBits, DL, VT))); 4218 } 4219 4220 Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask); 4221 } 4222 4223 return Rot.getNode(); 4224 } 4225 4226 // If there is a mask here, and we have a variable shift, we can't be sure 4227 // that we're masking out the right stuff. 4228 if (LHSMask.getNode() || RHSMask.getNode()) 4229 return nullptr; 4230 4231 // If the shift amount is sign/zext/any-extended just peel it off. 
4232 SDValue LExtOp0 = LHSShiftAmt; 4233 SDValue RExtOp0 = RHSShiftAmt; 4234 if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 4235 LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 4236 LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 4237 LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && 4238 (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 4239 RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 4240 RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 4241 RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { 4242 LExtOp0 = LHSShiftAmt.getOperand(0); 4243 RExtOp0 = RHSShiftAmt.getOperand(0); 4244 } 4245 4246 SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt, 4247 LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL); 4248 if (TryL) 4249 return TryL; 4250 4251 SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt, 4252 RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL); 4253 if (TryR) 4254 return TryR; 4255 4256 return nullptr; 4257 } 4258 4259 namespace { 4260 /// Helper struct to parse and store a memory address as base + index + offset. 4261 /// We ignore sign extensions when it is safe to do so. 4262 /// The following two expressions are not equivalent. To differentiate we need 4263 /// to store whether there was a sign extension involved in the index 4264 /// computation. 4265 /// (load (i64 add (i64 copyfromreg %c) 4266 /// (i64 signextend (add (i8 load %index) 4267 /// (i8 1)))) 4268 /// vs 4269 /// 4270 /// (load (i64 add (i64 copyfromreg %c) 4271 /// (i64 signextend (i32 add (i32 signextend (i8 load %index)) 4272 /// (i32 1))))) 4273 struct BaseIndexOffset { 4274 SDValue Base; 4275 SDValue Index; 4276 int64_t Offset; 4277 bool IsIndexSignExt; 4278 4279 BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {} 4280 4281 BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset, 4282 bool IsIndexSignExt) : 4283 Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {} 4284 4285 bool equalBaseIndex(const BaseIndexOffset &Other) { 4286 return Other.Base == Base && Other.Index == Index && 4287 Other.IsIndexSignExt == IsIndexSignExt; 4288 } 4289 4290 /// Parses tree in Ptr for base, index, offset addresses. 4291 static BaseIndexOffset match(SDValue Ptr, SelectionDAG &DAG, 4292 int64_t PartialOffset = 0) { 4293 bool IsIndexSignExt = false; 4294 4295 // Split up a folded GlobalAddress+Offset into its component parts. 4296 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ptr)) 4297 if (GA->getOpcode() == ISD::GlobalAddress && GA->getOffset() != 0) { 4298 return BaseIndexOffset(DAG.getGlobalAddress(GA->getGlobal(), 4299 SDLoc(GA), 4300 GA->getValueType(0), 4301 /*Offset=*/PartialOffset, 4302 /*isTargetGA=*/false, 4303 GA->getTargetFlags()), 4304 SDValue(), 4305 GA->getOffset(), 4306 IsIndexSignExt); 4307 } 4308 4309 // We only can pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD 4310 // instruction, then it could be just the BASE or everything else we don't 4311 // know how to handle. Just use Ptr as BASE and give up. 4312 if (Ptr->getOpcode() != ISD::ADD) 4313 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4314 4315 // We know that we have at least an ADD instruction. Try to pattern match 4316 // the simple case of BASE + OFFSET. 
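// Illustrative decomposition: for Ptr == (add (add %obj, %i), 8) the
// constant 8 is folded into PartialOffset here, and the recursive call then
// splits the inner ADD into Base == %obj and Index == %i, giving
// {%obj, %i, 8}.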
4317 if (isa<ConstantSDNode>(Ptr->getOperand(1))) { 4318 int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue(); 4319 return match(Ptr->getOperand(0), DAG, Offset + PartialOffset); 4320 } 4321 4322 // Inside a loop the current BASE pointer is calculated using an ADD and a 4323 // MUL instruction. In this case Ptr is the actual BASE pointer. 4324 // (i64 add (i64 %array_ptr) 4325 // (i64 mul (i64 %induction_var) 4326 // (i64 %element_size))) 4327 if (Ptr->getOperand(1)->getOpcode() == ISD::MUL) 4328 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4329 4330 // Look at Base + Index + Offset cases. 4331 SDValue Base = Ptr->getOperand(0); 4332 SDValue IndexOffset = Ptr->getOperand(1); 4333 4334 // Skip signextends. 4335 if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) { 4336 IndexOffset = IndexOffset->getOperand(0); 4337 IsIndexSignExt = true; 4338 } 4339 4340 // Either the case of Base + Index (no offset) or something else. 4341 if (IndexOffset->getOpcode() != ISD::ADD) 4342 return BaseIndexOffset(Base, IndexOffset, PartialOffset, IsIndexSignExt); 4343 4344 // Now we have the case of Base + Index + offset. 4345 SDValue Index = IndexOffset->getOperand(0); 4346 SDValue Offset = IndexOffset->getOperand(1); 4347 4348 if (!isa<ConstantSDNode>(Offset)) 4349 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4350 4351 // Ignore signextends. 4352 if (Index->getOpcode() == ISD::SIGN_EXTEND) { 4353 Index = Index->getOperand(0); 4354 IsIndexSignExt = true; 4355 } else IsIndexSignExt = false; 4356 4357 int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue(); 4358 return BaseIndexOffset(Base, Index, Off + PartialOffset, IsIndexSignExt); 4359 } 4360 }; 4361 } // namespace 4362 4363 namespace { 4364 /// Represents known origin of an individual byte in load combine pattern. The 4365 /// value of the byte is either constant zero or comes from memory. 4366 struct ByteProvider { 4367 // For constant zero providers Load is set to nullptr. For memory providers 4368 // Load represents the node which loads the byte from memory. 4369 // ByteOffset is the offset of the byte in the value produced by the load. 4370 LoadSDNode *Load; 4371 unsigned ByteOffset; 4372 4373 ByteProvider() : Load(nullptr), ByteOffset(0) {} 4374 4375 static ByteProvider getMemory(LoadSDNode *Load, unsigned ByteOffset) { 4376 return ByteProvider(Load, ByteOffset); 4377 } 4378 static ByteProvider getConstantZero() { return ByteProvider(nullptr, 0); } 4379 4380 bool isConstantZero() { return !Load; } 4381 bool isMemory() { return Load; } 4382 4383 bool operator==(const ByteProvider &Other) const { 4384 return Other.Load == Load && Other.ByteOffset == ByteOffset; 4385 } 4386 4387 private: 4388 ByteProvider(LoadSDNode *Load, unsigned ByteOffset) 4389 : Load(Load), ByteOffset(ByteOffset) {} 4390 }; 4391 4392 /// Recursively traverses the expression calculating the origin of the requested 4393 /// byte of the given value. Returns None if the provider can't be calculated. 4394 /// 4395 /// For all the values except the root of the expression verifies that the value 4396 /// has exactly one use and if it's not true return None. This way if the origin 4397 /// of the byte is returned it's guaranteed that the values which contribute to 4398 /// the byte are not used outside of this expression. 4399 /// 4400 /// Because the parts of the expression are not allowed to have more than one 4401 /// use this function iterates over trees, not DAGs. 
So it never visits the same 4402 /// node more than once. 4403 const Optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index, 4404 unsigned Depth, 4405 bool Root = false) { 4406 // Typical i64 by i8 pattern requires recursion up to 8 calls depth 4407 if (Depth == 10) 4408 return None; 4409 4410 if (!Root && !Op.hasOneUse()) 4411 return None; 4412 4413 assert(Op.getValueType().isScalarInteger() && "can't handle other types"); 4414 unsigned BitWidth = Op.getValueSizeInBits(); 4415 if (BitWidth % 8 != 0) 4416 return None; 4417 unsigned ByteWidth = BitWidth / 8; 4418 assert(Index < ByteWidth && "invalid index requested"); 4419 (void) ByteWidth; 4420 4421 switch (Op.getOpcode()) { 4422 case ISD::OR: { 4423 auto LHS = calculateByteProvider(Op->getOperand(0), Index, Depth + 1); 4424 if (!LHS) 4425 return None; 4426 auto RHS = calculateByteProvider(Op->getOperand(1), Index, Depth + 1); 4427 if (!RHS) 4428 return None; 4429 4430 if (LHS->isConstantZero()) 4431 return RHS; 4432 else if (RHS->isConstantZero()) 4433 return LHS; 4434 else 4435 return None; 4436 } 4437 case ISD::SHL: { 4438 auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1)); 4439 if (!ShiftOp) 4440 return None; 4441 4442 uint64_t BitShift = ShiftOp->getZExtValue(); 4443 if (BitShift % 8 != 0) 4444 return None; 4445 uint64_t ByteShift = BitShift / 8; 4446 4447 return Index < ByteShift 4448 ? ByteProvider::getConstantZero() 4449 : calculateByteProvider(Op->getOperand(0), Index - ByteShift, 4450 Depth + 1); 4451 } 4452 case ISD::ZERO_EXTEND: { 4453 SDValue NarrowOp = Op->getOperand(0); 4454 unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits(); 4455 if (NarrowBitWidth % 8 != 0) 4456 return None; 4457 uint64_t NarrowByteWidth = NarrowBitWidth / 8; 4458 4459 return Index >= NarrowByteWidth 4460 ? ByteProvider::getConstantZero() 4461 : calculateByteProvider(NarrowOp, Index, Depth + 1); 4462 } 4463 case ISD::LOAD: { 4464 auto L = cast<LoadSDNode>(Op.getNode()); 4465 4466 // TODO: support ext loads 4467 if (L->isVolatile() || L->isIndexed() || 4468 L->getExtensionType() != ISD::NON_EXTLOAD) 4469 return None; 4470 4471 return ByteProvider::getMemory(L, Index); 4472 } 4473 } 4474 4475 return None; 4476 } 4477 } // namespace 4478 4479 /// Match a pattern where a wide type scalar value is loaded by several narrow 4480 /// loads and combined by shifts and ors. Fold it into a single load or a load 4481 /// and a BSWAP if the targets supports it. 4482 /// 4483 /// Assuming little endian target: 4484 /// i8 *a = ... 4485 /// i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24) 4486 /// => 4487 /// i32 val = *((i32)a) 4488 /// 4489 /// i8 *a = ... 4490 /// i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3] 4491 /// => 4492 /// i32 val = BSWAP(*((i32)a)) 4493 /// 4494 /// TODO: This rule matches complex patterns with OR node roots and doesn't 4495 /// interact well with the worklist mechanism. When a part of the pattern is 4496 /// updated (e.g. one of the loads) its direct users are put into the worklist, 4497 /// but the root node of the pattern which triggers the load combine is not 4498 /// necessarily a direct user of the changed node. 
For example, once the address
4499 /// of t28 load is reassociated, load combine won't be triggered:
4500 ///             t25: i32 = add t4, Constant:i32<2>
4501 ///           t26: i64 = sign_extend t25
4502 ///         t27: i64 = add t2, t26
4503 ///       t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64
4504 ///     t29: i32 = zero_extend t28
4505 ///   t32: i32 = shl t29, Constant:i8<8>
4506 /// t33: i32 = or t23, t32
4507 /// As a possible fix visitLoad can check if the load can be a part of a load
4508 /// combine pattern and add corresponding OR roots to the worklist.
4509 SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
4510   assert(N->getOpcode() == ISD::OR &&
4511          "Can only match load combining against OR nodes");
4512
4513   // Handles simple types only
4514   EVT VT = N->getValueType(0);
4515   if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
4516     return SDValue();
4517   unsigned ByteWidth = VT.getSizeInBits() / 8;
4518
4519   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4520   // Before legalize we can introduce too wide illegal loads which will be later
4521   // split into legal sized loads. This enables us to combine i64 load by i8
4522   // patterns to a couple of i32 loads on 32 bit targets.
4523   if (LegalOperations && !TLI.isOperationLegal(ISD::LOAD, VT))
4524     return SDValue();
4525
4526   std::function<unsigned(unsigned, unsigned)> LittleEndianByteAt = [](
4527     unsigned BW, unsigned i) { return i; };
4528   std::function<unsigned(unsigned, unsigned)> BigEndianByteAt = [](
4529     unsigned BW, unsigned i) { return BW - i - 1; };
4530
4531   Optional<BaseIndexOffset> Base;
4532   SDValue Chain;
4533
4534   SmallSet<LoadSDNode *, 8> Loads;
4535   LoadSDNode *FirstLoad = nullptr;
4536
4537   bool IsBigEndianTarget = DAG.getDataLayout().isBigEndian();
4538   auto ByteAt = IsBigEndianTarget ? BigEndianByteAt : LittleEndianByteAt;
4539
4540   // Check if all the bytes of the OR we are looking at are loaded from the same
4541   // base address. Collect the byte offsets from the Base address in ByteOffsets.
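// Sketch of the little-endian i32 case (illustrative): for
//   a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
// the loop records ByteOffsets == {0, 1, 2, 3}, which the endianness check
// further down identifies as a little endian load of the whole word.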
4542 SmallVector<int64_t, 4> ByteOffsets(ByteWidth); 4543 for (unsigned i = 0; i < ByteWidth; i++) { 4544 auto P = calculateByteProvider(SDValue(N, 0), i, 0, /*Root=*/true); 4545 if (!P || !P->isMemory()) // All the bytes must be loaded from memory 4546 return SDValue(); 4547 4548 LoadSDNode *L = P->Load; 4549 assert(L->hasNUsesOfValue(1, 0) && !L->isVolatile() && !L->isIndexed() && 4550 (L->getExtensionType() == ISD::NON_EXTLOAD) && 4551 "Must be enforced by calculateByteProvider"); 4552 assert(L->getOffset().isUndef() && "Unindexed load must have undef offset"); 4553 4554 // All loads must share the same chain 4555 SDValue LChain = L->getChain(); 4556 if (!Chain) 4557 Chain = LChain; 4558 else if (Chain != LChain) 4559 return SDValue(); 4560 4561 // Loads must share the same base address 4562 BaseIndexOffset Ptr = BaseIndexOffset::match(L->getBasePtr(), DAG); 4563 if (!Base) 4564 Base = Ptr; 4565 else if (!Base->equalBaseIndex(Ptr)) 4566 return SDValue(); 4567 4568 // Calculate the offset of the current byte from the base address 4569 unsigned LoadBitWidth = L->getMemoryVT().getSizeInBits(); 4570 assert(LoadBitWidth % 8 == 0 && 4571 "can only analyze providers for individual bytes not bit"); 4572 unsigned LoadByteWidth = LoadBitWidth / 8; 4573 int64_t MemoryByteOffset = ByteAt(LoadByteWidth, P->ByteOffset); 4574 int64_t ByteOffsetFromBase = Ptr.Offset + MemoryByteOffset; 4575 ByteOffsets[i] = ByteOffsetFromBase; 4576 4577 // Remember the first byte load 4578 if (ByteOffsetFromBase == 0) 4579 FirstLoad = L; 4580 4581 Loads.insert(L); 4582 } 4583 assert(Loads.size() > 0 && "All the bytes of the value must be loaded from " 4584 "memory, so there must be at least one load which produces the value"); 4585 assert(Base && "Base address of the accessed memory location must be set"); 4586 4587 // Check if the bytes of the OR we are looking at match with either big or 4588 // little endian value load 4589 bool BigEndian = true, LittleEndian = true; 4590 for (unsigned i = 0; i < ByteWidth; i++) { 4591 LittleEndian &= ByteOffsets[i] == LittleEndianByteAt(ByteWidth, i); 4592 BigEndian &= ByteOffsets[i] == BigEndianByteAt(ByteWidth, i); 4593 if (!BigEndian && !LittleEndian) 4594 return SDValue(); 4595 } 4596 assert((BigEndian != LittleEndian) && "should be either or"); 4597 assert(FirstLoad && "must be set"); 4598 4599 // The node we are looking at matches with the pattern, check if we can 4600 // replace it with a single load and bswap if needed. 4601 4602 // If the load needs byte swap check if the target supports it 4603 bool NeedsBswap = IsBigEndianTarget != BigEndian; 4604 4605 // Before legalize we can introduce illegal bswaps which will be later 4606 // converted to an explicit bswap sequence. This way we end up with a single 4607 // load and byte shuffling instead of several loads and byte shuffling. 4608 if (NeedsBswap && LegalOperations && !TLI.isOperationLegal(ISD::BSWAP, VT)) 4609 return SDValue(); 4610 4611 // Check that a load of the wide type is both allowed and fast on the target 4612 bool Fast = false; 4613 bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), 4614 VT, FirstLoad->getAddressSpace(), 4615 FirstLoad->getAlignment(), &Fast); 4616 if (!Allowed || !Fast) 4617 return SDValue(); 4618 4619 SDValue NewLoad = 4620 DAG.getLoad(VT, SDLoc(N), Chain, FirstLoad->getBasePtr(), 4621 FirstLoad->getPointerInfo(), FirstLoad->getAlignment()); 4622 4623 // Transfer chain users from old loads to the new load. 
  for (LoadSDNode *L : Loads)
    DAG.ReplaceAllUsesOfValueWith(SDValue(L, 1), SDValue(NewLoad.getNode(), 1));

  return NeedsBswap ? DAG.getNode(ISD::BSWAP, SDLoc(N), VT, NewLoad) : NewLoad;
}

SDValue DAGCombiner::visitXOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    // fold (xor x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
  }

  // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
  if (N0.isUndef() && N1.isUndef())
    return DAG.getConstant(0, SDLoc(N), VT);
  // fold (xor x, undef) -> undef
  if (N0.isUndef())
    return N0;
  if (N1.isUndef())
    return N1;
  // fold (xor c1, c2) -> c1^c2
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::XOR, SDLoc(N), VT, N0C, N1C);
  // canonicalize constant to RHS
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
  // fold (xor x, 0) -> x
  if (isNullConstant(N1))
    return N0;
  // reassociate xor
  if (SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1))
    return RXOR;

  // fold !(x cc y) -> (x !cc y)
  SDValue LHS, RHS, CC;
  if (TLI.isConstTrueVal(N1.getNode()) && isSetCCEquivalent(N0, LHS, RHS, CC)) {
    bool isInt = LHS.getValueType().isInteger();
    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                                               isInt);

    if (!LegalOperations ||
        TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
      switch (N0.getOpcode()) {
      default:
        llvm_unreachable("Unhandled SetCC Equivalent!");
      case ISD::SETCC:
        return DAG.getSetCC(SDLoc(N), VT, LHS, RHS, NotCC);
      case ISD::SELECT_CC:
        return DAG.getSelectCC(SDLoc(N), LHS, RHS, N0.getOperand(2),
                               N0.getOperand(3), NotCC);
      }
    }
  }

  // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
  if (isOneConstant(N1) && N0.getOpcode() == ISD::ZERO_EXTEND &&
      N0.getNode()->hasOneUse() &&
      isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
    SDValue V = N0.getOperand(0);
    SDLoc DL(N0);
    V = DAG.getNode(ISD::XOR, DL, V.getValueType(), V,
                    DAG.getConstant(1, DL, V.getValueType()));
    AddToWorklist(V.getNode());
    return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V);
  }

  // fold (not (or x, y)) -> (and (not x), (not y)) and
  // fold (not (and x, y)) -> (or (not x), (not y)) iff x or y is a setcc
  if (isOneConstant(N1) && VT == MVT::i1 &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ?
ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
      AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
      return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
    }
  }
  // fold (not (or x, y)) -> (and (not x), (not y)) and
  // fold (not (and x, y)) -> (or (not x), (not y)) iff x or y is a constant
  if (isAllOnesConstant(N1) &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
      AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
      return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
    }
  }
  // fold (xor (and x, y), y) -> (and (not x), y)
  if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
      N0->getOperand(1) == N1) {
    SDValue X = N0->getOperand(0);
    SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
    AddToWorklist(NotX.getNode());
    return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1);
  }
  // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
  if (N1C && N0.getOpcode() == ISD::XOR) {
    if (const ConstantSDNode *N00C = getAsNonOpaqueConstant(N0.getOperand(0))) {
      SDLoc DL(N);
      return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1),
                         DAG.getConstant(N1C->getAPIntValue() ^
                                         N00C->getAPIntValue(), DL, VT));
    }
    if (const ConstantSDNode *N01C = getAsNonOpaqueConstant(N0.getOperand(1))) {
      SDLoc DL(N);
      return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
                         DAG.getConstant(N1C->getAPIntValue() ^
                                         N01C->getAPIntValue(), DL, VT));
    }
  }
  // fold (xor x, x) -> 0
  if (N0 == N1)
    return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);

  // fold (xor (shl 1, x), -1) -> (rotl ~1, x)
  // Here is a concrete example of this equivalence:
  // i16   x == 14
  // i16 shl == 1 << 14 == 16384 == 0b0100000000000000
  // i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111
  //
  // =>
  //
  // i16     ~1      == 0b1111111111111110
  // i16 rol(~1, 14) == 0b1011111111111111
  //
  // Some additional tips to help conceptualize this transform:
  // - Try to see the operation as placing a single zero in a value of all ones.
  // - There exists no value for x which would make the result zero.
  // - Values of x larger than the bitwidth are undefined and do not require a
  //   consistent result.
  // - Pushing the zero left requires shifting one-bits in from the right.
  // A rotate left of ~1 is a nice way of achieving the desired result.
  if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0.getOpcode() == ISD::SHL
      && isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) {
    SDLoc DL(N);
    return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
                       N0.getOperand(1));
  }

  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
  if (N0.getOpcode() == N1.getOpcode())
    if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
      return Tmp;

  // Simplify the expression using non-local knowledge.
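  // (For example, if all of the demanded bits of one operand are known to be
  // zero, SimplifyDemandedBits replaces the xor with the other operand.)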
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// Handle transforms common to the three shifts, when the shift amount is a
/// constant.
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
  SDNode *LHS = N->getOperand(0).getNode();
  if (!LHS->hasOneUse()) return SDValue();

  // We want to pull some binops through shifts, so that we have (and (shift))
  // instead of (shift (and)), likewise for add, or, xor, etc. This sort of
  // thing happens with address calculations, so it's important to canonicalize
  // it.
  bool HighBitSet = false; // Can we transform this if the high bit is set?

  switch (LHS->getOpcode()) {
  default: return SDValue();
  case ISD::OR:
  case ISD::XOR:
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  case ISD::AND:
    HighBitSet = true;  // We can only transform sra if the high bit is set.
    break;
  case ISD::ADD:
    if (N->getOpcode() != ISD::SHL)
      return SDValue(); // only shl(add) not sr[al](add).
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  }

  // We require the RHS of the binop to be a constant and not opaque as well.
  ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS->getOperand(1));
  if (!BinOpCst) return SDValue();

  // FIXME: disable this unless the input to the binop is a shift by a constant
  // or is a copy/select. Enable this in other cases when we can determine that
  // it is exactly profitable.
  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
  bool isShift = BinOpLHSVal->getOpcode() == ISD::SHL ||
                 BinOpLHSVal->getOpcode() == ISD::SRA ||
                 BinOpLHSVal->getOpcode() == ISD::SRL;
  bool isCopyOrSelect = BinOpLHSVal->getOpcode() == ISD::CopyFromReg ||
                        BinOpLHSVal->getOpcode() == ISD::SELECT;

  if ((!isShift || !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1))) &&
      !isCopyOrSelect)
    return SDValue();

  if (isCopyOrSelect && N->hasOneUse())
    return SDValue();

  EVT VT = N->getValueType(0);

  // If this is a signed shift right, and the high bit is modified by the
  // logical operation, do not perform the transformation. The HighBitSet
  // boolean indicates the value of the high bit of the constant which would
  // cause it to be modified for this operation.
  if (N->getOpcode() == ISD::SRA) {
    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
    if (BinOpRHSSignSet != HighBitSet)
      return SDValue();
  }

  if (!TLI.isDesirableToCommuteWithShift(LHS))
    return SDValue();

  // Fold the constants, shifting the binop RHS by the shift amount.
  SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
                               N->getValueType(0),
                               LHS->getOperand(1), N->getOperand(1));
  assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");

  // Create the new shift.
  SDValue NewShift = DAG.getNode(N->getOpcode(),
                                 SDLoc(LHS->getOperand(0)),
                                 VT, LHS->getOperand(0), N->getOperand(1));

  // Create the new binop.
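  // As an illustrative example, for (shl (add x, 3), 2) this produces
  // (add (shl x, 2), 12), since NewRHS folds to (shl 3, 2) == 12.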
4868 return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS); 4869 } 4870 4871 SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) { 4872 assert(N->getOpcode() == ISD::TRUNCATE); 4873 assert(N->getOperand(0).getOpcode() == ISD::AND); 4874 4875 // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC) 4876 if (N->hasOneUse() && N->getOperand(0).hasOneUse()) { 4877 SDValue N01 = N->getOperand(0).getOperand(1); 4878 if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) { 4879 SDLoc DL(N); 4880 EVT TruncVT = N->getValueType(0); 4881 SDValue N00 = N->getOperand(0).getOperand(0); 4882 SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00); 4883 SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01); 4884 AddToWorklist(Trunc00.getNode()); 4885 AddToWorklist(Trunc01.getNode()); 4886 return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01); 4887 } 4888 } 4889 4890 return SDValue(); 4891 } 4892 4893 SDValue DAGCombiner::visitRotate(SDNode *N) { 4894 // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))). 4895 if (N->getOperand(1).getOpcode() == ISD::TRUNCATE && 4896 N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) { 4897 if (SDValue NewOp1 = 4898 distributeTruncateThroughAnd(N->getOperand(1).getNode())) 4899 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), 4900 N->getOperand(0), NewOp1); 4901 } 4902 return SDValue(); 4903 } 4904 4905 SDValue DAGCombiner::visitSHL(SDNode *N) { 4906 SDValue N0 = N->getOperand(0); 4907 SDValue N1 = N->getOperand(1); 4908 EVT VT = N0.getValueType(); 4909 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 4910 4911 // fold vector ops 4912 if (VT.isVector()) { 4913 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 4914 return FoldedVOp; 4915 4916 BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1); 4917 // If setcc produces all-one true value then: 4918 // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV) 4919 if (N1CV && N1CV->isConstant()) { 4920 if (N0.getOpcode() == ISD::AND) { 4921 SDValue N00 = N0->getOperand(0); 4922 SDValue N01 = N0->getOperand(1); 4923 BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01); 4924 4925 if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC && 4926 TLI.getBooleanContents(N00.getOperand(0).getValueType()) == 4927 TargetLowering::ZeroOrNegativeOneBooleanContent) { 4928 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, 4929 N01CV, N1CV)) 4930 return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C); 4931 } 4932 } 4933 } 4934 } 4935 4936 ConstantSDNode *N1C = isConstOrConstSplat(N1); 4937 4938 // fold (shl c1, c2) -> c1<<c2 4939 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 4940 if (N0C && N1C && !N1C->isOpaque()) 4941 return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C); 4942 // fold (shl 0, x) -> 0 4943 if (isNullConstant(N0)) 4944 return N0; 4945 // fold (shl x, c >= size(x)) -> undef 4946 if (N1C && N1C->getAPIntValue().uge(OpSizeInBits)) 4947 return DAG.getUNDEF(VT); 4948 // fold (shl x, 0) -> x 4949 if (N1C && N1C->isNullValue()) 4950 return N0; 4951 // fold (shl undef, x) -> 0 4952 if (N0.isUndef()) 4953 return DAG.getConstant(0, SDLoc(N), VT); 4954 // if (shl x, c) is known to be zero, return 0 4955 if (DAG.MaskedValueIsZero(SDValue(N, 0), 4956 APInt::getAllOnesValue(OpSizeInBits))) 4957 return DAG.getConstant(0, SDLoc(N), VT); 4958 // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))). 
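  // As an illustrative example:
  //   (shl x, (trunc:i8 (and y:i32, 31))) -> (shl x, (and (trunc:i8 y), 31))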
4959 if (N1.getOpcode() == ISD::TRUNCATE && 4960 N1.getOperand(0).getOpcode() == ISD::AND) { 4961 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) 4962 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1); 4963 } 4964 4965 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 4966 return SDValue(N, 0); 4967 4968 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2)) 4969 if (N1C && N0.getOpcode() == ISD::SHL) { 4970 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 4971 SDLoc DL(N); 4972 APInt c1 = N0C1->getAPIntValue(); 4973 APInt c2 = N1C->getAPIntValue(); 4974 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 4975 4976 APInt Sum = c1 + c2; 4977 if (Sum.uge(OpSizeInBits)) 4978 return DAG.getConstant(0, DL, VT); 4979 4980 return DAG.getNode( 4981 ISD::SHL, DL, VT, N0.getOperand(0), 4982 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 4983 } 4984 } 4985 4986 // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2))) 4987 // For this to be valid, the second form must not preserve any of the bits 4988 // that are shifted out by the inner shift in the first form. This means 4989 // the outer shift size must be >= the number of bits added by the ext. 4990 // As a corollary, we don't care what kind of ext it is. 4991 if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND || 4992 N0.getOpcode() == ISD::ANY_EXTEND || 4993 N0.getOpcode() == ISD::SIGN_EXTEND) && 4994 N0.getOperand(0).getOpcode() == ISD::SHL) { 4995 SDValue N0Op0 = N0.getOperand(0); 4996 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) { 4997 APInt c1 = N0Op0C1->getAPIntValue(); 4998 APInt c2 = N1C->getAPIntValue(); 4999 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 5000 5001 EVT InnerShiftVT = N0Op0.getValueType(); 5002 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits(); 5003 if (c2.uge(OpSizeInBits - InnerShiftSize)) { 5004 SDLoc DL(N0); 5005 APInt Sum = c1 + c2; 5006 if (Sum.uge(OpSizeInBits)) 5007 return DAG.getConstant(0, DL, VT); 5008 5009 return DAG.getNode( 5010 ISD::SHL, DL, VT, 5011 DAG.getNode(N0.getOpcode(), DL, VT, N0Op0->getOperand(0)), 5012 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 5013 } 5014 } 5015 } 5016 5017 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C)) 5018 // Only fold this if the inner zext has no other uses to avoid increasing 5019 // the total number of instructions. 
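  // As an illustrative example, with C == 4:
  //   (shl (zext:i32 (srl:i16 x, 4)), 4)
  //     -> (zext:i32 (shl:i16 (srl:i16 x, 4), 4))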
  if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
      N0.getOperand(0).getOpcode() == ISD::SRL) {
    SDValue N0Op0 = N0.getOperand(0);
    if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
      if (N0Op0C1->getAPIntValue().ult(VT.getScalarSizeInBits())) {
        uint64_t c1 = N0Op0C1->getZExtValue();
        uint64_t c2 = N1C->getZExtValue();
        if (c1 == c2) {
          SDValue NewOp0 = N0.getOperand(0);
          EVT CountVT = NewOp0.getOperand(1).getValueType();
          SDLoc DL(N);
          SDValue NewSHL = DAG.getNode(ISD::SHL, DL, NewOp0.getValueType(),
                                       NewOp0,
                                       DAG.getConstant(c2, DL, CountVT));
          AddToWorklist(NewSHL.getNode());
          return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
        }
      }
    }
  }

  // fold (shl (sr[la] exact X, C1), C2) -> (shl    X, (C2-C1)) if C1 <= C2
  // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C1-C2)) if C1  > C2
  if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
      cast<BinaryWithFlagsSDNode>(N0)->Flags.hasExact()) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      uint64_t C1 = N0C1->getZExtValue();
      uint64_t C2 = N1C->getZExtValue();
      SDLoc DL(N);
      if (C1 <= C2)
        return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                           DAG.getConstant(C2 - C1, DL, N1.getValueType()));
      return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
                         DAG.getConstant(C1 - C2, DL, N1.getValueType()));
    }
  }

  // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
  //                               (and (srl x, (sub c1, c2)), MASK)
  // Only fold this if the inner shift has no other uses -- if it does, folding
  // this will increase the total number of instructions.
  if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      uint64_t c1 = N0C1->getZExtValue();
      if (c1 < OpSizeInBits) {
        uint64_t c2 = N1C->getZExtValue();
        APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
        SDValue Shift;
        if (c2 > c1) {
          Mask = Mask.shl(c2 - c1);
          SDLoc DL(N);
          Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                              DAG.getConstant(c2 - c1, DL, N1.getValueType()));
        } else {
          Mask = Mask.lshr(c1 - c2);
          SDLoc DL(N);
          Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
                              DAG.getConstant(c1 - c2, DL, N1.getValueType()));
        }
        SDLoc DL(N0);
        return DAG.getNode(ISD::AND, DL, VT, Shift,
                           DAG.getConstant(Mask, DL, VT));
      }
    }
  }

  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
  if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) &&
      isConstantOrConstantVector(N1, /* No Opaques */ true)) {
    unsigned BitSize = VT.getScalarSizeInBits();
    SDLoc DL(N);
    SDValue AllBits = DAG.getConstant(APInt::getAllOnesValue(BitSize), DL, VT);
    SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1);
    return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask);
  }

  // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // Variant of version done on multiply, except mul by a power of 2 is turned
  // into a shift.
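  // As an illustrative example, (shl (add x, 5), 2) -> (add (shl x, 2), 20).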
  if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
      isConstantOrConstantVector(N1, /* No Opaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
    SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
    SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
    AddToWorklist(Shl0.getNode());
    AddToWorklist(Shl1.getNode());
    return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1);
  }

  // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
  if (N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse() &&
      isConstantOrConstantVector(N1, /* No Opaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
    SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
    if (isConstantOrConstantVector(Shl))
      return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Shl);
  }

  if (N1C && !N1C->isOpaque())
    if (SDValue NewSHL = visitShiftByConstant(N, N1C))
      return NewSHL;

  return SDValue();
}

SDValue DAGCombiner::visitSRA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();

  // Arithmetic shifting an all-sign-bit value is a no-op.
  if (DAG.ComputeNumSignBits(N0) == OpSizeInBits)
    return N0;

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  ConstantSDNode *N1C = isConstOrConstSplat(N1);

  // fold (sra c1, c2) -> c1 >> c2 (arithmetic shift)
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  if (N0C && N1C && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
  // fold (sra 0, x) -> 0
  if (isNullConstant(N0))
    return N0;
  // fold (sra -1, x) -> -1
  if (isAllOnesConstant(N0))
    return N0;
  // fold (sra x, c >= size(x)) -> undef
  if (N1C && N1C->getAPIntValue().uge(OpSizeInBits))
    return DAG.getUNDEF(VT);
  // fold (sra x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
  // sext_inreg.
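  // As an illustrative example, on i32 (sra (shl x, 24), 24) becomes
  // (sign_extend_inreg x, i8) when SIGN_EXTEND_INREG of i8 is legal.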
  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
    unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
    if (VT.isVector())
      ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               ExtVT, VT.getVectorNumElements());
    if ((!LegalOperations ||
         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
                         N0.getOperand(0), DAG.getValueType(ExtVT));
  }

  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SRA) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      SDLoc DL(N);
      APInt c1 = N0C1->getAPIntValue();
      APInt c2 = N1C->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);

      APInt Sum = c1 + c2;
      if (Sum.uge(OpSizeInBits))
        Sum = APInt(OpSizeInBits, OpSizeInBits - 1);

      return DAG.getNode(
          ISD::SRA, DL, VT, N0.getOperand(0),
          DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType()));
    }
  }

  // fold (sra (shl X, m), (sub result_size, n))
  //   -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
  // result_size - n != m.
  // If truncate is free for the target, sext(shl) is likely to result in
  // better code.
  if (N0.getOpcode() == ISD::SHL && N1C) {
    // Get the two constants of the shifts, CN0 = m, CN = n.
    const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
    if (N01C) {
      LLVMContext &Ctx = *DAG.getContext();
      // Determine what the truncate's result bitsize and type would be.
      EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());

      if (VT.isVector())
        TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());

      // Determine the residual right-shift amount.
      int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();

      // If the shift is not a no-op (in which case this should be just a sign
      // extend already), the truncate-to type is legal, sign_extend is legal
      // on that type, and the truncate to that type is both legal and free,
      // perform the transform.
      if ((ShiftAmt > 0) &&
          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
          TLI.isTruncateFree(VT, TruncVT)) {

        SDLoc DL(N);
        SDValue Amt = DAG.getConstant(ShiftAmt, DL,
            getShiftAmountTy(N0.getOperand(0).getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
                                    N0.getOperand(0), Amt);
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
                                    Shift);
        return DAG.getNode(ISD::SIGN_EXTEND, DL,
                           N->getValueType(0), Trunc);
      }
    }
  }

  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
5232 if (N1.getOpcode() == ISD::TRUNCATE && 5233 N1.getOperand(0).getOpcode() == ISD::AND) { 5234 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) 5235 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1); 5236 } 5237 5238 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2)) 5239 // if c1 is equal to the number of bits the trunc removes 5240 if (N0.getOpcode() == ISD::TRUNCATE && 5241 (N0.getOperand(0).getOpcode() == ISD::SRL || 5242 N0.getOperand(0).getOpcode() == ISD::SRA) && 5243 N0.getOperand(0).hasOneUse() && 5244 N0.getOperand(0).getOperand(1).hasOneUse() && 5245 N1C) { 5246 SDValue N0Op0 = N0.getOperand(0); 5247 if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) { 5248 unsigned LargeShiftVal = LargeShift->getZExtValue(); 5249 EVT LargeVT = N0Op0.getValueType(); 5250 5251 if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) { 5252 SDLoc DL(N); 5253 SDValue Amt = 5254 DAG.getConstant(LargeShiftVal + N1C->getZExtValue(), DL, 5255 getShiftAmountTy(N0Op0.getOperand(0).getValueType())); 5256 SDValue SRA = DAG.getNode(ISD::SRA, DL, LargeVT, 5257 N0Op0.getOperand(0), Amt); 5258 return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA); 5259 } 5260 } 5261 } 5262 5263 // Simplify, based on bits shifted out of the LHS. 5264 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 5265 return SDValue(N, 0); 5266 5267 5268 // If the sign bit is known to be zero, switch this to a SRL. 5269 if (DAG.SignBitIsZero(N0)) 5270 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1); 5271 5272 if (N1C && !N1C->isOpaque()) 5273 if (SDValue NewSRA = visitShiftByConstant(N, N1C)) 5274 return NewSRA; 5275 5276 return SDValue(); 5277 } 5278 5279 SDValue DAGCombiner::visitSRL(SDNode *N) { 5280 SDValue N0 = N->getOperand(0); 5281 SDValue N1 = N->getOperand(1); 5282 EVT VT = N0.getValueType(); 5283 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 5284 5285 // fold vector ops 5286 if (VT.isVector()) 5287 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 5288 return FoldedVOp; 5289 5290 ConstantSDNode *N1C = isConstOrConstSplat(N1); 5291 5292 // fold (srl c1, c2) -> c1 >>u c2 5293 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 5294 if (N0C && N1C && !N1C->isOpaque()) 5295 return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C); 5296 // fold (srl 0, x) -> 0 5297 if (isNullConstant(N0)) 5298 return N0; 5299 // fold (srl x, c >= size(x)) -> undef 5300 if (N1C && N1C->getAPIntValue().uge(OpSizeInBits)) 5301 return DAG.getUNDEF(VT); 5302 // fold (srl x, 0) -> x 5303 if (N1C && N1C->isNullValue()) 5304 return N0; 5305 // if (srl x, c) is known to be zero, return 0 5306 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 5307 APInt::getAllOnesValue(OpSizeInBits))) 5308 return DAG.getConstant(0, SDLoc(N), VT); 5309 5310 // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2)) 5311 if (N1C && N0.getOpcode() == ISD::SRL) { 5312 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 5313 SDLoc DL(N); 5314 APInt c1 = N0C1->getAPIntValue(); 5315 APInt c2 = N1C->getAPIntValue(); 5316 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 5317 5318 APInt Sum = c1 + c2; 5319 if (Sum.uge(OpSizeInBits)) 5320 return DAG.getConstant(0, DL, VT); 5321 5322 return DAG.getNode( 5323 ISD::SRL, DL, VT, N0.getOperand(0), 5324 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 5325 } 5326 } 5327 5328 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2))) 5329 if (N1C && N0.getOpcode() == ISD::TRUNCATE && 5330 N0.getOperand(0).getOpcode() == ISD::SRL && 
      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
    uint64_t c1 =
      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    EVT InnerShiftVT = N0.getOperand(0).getValueType();
    EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
    uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
    // This is only valid if the OpSizeInBits + c1 = size of inner shift.
    if (c1 + OpSizeInBits == InnerShiftSize) {
      SDLoc DL(N0);
      if (c1 + c2 >= InnerShiftSize)
        return DAG.getConstant(0, DL, VT);
      return DAG.getNode(ISD::TRUNCATE, DL, VT,
                         DAG.getNode(ISD::SRL, DL, InnerShiftVT,
                                     N0.getOperand(0)->getOperand(0),
                                     DAG.getConstant(c1 + c2, DL,
                                                     ShiftCountVT)));
    }
  }

  // fold (srl (shl x, c), c) -> (and x, cst2)
  if (N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
      isConstantOrConstantVector(N1, /* NoOpaques */ true)) {
    SDLoc DL(N);
    APInt AllBits = APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
    SDValue Mask =
        DAG.getNode(ISD::SRL, DL, VT, DAG.getConstant(AllBits, DL, VT), N1);
    AddToWorklist(Mask.getNode());
    return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), Mask);
  }

  // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
    // Shifting in all undef bits?
    EVT SmallVT = N0.getOperand(0).getValueType();
    unsigned BitSize = SmallVT.getScalarSizeInBits();
    if (N1C->getZExtValue() >= BitSize)
      return DAG.getUNDEF(VT);

    if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
      uint64_t ShiftAmt = N1C->getZExtValue();
      SDLoc DL0(N0);
      SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT,
                                       N0.getOperand(0),
                                       DAG.getConstant(ShiftAmt, DL0,
                                                    getShiftAmountTy(SmallVT)));
      AddToWorklist(SmallShift.getNode());
      APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
      SDLoc DL(N);
      return DAG.getNode(ISD::AND, DL, VT,
                         DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
                         DAG.getConstant(Mask, DL, VT));
    }
  }

  // fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
  // bit, which is unmodified by sra.
  if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) {
    if (N0.getOpcode() == ISD::SRA)
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
  }

  // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
  if (N1C && N0.getOpcode() == ISD::CTLZ &&
      N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
    APInt KnownZero, KnownOne;
    DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne);

    // If any of the input bits are KnownOne, then the input couldn't be all
    // zeros, thus the result of the srl will always be zero.
    if (KnownOne.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);

    // If all of the bits input to the ctlz node are known to be zero, then
    // the result of the ctlz is "32" and the result of the shift is one.
    APInt UnknownBits = ~KnownZero;
    if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);

    // Otherwise, check to see if there is exactly one bit input to the ctlz.
    if ((UnknownBits & (UnknownBits - 1)) == 0) {
      // Okay, we know that only the single bit specified by UnknownBits
      // could be set on input to the CTLZ node. If this bit is set, the SRL
      // will return 0; if it is clear, it returns 1. Change the CTLZ/SRL pair
      // to an SRL/XOR pair, which is likely to simplify more.
      unsigned ShAmt = UnknownBits.countTrailingZeros();
      SDValue Op = N0.getOperand(0);

      if (ShAmt) {
        SDLoc DL(N0);
        Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                         DAG.getConstant(ShAmt, DL,
                                         getShiftAmountTy(Op.getValueType())));
        AddToWorklist(Op.getNode());
      }

      SDLoc DL(N);
      return DAG.getNode(ISD::XOR, DL, VT,
                         Op, DAG.getConstant(1, DL, VT));
    }
  }

  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
  }

  // fold operands of srl based on knowledge that the low bits are not
  // demanded.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  if (N1C && !N1C->isOpaque())
    if (SDValue NewSRL = visitShiftByConstant(N, N1C))
      return NewSRL;

  // Attempt to convert a srl of a load into a narrower zero-extending load.
  if (SDValue NarrowLoad = ReduceLoadWidth(N))
    return NarrowLoad;

  // Here is a common situation. We want to optimize:
  //
  //   %a = ...
  //   %b = and i32 %a, 2
  //   %c = srl i32 %b, 1
  //   brcond i32 %c ...
  //
  // into
  //
  //   %a = ...
  //   %b = and %a, 2
  //   %c = setcc eq %b, 0
  //   brcond %c ...
  //
  // However, after the source operand of the SRL is optimized into an AND, the
  // SRL itself may not be optimized further. Look for it and add the BRCOND
  // into the worklist.
  if (N->hasOneUse()) {
    SDNode *Use = *N->use_begin();
    if (Use->getOpcode() == ISD::BRCOND)
      AddToWorklist(Use);
    else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
      // Also look past the truncate.
5474 Use = *Use->use_begin(); 5475 if (Use->getOpcode() == ISD::BRCOND) 5476 AddToWorklist(Use); 5477 } 5478 } 5479 5480 return SDValue(); 5481 } 5482 5483 SDValue DAGCombiner::visitBSWAP(SDNode *N) { 5484 SDValue N0 = N->getOperand(0); 5485 EVT VT = N->getValueType(0); 5486 5487 // fold (bswap c1) -> c2 5488 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5489 return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0); 5490 // fold (bswap (bswap x)) -> x 5491 if (N0.getOpcode() == ISD::BSWAP) 5492 return N0->getOperand(0); 5493 return SDValue(); 5494 } 5495 5496 SDValue DAGCombiner::visitBITREVERSE(SDNode *N) { 5497 SDValue N0 = N->getOperand(0); 5498 EVT VT = N->getValueType(0); 5499 5500 // fold (bitreverse c1) -> c2 5501 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5502 return DAG.getNode(ISD::BITREVERSE, SDLoc(N), VT, N0); 5503 // fold (bitreverse (bitreverse x)) -> x 5504 if (N0.getOpcode() == ISD::BITREVERSE) 5505 return N0.getOperand(0); 5506 return SDValue(); 5507 } 5508 5509 SDValue DAGCombiner::visitCTLZ(SDNode *N) { 5510 SDValue N0 = N->getOperand(0); 5511 EVT VT = N->getValueType(0); 5512 5513 // fold (ctlz c1) -> c2 5514 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5515 return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0); 5516 return SDValue(); 5517 } 5518 5519 SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) { 5520 SDValue N0 = N->getOperand(0); 5521 EVT VT = N->getValueType(0); 5522 5523 // fold (ctlz_zero_undef c1) -> c2 5524 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5525 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0); 5526 return SDValue(); 5527 } 5528 5529 SDValue DAGCombiner::visitCTTZ(SDNode *N) { 5530 SDValue N0 = N->getOperand(0); 5531 EVT VT = N->getValueType(0); 5532 5533 // fold (cttz c1) -> c2 5534 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5535 return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0); 5536 return SDValue(); 5537 } 5538 5539 SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) { 5540 SDValue N0 = N->getOperand(0); 5541 EVT VT = N->getValueType(0); 5542 5543 // fold (cttz_zero_undef c1) -> c2 5544 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5545 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0); 5546 return SDValue(); 5547 } 5548 5549 SDValue DAGCombiner::visitCTPOP(SDNode *N) { 5550 SDValue N0 = N->getOperand(0); 5551 EVT VT = N->getValueType(0); 5552 5553 // fold (ctpop c1) -> c2 5554 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5555 return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0); 5556 return SDValue(); 5557 } 5558 5559 5560 /// \brief Generate Min/Max node 5561 static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, 5562 SDValue RHS, SDValue True, SDValue False, 5563 ISD::CondCode CC, const TargetLowering &TLI, 5564 SelectionDAG &DAG) { 5565 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True)) 5566 return SDValue(); 5567 5568 switch (CC) { 5569 case ISD::SETOLT: 5570 case ISD::SETOLE: 5571 case ISD::SETLT: 5572 case ISD::SETLE: 5573 case ISD::SETULT: 5574 case ISD::SETULE: { 5575 unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM; 5576 if (TLI.isOperationLegal(Opcode, VT)) 5577 return DAG.getNode(Opcode, DL, VT, LHS, RHS); 5578 return SDValue(); 5579 } 5580 case ISD::SETOGT: 5581 case ISD::SETOGE: 5582 case ISD::SETGT: 5583 case ISD::SETGE: 5584 case ISD::SETUGT: 5585 case ISD::SETUGE: { 5586 unsigned Opcode = (LHS == True) ? 
ISD::FMAXNUM : ISD::FMINNUM;
    if (TLI.isOperationLegal(Opcode, VT))
      return DAG.getNode(Opcode, DL, VT, LHS, RHS);
    return SDValue();
  }
  default:
    return SDValue();
  }
}

// TODO: We should handle other cases of selecting between {-1,0,1} here.
SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
  SDValue Cond = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  EVT VT = N->getValueType(0);
  EVT CondVT = Cond.getValueType();
  SDLoc DL(N);

  // fold (select Cond, 0, 1) -> (xor Cond, 1)
  // We can't do this reliably if integer-based booleans have different contents
  // from floating-point-based booleans. This is because we can't tell whether
  // we have an integer-based boolean or a floating-point-based boolean unless
  // we can find the SETCC that produced it and inspect its operands. This is
  // fairly easy if C is the SETCC node, but it can potentially be
  // undiscoverable (or not reasonably discoverable). For example, it could be
  // in another basic block or it could require searching a complicated
  // expression.
  if (VT.isInteger() &&
      (CondVT == MVT::i1 || (CondVT.isInteger() &&
                             TLI.getBooleanContents(false, true) ==
                                 TargetLowering::ZeroOrOneBooleanContent &&
                             TLI.getBooleanContents(false, false) ==
                                 TargetLowering::ZeroOrOneBooleanContent)) &&
      isNullConstant(N1) && isOneConstant(N2)) {
    SDValue NotCond = DAG.getNode(ISD::XOR, DL, CondVT, Cond,
                                  DAG.getConstant(1, DL, CondVT));
    if (VT.bitsEq(CondVT))
      return NotCond;
    return DAG.getZExtOrTrunc(NotCond, DL, VT);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSELECT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  EVT VT = N->getValueType(0);
  EVT VT0 = N0.getValueType();

  // fold (select C, X, X) -> X
  if (N1 == N2)
    return N1;
  if (const ConstantSDNode *N0C = dyn_cast<const ConstantSDNode>(N0)) {
    // fold (select true, X, Y) -> X
    // fold (select false, X, Y) -> Y
    return !N0C->isNullValue() ? N1 : N2;
  }
  // fold (select X, X, Y) -> (or X, Y)
  // fold (select X, 1, Y) -> (or X, Y)
  if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
    return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);

  if (SDValue V = foldSelectOfConstants(N))
    return V;

  // fold (select C, 0, X) -> (and (not C), X)
  if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
    SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
    AddToWorklist(NOTNode.getNode());
    return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
  }
  // fold (select C, X, 1) -> (or (not C), X)
  if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
    SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
    AddToWorklist(NOTNode.getNode());
    return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
  }
  // fold (select X, Y, X) -> (and X, Y)
  // fold (select X, Y, 0) -> (and X, Y)
  if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
    return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N1, N2))
    return SDValue(N, 0); // Don't revisit N.

  if (VT0 == MVT::i1) {
    // The code in this block deals with the following 2 equivalences:
    //    select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y))
    //    select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y)
    // The target can specify its preferred form with the
    // shouldNormalizeToSelectSequence() callback. However, we always transform
    // to the right-hand form if the inner select already exists in the DAG,
    // and we always transform to the left-hand form if we know that we can
    // further optimize the combination of the conditions.
    bool normalizeToSequence
      = TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
    // select (and Cond0, Cond1), X, Y
    //   -> select Cond0, (select Cond1, X, Y), Y
    if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
      SDValue Cond0 = N0->getOperand(0);
      SDValue Cond1 = N0->getOperand(1);
      SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
                                        N1.getValueType(), Cond1, N1, N2);
      if (normalizeToSequence || !InnerSelect.use_empty())
        return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0,
                           InnerSelect, N2);
    }
    // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
    if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
      SDValue Cond0 = N0->getOperand(0);
      SDValue Cond1 = N0->getOperand(1);
      SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
                                        N1.getValueType(), Cond1, N1, N2);
      if (normalizeToSequence || !InnerSelect.use_empty())
        return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0, N1,
                           InnerSelect);
    }

    // select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
    if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) {
      SDValue N1_0 = N1->getOperand(0);
      SDValue N1_1 = N1->getOperand(1);
      SDValue N1_2 = N1->getOperand(2);
      if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
        // Create the actual and node if we can generate good code for it.
        if (!normalizeToSequence) {
          SDValue And = DAG.getNode(ISD::AND, SDLoc(N), N0.getValueType(),
                                    N0, N1_0);
          return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), And,
                             N1_1, N2);
        }
        // Otherwise see if we can optimize the "and" to a better pattern.
        if (SDValue Combined = visitANDLike(N0, N1_0, N))
          return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
                             N1_1, N2);
      }
    }
    // select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
    if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) {
      SDValue N2_0 = N2->getOperand(0);
      SDValue N2_1 = N2->getOperand(1);
      SDValue N2_2 = N2->getOperand(2);
      if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
        // Create the actual or node if we can generate good code for it.
        if (!normalizeToSequence) {
          SDValue Or = DAG.getNode(ISD::OR, SDLoc(N), N0.getValueType(),
                                   N0, N2_0);
          return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Or,
                             N1, N2_2);
        }
        // Otherwise see if we can optimize to a better pattern.
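        // (visitORLike can, for example, fold an OR of two setcc nodes into
        // a single setcc when the operands and predicates allow it.)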
        if (SDValue Combined = visitORLike(N0, N2_0, N))
          return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined,
                             N1, N2_2);
      }
    }
  }

  // select (xor Cond, 1), X, Y -> select Cond, Y, X
  if (VT0 == MVT::i1) {
    if (N0->getOpcode() == ISD::XOR) {
      if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) {
        SDValue Cond0 = N0->getOperand(0);
        if (C->isOne())
          return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(),
                             Cond0, N2, N1);
      }
    }
  }

  // fold selects based on a setcc into other things, such as min/max/abs
  if (N0.getOpcode() == ISD::SETCC) {
    // select x, y (fcmp lt x, y) -> fminnum x, y
    // select x, y (fcmp gt x, y) -> fmaxnum x, y
    //
    // This is OK if we don't care about what happens if either operand is a
    // NaN.
    //

    // FIXME: Instead of testing for UnsafeFPMath, this should be checking for
    // no signed zeros as well as no nans.
    const TargetOptions &Options = DAG.getTarget().Options;
    if (Options.UnsafeFPMath &&
        VT.isFloatingPoint() && N0.hasOneUse() &&
        DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) {
      ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();

      if (SDValue FMinMax = combineMinNumMaxNum(SDLoc(N), VT, N0.getOperand(0),
                                                N0.getOperand(1), N1, N2, CC,
                                                TLI, DAG))
        return FMinMax;
    }

    if ((!LegalOperations &&
         TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
        TLI.isOperationLegal(ISD::SELECT_CC, VT))
      return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
                         N0.getOperand(0), N0.getOperand(1),
                         N1, N2, N0.getOperand(2));
    return SimplifySelect(SDLoc(N), N0, N1, N2);
  }

  return SDValue();
}

static
std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));

  // Split the inputs.
  SDValue Lo, Hi, LL, LH, RL, RH;
  std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
  std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);

  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));

  return std::make_pair(Lo, Hi);
}

// This function assumes all the vselect's arguments are CONCAT_VECTOR
// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = N->getValueType(0);
  int NumElems = VT.getVectorNumElements();
  assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
         RHS.getOpcode() == ISD::CONCAT_VECTORS &&
         Cond.getOpcode() == ISD::BUILD_VECTOR);

  // CONCAT_VECTORS can take an arbitrary number of operands. We only care
  // about the binary (two-operand) case here.
  if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
    return SDValue();

  // We're sure we have an even number of elements due to the
  // concat_vectors we have as arguments to vselect.
  // Skip BV elements until we find one that's not an UNDEF. After we find
  // the first non-UNDEF element, keep looping until we get to half the
  // length of the BV and check that all the non-UNDEF elements are the same.
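  // As an illustrative example, with Cond == <i1 1, i1 undef, i1 0, i1 0>
  // and NumElems == 4, the bottom half selects from LHS and the top half
  // from RHS, giving (concat_vectors LHS.op0, RHS.op1).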
  ConstantSDNode *BottomHalf = nullptr;
  for (int i = 0; i < NumElems / 2; ++i) {
    if (Cond->getOperand(i)->isUndef())
      continue;

    if (BottomHalf == nullptr)
      BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
    else if (Cond->getOperand(i).getNode() != BottomHalf)
      return SDValue();
  }

  // Do the same for the second half of the BuildVector
  ConstantSDNode *TopHalf = nullptr;
  for (int i = NumElems / 2; i < NumElems; ++i) {
    if (Cond->getOperand(i)->isUndef())
      continue;

    if (TopHalf == nullptr)
      TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
    else if (Cond->getOperand(i).getNode() != TopHalf)
      return SDValue();
  }

  assert(TopHalf && BottomHalf &&
         "One half of the selector was all UNDEFs and the other was all the "
         "same value. This should have been addressed before this function.");
  return DAG.getNode(
      ISD::CONCAT_VECTORS, DL, VT,
      BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
      TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
}

SDValue DAGCombiner::visitMSCATTER(SDNode *N) {

  if (Level >= AfterLegalizeTypes)
    return SDValue();

  MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
  SDValue Mask = MSC->getMask();
  SDValue Data = MSC->getValue();
  SDLoc DL(N);

  // If the MSCATTER data type requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
  // prevents the type legalizer from unrolling SETCC into scalar comparisons
  // and enables future optimizations (e.g. min/max pattern matching on X86).
  if (Mask.getOpcode() != ISD::SETCC)
    return SDValue();

  // Check if any splitting is required.
  if (TLI.getTypeAction(*DAG.getContext(), Data.getValueType()) !=
      TargetLowering::TypeSplitVector)
    return SDValue();
  SDValue MaskLo, MaskHi, Lo, Hi;
  std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);

  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MSC->getValueType(0));

  SDValue Chain = MSC->getChain();

  EVT MemoryVT = MSC->getMemoryVT();
  unsigned Alignment = MSC->getOriginalAlignment();

  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

  SDValue DataLo, DataHi;
  std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);

  SDValue BasePtr = MSC->getBasePtr();
  SDValue IndexLo, IndexHi;
  std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);

  MachineMemOperand *MMO = DAG.getMachineFunction().
    getMachineMemOperand(MSC->getPointerInfo(),
                         MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
                         Alignment, MSC->getAAInfo(), MSC->getRanges());

  SDValue OpsLo[] = { Chain, DataLo, MaskLo, BasePtr, IndexLo };
  Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(),
                            DL, OpsLo, MMO);

  SDValue OpsHi[] = {Chain, DataHi, MaskHi, BasePtr, IndexHi};
  Hi = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataHi.getValueType(),
                            DL, OpsHi, MMO);

  AddToWorklist(Lo.getNode());
  AddToWorklist(Hi.getNode());

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}

SDValue DAGCombiner::visitMSTORE(SDNode *N) {

  if (Level >= AfterLegalizeTypes)
    return SDValue();

  MaskedStoreSDNode *MST = dyn_cast<MaskedStoreSDNode>(N);
  SDValue Mask = MST->getMask();
  SDValue Data = MST->getValue();
  EVT VT = Data.getValueType();
  SDLoc DL(N);

  // If the MSTORE data type requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
  // prevents the type legalizer from unrolling SETCC into scalar comparisons
  // and enables future optimizations (e.g. min/max pattern matching on X86).
  if (Mask.getOpcode() == ISD::SETCC) {

    // Check if any splitting is required.
    if (TLI.getTypeAction(*DAG.getContext(), VT) !=
        TargetLowering::TypeSplitVector)
      return SDValue();

    SDValue MaskLo, MaskHi, Lo, Hi;
    std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);

    SDValue Chain = MST->getChain();
    SDValue Ptr   = MST->getBasePtr();

    EVT MemoryVT = MST->getMemoryVT();
    unsigned Alignment = MST->getOriginalAlignment();

    // If the alignment is equal to the vector size, take half of it for the
    // second half.
    unsigned SecondHalfAlignment =
      (Alignment == VT.getSizeInBits() / 8) ? Alignment / 2 : Alignment;

    EVT LoMemVT, HiMemVT;
    std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

    SDValue DataLo, DataHi;
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);

    MachineMemOperand *MMO = DAG.getMachineFunction().
      getMachineMemOperand(MST->getPointerInfo(),
                           MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
                           Alignment, MST->getAAInfo(), MST->getRanges());

    Lo = DAG.getMaskedStore(Chain, DL, DataLo, Ptr, MaskLo, LoMemVT, MMO,
                            MST->isTruncatingStore(),
                            MST->isCompressingStore());

    Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
                                     MST->isCompressingStore());

    MMO = DAG.getMachineFunction().
      getMachineMemOperand(MST->getPointerInfo(),
                           MachineMemOperand::MOStore, HiMemVT.getStoreSize(),
                           SecondHalfAlignment, MST->getAAInfo(),
                           MST->getRanges());

    Hi = DAG.getMaskedStore(Chain, DL, DataHi, Ptr, MaskHi, HiMemVT, MMO,
                            MST->isTruncatingStore(),
                            MST->isCompressingStore());

    AddToWorklist(Lo.getNode());
    AddToWorklist(Hi.getNode());

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
  }
  return SDValue();
}

SDValue DAGCombiner::visitMGATHER(SDNode *N) {

  if (Level >= AfterLegalizeTypes)
    return SDValue();

  MaskedGatherSDNode *MGT = dyn_cast<MaskedGatherSDNode>(N);
  SDValue Mask = MGT->getMask();
  SDLoc DL(N);

  // If the MGATHER result requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
  // prevents the type legalizer from unrolling SETCC into scalar comparisons
  // and enables future optimizations (e.g. min/max pattern matching on X86).

  if (Mask.getOpcode() != ISD::SETCC)
    return SDValue();

  EVT VT = N->getValueType(0);

  // Check if any splitting is required.
  if (TLI.getTypeAction(*DAG.getContext(), VT) !=
      TargetLowering::TypeSplitVector)
    return SDValue();

  SDValue MaskLo, MaskHi, Lo, Hi;
  std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);

  SDValue Src0 = MGT->getValue();
  SDValue Src0Lo, Src0Hi;
  std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);

  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);

  SDValue Chain = MGT->getChain();
  EVT MemoryVT = MGT->getMemoryVT();
  unsigned Alignment = MGT->getOriginalAlignment();

  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

  SDValue BasePtr = MGT->getBasePtr();
  SDValue Index = MGT->getIndex();
  SDValue IndexLo, IndexHi;
  std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);

  MachineMemOperand *MMO = DAG.getMachineFunction().
    getMachineMemOperand(MGT->getPointerInfo(),
                         MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
                         Alignment, MGT->getAAInfo(), MGT->getRanges());

  SDValue OpsLo[] = { Chain, Src0Lo, MaskLo, BasePtr, IndexLo };
  Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, DL, OpsLo,
                           MMO);

  SDValue OpsHi[] = {Chain, Src0Hi, MaskHi, BasePtr, IndexHi};
  Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, DL, OpsHi,
                           MMO);

  AddToWorklist(Lo.getNode());
  AddToWorklist(Hi.getNode());

  // Build a factor node to remember that this load is independent of the
  // other one.
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
                      Hi.getValue(1));

  // Legalized the chain result - switch anything that used the old chain to
  // use the new one.
  DAG.ReplaceAllUsesOfValueWith(SDValue(MGT, 1), Chain);

  SDValue GatherRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);

  SDValue RetOps[] = { GatherRes, Chain };
  return DAG.getMergeValues(RetOps, DL);
}

SDValue DAGCombiner::visitMLOAD(SDNode *N) {

  if (Level >= AfterLegalizeTypes)
    return SDValue();

  MaskedLoadSDNode *MLD = dyn_cast<MaskedLoadSDNode>(N);
  SDValue Mask = MLD->getMask();
  SDLoc DL(N);

  // If the MLOAD result requires splitting and the mask is provided by a
  // SETCC, then split both nodes and their operands before legalization. This
  // prevents the type legalizer from unrolling SETCC into scalar comparisons
  // and enables future optimizations (e.g. min/max pattern matching on X86).

  if (Mask.getOpcode() == ISD::SETCC) {
    EVT VT = N->getValueType(0);

    // Check if any splitting is required.
    if (TLI.getTypeAction(*DAG.getContext(), VT) !=
        TargetLowering::TypeSplitVector)
      return SDValue();

    SDValue MaskLo, MaskHi, Lo, Hi;
    std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);

    SDValue Src0 = MLD->getSrc0();
    SDValue Src0Lo, Src0Hi;
    std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);

    EVT LoVT, HiVT;
    std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0));

    SDValue Chain = MLD->getChain();
    SDValue Ptr   = MLD->getBasePtr();
    EVT MemoryVT = MLD->getMemoryVT();
    unsigned Alignment = MLD->getOriginalAlignment();

    // If the alignment is equal to the vector size, take half of it for the
    // second half.
    unsigned SecondHalfAlignment =
      (Alignment == MLD->getValueType(0).getSizeInBits()/8) ?
         Alignment/2 : Alignment;

    EVT LoMemVT, HiMemVT;
    std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

    MachineMemOperand *MMO = DAG.getMachineFunction().
      getMachineMemOperand(MLD->getPointerInfo(),
                           MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
                           Alignment, MLD->getAAInfo(), MLD->getRanges());

    Lo = DAG.getMaskedLoad(LoVT, DL, Chain, Ptr, MaskLo, Src0Lo, LoMemVT, MMO,
                           ISD::NON_EXTLOAD, MLD->isExpandingLoad());

    Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
                                     MLD->isExpandingLoad());

    MMO = DAG.getMachineFunction().
      getMachineMemOperand(MLD->getPointerInfo(),
                           MachineMemOperand::MOLoad, HiMemVT.getStoreSize(),
                           SecondHalfAlignment, MLD->getAAInfo(),
                           MLD->getRanges());

    Hi = DAG.getMaskedLoad(HiVT, DL, Chain, Ptr, MaskHi, Src0Hi, HiMemVT, MMO,
                           ISD::NON_EXTLOAD, MLD->isExpandingLoad());

    AddToWorklist(Lo.getNode());
    AddToWorklist(Hi.getNode());

    // Build a factor node to remember that this load is independent of the
    // other one.
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
                        Hi.getValue(1));

    // Legalized the chain result - switch anything that used the old chain to
    // use the new one.
6152 DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), Chain);
6153
6154 SDValue LoadRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
6155
6156 SDValue RetOps[] = { LoadRes, Chain };
6157 return DAG.getMergeValues(RetOps, DL);
6158 }
6159 return SDValue();
6160 }
6161
6162 SDValue DAGCombiner::visitVSELECT(SDNode *N) {
6163 SDValue N0 = N->getOperand(0);
6164 SDValue N1 = N->getOperand(1);
6165 SDValue N2 = N->getOperand(2);
6166 SDLoc DL(N);
6167
6168 // fold (vselect C, X, X) -> X
6169 if (N1 == N2)
6170 return N1;
6171
6172 // Canonicalize integer abs.
6173 // vselect (setg[te] X, 0), X, -X ->
6174 // vselect (setgt X, -1), X, -X ->
6175 // vselect (setl[te] X, 0), -X, X ->
6176 // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
6177 if (N0.getOpcode() == ISD::SETCC) {
6178 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
6179 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
6180 bool isAbs = false;
6181 bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
6182
6183 if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
6184 (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
6185 N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
6186 isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
6187 else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
6188 N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
6189 isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
6190
6191 if (isAbs) {
6192 EVT VT = LHS.getValueType();
6193 SDValue Shift = DAG.getNode(
6194 ISD::SRA, DL, VT, LHS,
6195 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
6196 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
6197 AddToWorklist(Shift.getNode());
6198 AddToWorklist(Add.getNode());
6199 return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
6200 }
6201 }
6202
6203 if (SimplifySelectOps(N, N1, N2))
6204 return SDValue(N, 0); // Don't revisit N.
6205
6206 // If the VSELECT result requires splitting and the mask is provided by a
6207 // SETCC, then split both nodes and their operands before legalization. This
6208 // prevents the type legalizer from unrolling SETCC into scalar comparisons
6209 // and enables future optimizations (e.g. min/max pattern matching on X86).
6210 if (N0.getOpcode() == ISD::SETCC) {
6211 EVT VT = N->getValueType(0);
6212
6213 // Check if any splitting is required.
6214 if (TLI.getTypeAction(*DAG.getContext(), VT) !=
6215 TargetLowering::TypeSplitVector)
6216 return SDValue();
6217
6218 SDValue Lo, Hi, CCLo, CCHi, LL, LH, RL, RH;
6219 std::tie(CCLo, CCHi) = SplitVSETCC(N0.getNode(), DAG);
6220 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 1);
6221 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 2);
6222
6223 Lo = DAG.getNode(N->getOpcode(), DL, LL.getValueType(), CCLo, LL, RL);
6224 Hi = DAG.getNode(N->getOpcode(), DL, LH.getValueType(), CCHi, LH, RH);
6225
6226 // Add the new VSELECT nodes to the work list in case they need to be split
6227 // again.
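// (E.g. a v16i8 vselect fed by a v16i8 setcc on a target whose widest legal
// type is v8i8 is rebuilt here as two v8i8 vselects; re-queuing Lo and Hi
// lets this combine fire again if the halves still need splitting.)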
6228 AddToWorklist(Lo.getNode());
6229 AddToWorklist(Hi.getNode());
6230
6231 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
6232 }
6233
6234 // Fold (vselect (build_vector all_ones), N1, N2) -> N1
6235 if (ISD::isBuildVectorAllOnes(N0.getNode()))
6236 return N1;
6237 // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
6238 if (ISD::isBuildVectorAllZeros(N0.getNode()))
6239 return N2;
6240
6241 // The ConvertSelectToConcatVector function assumes both the above
6242 // checks for (vselect (build_vector all{ones,zeros}) ...) have been made
6243 // and addressed.
6244 if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
6245 N2.getOpcode() == ISD::CONCAT_VECTORS &&
6246 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
6247 if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
6248 return CV;
6249 }
6250
6251 return SDValue();
6252 }
6253
6254 SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
6255 SDValue N0 = N->getOperand(0);
6256 SDValue N1 = N->getOperand(1);
6257 SDValue N2 = N->getOperand(2);
6258 SDValue N3 = N->getOperand(3);
6259 SDValue N4 = N->getOperand(4);
6260 ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
6261
6262 // fold select_cc lhs, rhs, x, x, cc -> x
6263 if (N2 == N3)
6264 return N2;
6265
6266 // Determine if the condition we're dealing with is constant
6267 if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
6268 CC, SDLoc(N), false)) {
6269 AddToWorklist(SCC.getNode());
6270
6271 if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
6272 if (!SCCC->isNullValue())
6273 return N2; // cond always true -> true val
6274 else
6275 return N3; // cond always false -> false val
6276 } else if (SCC->isUndef()) {
6277 // When the condition is UNDEF, just return the first operand. This is
6278 // coherent with DAG creation: no setcc node is created in this case.
6279 return N2;
6280 } else if (SCC.getOpcode() == ISD::SETCC) {
6281 // Fold to a simpler select_cc
6282 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
6283 SCC.getOperand(0), SCC.getOperand(1), N2, N3,
6284 SCC.getOperand(2));
6285 }
6286 }
6287
6288 // If we can fold this based on the true/false value, do so.
6289 if (SimplifySelectOps(N, N2, N3))
6290 return SDValue(N, 0); // Don't revisit N.
6291
6292 // fold select_cc into other things, such as min/max/abs
6293 return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
6294 }
6295
6296 SDValue DAGCombiner::visitSETCC(SDNode *N) {
6297 return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
6298 cast<CondCodeSDNode>(N->getOperand(2))->get(),
6299 SDLoc(N));
6300 }
6301
6302 SDValue DAGCombiner::visitSETCCE(SDNode *N) {
6303 SDValue LHS = N->getOperand(0);
6304 SDValue RHS = N->getOperand(1);
6305 SDValue Carry = N->getOperand(2);
6306 SDValue Cond = N->getOperand(3);
6307
6308 // If Carry is false, fold to a regular SETCC.
6309 if (Carry.getOpcode() == ISD::CARRY_FALSE)
6310 return DAG.getNode(ISD::SETCC, SDLoc(N), N->getVTList(), LHS, RHS, Cond);
6311
6312 return SDValue();
6313 }
6314
6315 /// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
6316 /// a build_vector of constants.
6317 /// This function is called by the DAGCombiner when visiting sext/zext/aext
6318 /// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
6319 /// Vector extends are not folded if operations are legal; this is to
6320 /// avoid introducing illegal build_vector dag nodes.
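/// For example (illustrative): (v2i32 zext (v2i8 build_vector 7, undef))
/// folds to (v2i32 build_vector 7, undef); constant elements are sign- or
/// zero-extended as appropriate and undef elements are preserved.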
6321 static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
6322 SelectionDAG &DAG, bool LegalTypes,
6323 bool LegalOperations) {
6324 unsigned Opcode = N->getOpcode();
6325 SDValue N0 = N->getOperand(0);
6326 EVT VT = N->getValueType(0);
6327
6328 assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
6329 Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
6330 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG)
6331 && "Expected EXTEND dag node in input!");
6332
6333 // fold (sext c1) -> c1
6334 // fold (zext c1) -> c1
6335 // fold (aext c1) -> c1
6336 if (isa<ConstantSDNode>(N0))
6337 return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode();
6338
6339 // fold (sext (build_vector AllConstants)) -> (build_vector AllConstants)
6340 // fold (zext (build_vector AllConstants)) -> (build_vector AllConstants)
6341 // fold (aext (build_vector AllConstants)) -> (build_vector AllConstants)
6342 EVT SVT = VT.getScalarType();
6343 if (!(VT.isVector() &&
6344 (!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) &&
6345 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
6346 return nullptr;
6347
6348 // We can fold this node into a build_vector.
6349 unsigned VTBits = SVT.getSizeInBits();
6350 unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits();
6351 SmallVector<SDValue, 8> Elts;
6352 unsigned NumElts = VT.getVectorNumElements();
6353 SDLoc DL(N);
6354
6355 for (unsigned i=0; i != NumElts; ++i) {
6356 SDValue Op = N0->getOperand(i);
6357 if (Op->isUndef()) {
6358 Elts.push_back(DAG.getUNDEF(SVT));
6359 continue;
6360 }
6361
6362 SDLoc DL(Op);
6363 // Get the constant value and if needed trunc it to the size of the type.
6364 // Nodes like build_vector might have constants wider than the scalar type.
6365 APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
6366 if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
6367 Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
6368 else
6369 Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
6370 }
6371
6372 return DAG.getBuildVector(VT, DL, Elts).getNode();
6373 }
6374
6375 // ExtendUsesToFormExtLoad - Try to extend uses of a load to enable this:
6376 // "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
6377 // transformation. Returns true if the extensions are possible and the
6378 // above-mentioned transformation is profitable.
6379 static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
6380 unsigned ExtOpc,
6381 SmallVectorImpl<SDNode *> &ExtendNodes,
6382 const TargetLowering &TLI) {
6383 bool HasCopyToRegUses = false;
6384 bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
6385 for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
6386 UE = N0.getNode()->use_end();
6387 UI != UE; ++UI) {
6388 SDNode *User = *UI;
6389 if (User == N)
6390 continue;
6391 if (UI.getUse().getResNo() != N0.getResNo())
6392 continue;
6393 // FIXME: Only extend SETCC N, N and SETCC N, c for now.
6394 if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
6395 ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
6396 if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
6397 // Sign bits will be lost after a zext.
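// (E.g. zero-extending the operands of (setcc x, -1, setlt) would turn the
// -1 into a large positive constant and change the comparison's outcome.)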
6398 return false;
6399 bool Add = false;
6400 for (unsigned i = 0; i != 2; ++i) {
6401 SDValue UseOp = User->getOperand(i);
6402 if (UseOp == N0)
6403 continue;
6404 if (!isa<ConstantSDNode>(UseOp))
6405 return false;
6406 Add = true;
6407 }
6408 if (Add)
6409 ExtendNodes.push_back(User);
6410 continue;
6411 }
6412 // If truncates aren't free and there are users we can't
6413 // extend, it isn't worthwhile.
6414 if (!isTruncFree)
6415 return false;
6416 // Remember if this value is live-out.
6417 if (User->getOpcode() == ISD::CopyToReg)
6418 HasCopyToRegUses = true;
6419 }
6420
6421 if (HasCopyToRegUses) {
6422 bool BothLiveOut = false;
6423 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6424 UI != UE; ++UI) {
6425 SDUse &Use = UI.getUse();
6426 if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
6427 BothLiveOut = true;
6428 break;
6429 }
6430 }
6431 if (BothLiveOut)
6432 // Both unextended and extended values are live out. There had better be
6433 // a good reason for the transformation.
6434 return !ExtendNodes.empty();
6435 }
6436 return true;
6437 }
6438
6439 void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
6440 SDValue Trunc, SDValue ExtLoad,
6441 const SDLoc &DL, ISD::NodeType ExtType) {
6442 // Extend SetCC uses if necessary.
6443 for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
6444 SDNode *SetCC = SetCCs[i];
6445 SmallVector<SDValue, 4> Ops;
6446
6447 for (unsigned j = 0; j != 2; ++j) {
6448 SDValue SOp = SetCC->getOperand(j);
6449 if (SOp == Trunc)
6450 Ops.push_back(ExtLoad);
6451 else
6452 Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
6453 }
6454
6455 Ops.push_back(SetCC->getOperand(2));
6456 CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
6457 }
6458 }
6459
6460 // FIXME: Bring more similar combines here, common to sext/zext (maybe aext?).
6461 SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
6462 SDValue N0 = N->getOperand(0);
6463 EVT DstVT = N->getValueType(0);
6464 EVT SrcVT = N0.getValueType();
6465
6466 assert((N->getOpcode() == ISD::SIGN_EXTEND ||
6467 N->getOpcode() == ISD::ZERO_EXTEND) &&
6468 "Unexpected node type (not an extend)!");
6469
6470 // fold (sext (load x)) to multiple smaller sextloads; same for zext.
6471 // For example, on a target with legal v4i32, but illegal v8i32, turn:
6472 // (v8i32 (sext (v8i16 (load x))))
6473 // into:
6474 // (v8i32 (concat_vectors (v4i32 (sextload x)),
6475 // (v4i32 (sextload (x + 8)))))
6476 // Where uses of the original load, i.e.:
6477 // (v8i16 (load x))
6478 // are replaced with:
6479 // (v8i16 (truncate
6480 // (v8i32 (concat_vectors (v4i32 (sextload x)),
6481 // (v4i32 (sextload (x + 8)))))))
6482 //
6483 // This combine is only applicable to illegal, but splittable, vectors.
6484 // All legal types, and illegal non-vector types, are handled elsewhere.
6485 // This combine is controlled by TargetLowering::isVectorLoadExtDesirable.
6486 // 6487 if (N0->getOpcode() != ISD::LOAD) 6488 return SDValue(); 6489 6490 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6491 6492 if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) || 6493 !N0.hasOneUse() || LN0->isVolatile() || !DstVT.isVector() || 6494 !DstVT.isPow2VectorType() || !TLI.isVectorLoadExtDesirable(SDValue(N, 0))) 6495 return SDValue(); 6496 6497 SmallVector<SDNode *, 4> SetCCs; 6498 if (!ExtendUsesToFormExtLoad(N, N0, N->getOpcode(), SetCCs, TLI)) 6499 return SDValue(); 6500 6501 ISD::LoadExtType ExtType = 6502 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; 6503 6504 // Try to split the vector types to get down to legal types. 6505 EVT SplitSrcVT = SrcVT; 6506 EVT SplitDstVT = DstVT; 6507 while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) && 6508 SplitSrcVT.getVectorNumElements() > 1) { 6509 SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first; 6510 SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first; 6511 } 6512 6513 if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT)) 6514 return SDValue(); 6515 6516 SDLoc DL(N); 6517 const unsigned NumSplits = 6518 DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements(); 6519 const unsigned Stride = SplitSrcVT.getStoreSize(); 6520 SmallVector<SDValue, 4> Loads; 6521 SmallVector<SDValue, 4> Chains; 6522 6523 SDValue BasePtr = LN0->getBasePtr(); 6524 for (unsigned Idx = 0; Idx < NumSplits; Idx++) { 6525 const unsigned Offset = Idx * Stride; 6526 const unsigned Align = MinAlign(LN0->getAlignment(), Offset); 6527 6528 SDValue SplitLoad = DAG.getExtLoad( 6529 ExtType, DL, SplitDstVT, LN0->getChain(), BasePtr, 6530 LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align, 6531 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 6532 6533 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 6534 DAG.getConstant(Stride, DL, BasePtr.getValueType())); 6535 6536 Loads.push_back(SplitLoad.getValue(0)); 6537 Chains.push_back(SplitLoad.getValue(1)); 6538 } 6539 6540 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 6541 SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads); 6542 6543 CombineTo(N, NewValue); 6544 6545 // Replace uses of the original load (before extension) 6546 // with a truncate of the concatenated sextloaded vectors. 6547 SDValue Trunc = 6548 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue); 6549 CombineTo(N0.getNode(), Trunc, NewChain); 6550 ExtendSetCCUses(SetCCs, Trunc, NewValue, DL, 6551 (ISD::NodeType)N->getOpcode()); 6552 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
6553 }
6554
6555 SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
6556 SDValue N0 = N->getOperand(0);
6557 EVT VT = N->getValueType(0);
6558
6559 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
6560 LegalOperations))
6561 return SDValue(Res, 0);
6562
6563 // fold (sext (sext x)) -> (sext x)
6564 // fold (sext (aext x)) -> (sext x)
6565 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
6566 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT,
6567 N0.getOperand(0));
6568
6569 if (N0.getOpcode() == ISD::TRUNCATE) {
6570 // fold (sext (truncate (load x))) -> (sext (smaller load x))
6571 // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
6572 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
6573 SDNode *oye = N0.getOperand(0).getNode();
6574 if (NarrowLoad.getNode() != N0.getNode()) {
6575 CombineTo(N0.getNode(), NarrowLoad);
6576 // CombineTo deleted the truncate, if needed, but not what's under it.
6577 AddToWorklist(oye);
6578 }
6579 return SDValue(N, 0); // Return N so it doesn't get rechecked!
6580 }
6581
6582 // See if the value being truncated is already sign extended. If so, just
6583 // eliminate the trunc/sext pair.
6584 SDValue Op = N0.getOperand(0);
6585 unsigned OpBits = Op.getScalarValueSizeInBits();
6586 unsigned MidBits = N0.getScalarValueSizeInBits();
6587 unsigned DestBits = VT.getScalarSizeInBits();
6588 unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
6589
6590 if (OpBits == DestBits) {
6591 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
6592 // bits, it is already sign extended as needed.
6593 if (NumSignBits > DestBits-MidBits)
6594 return Op;
6595 } else if (OpBits < DestBits) {
6596 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
6597 // bits, just sext from i32.
6598 if (NumSignBits > OpBits-MidBits)
6599 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, Op);
6600 } else {
6601 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
6602 // bits, just truncate to i32.
6603 if (NumSignBits > OpBits-MidBits)
6604 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
6605 }
6606
6607 // fold (sext (truncate x)) -> (sextinreg x).
6608 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
6609 N0.getValueType())) {
6610 if (OpBits < DestBits)
6611 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op);
6612 else if (OpBits > DestBits)
6613 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op);
6614 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, Op,
6615 DAG.getValueType(N0.getValueType()));
6616 }
6617 }
6618
6619 // fold (sext (load x)) -> (sext (truncate (sextload x)))
6620 // Only generate vector extloads when 1) they're legal, and 2) they are
6621 // deemed desirable by the target.
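// For example (illustrative): with a legal i8->i32 sextload, (i32 sext
// (i8 load x)) becomes (i32 sextload x); any remaining users of the i8
// value are fed by (i8 truncate (i32 sextload x)) instead.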
6622 if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 6623 ((!LegalOperations && !VT.isVector() && 6624 !cast<LoadSDNode>(N0)->isVolatile()) || 6625 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()))) { 6626 bool DoXform = true; 6627 SmallVector<SDNode*, 4> SetCCs; 6628 if (!N0.hasOneUse()) 6629 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI); 6630 if (VT.isVector()) 6631 DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0)); 6632 if (DoXform) { 6633 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6634 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 6635 LN0->getChain(), 6636 LN0->getBasePtr(), N0.getValueType(), 6637 LN0->getMemOperand()); 6638 CombineTo(N, ExtLoad); 6639 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6640 N0.getValueType(), ExtLoad); 6641 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 6642 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 6643 ISD::SIGN_EXTEND); 6644 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6645 } 6646 } 6647 6648 // fold (sext (load x)) to multiple smaller sextloads. 6649 // Only on illegal but splittable vectors. 6650 if (SDValue ExtLoad = CombineExtLoad(N)) 6651 return ExtLoad; 6652 6653 // fold (sext (sextload x)) -> (sext (truncate (sextload x))) 6654 // fold (sext ( extload x)) -> (sext (truncate (sextload x))) 6655 if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 6656 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 6657 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6658 EVT MemVT = LN0->getMemoryVT(); 6659 if ((!LegalOperations && !LN0->isVolatile()) || 6660 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT)) { 6661 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 6662 LN0->getChain(), 6663 LN0->getBasePtr(), MemVT, 6664 LN0->getMemOperand()); 6665 CombineTo(N, ExtLoad); 6666 CombineTo(N0.getNode(), 6667 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6668 N0.getValueType(), ExtLoad), 6669 ExtLoad.getValue(1)); 6670 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
6671 } 6672 } 6673 6674 // fold (sext (and/or/xor (load x), cst)) -> 6675 // (and/or/xor (sextload x), (sext cst)) 6676 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 6677 N0.getOpcode() == ISD::XOR) && 6678 isa<LoadSDNode>(N0.getOperand(0)) && 6679 N0.getOperand(1).getOpcode() == ISD::Constant && 6680 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()) && 6681 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 6682 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 6683 if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) { 6684 bool DoXform = true; 6685 SmallVector<SDNode*, 4> SetCCs; 6686 if (!N0.hasOneUse()) 6687 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND, 6688 SetCCs, TLI); 6689 if (DoXform) { 6690 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN0), VT, 6691 LN0->getChain(), LN0->getBasePtr(), 6692 LN0->getMemoryVT(), 6693 LN0->getMemOperand()); 6694 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 6695 Mask = Mask.sext(VT.getSizeInBits()); 6696 SDLoc DL(N); 6697 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT, 6698 ExtLoad, DAG.getConstant(Mask, DL, VT)); 6699 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 6700 SDLoc(N0.getOperand(0)), 6701 N0.getOperand(0).getValueType(), ExtLoad); 6702 CombineTo(N, And); 6703 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 6704 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, 6705 ISD::SIGN_EXTEND); 6706 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6707 } 6708 } 6709 } 6710 6711 if (N0.getOpcode() == ISD::SETCC) { 6712 EVT N0VT = N0.getOperand(0).getValueType(); 6713 // sext(setcc) -> sext_in_reg(vsetcc) for vectors. 6714 // Only do this before legalize for now. 6715 if (VT.isVector() && !LegalOperations && 6716 TLI.getBooleanContents(N0VT) == 6717 TargetLowering::ZeroOrNegativeOneBooleanContent) { 6718 // On some architectures (such as SSE/NEON/etc) the SETCC result type is 6719 // of the same size as the compared operands. Only optimize sext(setcc()) 6720 // if this is the case. 6721 EVT SVT = getSetCCResultType(N0VT); 6722 6723 // We know that the # elements of the results is the same as the 6724 // # elements of the compare (and the # elements of the compare result 6725 // for that matter). Check to see that they are the same size. If so, 6726 // we know that the element size of the sext'd result matches the 6727 // element size of the compare operands. 6728 if (VT.getSizeInBits() == SVT.getSizeInBits()) 6729 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0), 6730 N0.getOperand(1), 6731 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 6732 6733 // If the desired elements are smaller or larger than the source 6734 // elements we can use a matching integer vector type and then 6735 // truncate/sign extend 6736 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger(); 6737 if (SVT == MatchingVectorType) { 6738 SDValue VsetCC = DAG.getSetCC(SDLoc(N), MatchingVectorType, 6739 N0.getOperand(0), N0.getOperand(1), 6740 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 6741 return DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT); 6742 } 6743 } 6744 6745 // sext(setcc x, y, cc) -> (select (setcc x, y, cc), T, 0) 6746 // Here, T can be 1 or -1, depending on the type of the setcc and 6747 // getBooleanContents(). 
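// E.g. (i32 sext (i1 setcc x, y, cc)) becomes
// (select (setcc x, y, cc), -1, 0), which SimplifySelectCC below can often
// reduce to a cheaper sequence.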
6748 unsigned SetCCWidth = N0.getScalarValueSizeInBits(); 6749 6750 SDLoc DL(N); 6751 // To determine the "true" side of the select, we need to know the high bit 6752 // of the value returned by the setcc if it evaluates to true. 6753 // If the type of the setcc is i1, then the true case of the select is just 6754 // sext(i1 1), that is, -1. 6755 // If the type of the setcc is larger (say, i8) then the value of the high 6756 // bit depends on getBooleanContents(). So, ask TLI for a real "true" value 6757 // of the appropriate width. 6758 SDValue ExtTrueVal = 6759 (SetCCWidth == 1) 6760 ? DAG.getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), 6761 DL, VT) 6762 : TLI.getConstTrueVal(DAG, VT, DL); 6763 6764 if (SDValue SCC = SimplifySelectCC( 6765 DL, N0.getOperand(0), N0.getOperand(1), ExtTrueVal, 6766 DAG.getConstant(0, DL, VT), 6767 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true)) 6768 return SCC; 6769 6770 if (!VT.isVector()) { 6771 EVT SetCCVT = getSetCCResultType(N0.getOperand(0).getValueType()); 6772 if (!LegalOperations || 6773 TLI.isOperationLegal(ISD::SETCC, N0.getOperand(0).getValueType())) { 6774 SDLoc DL(N); 6775 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 6776 SDValue SetCC = 6777 DAG.getSetCC(DL, SetCCVT, N0.getOperand(0), N0.getOperand(1), CC); 6778 return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, 6779 DAG.getConstant(0, DL, VT)); 6780 } 6781 } 6782 } 6783 6784 // fold (sext x) -> (zext x) if the sign bit is known zero. 6785 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) && 6786 DAG.SignBitIsZero(N0)) 6787 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0); 6788 6789 return SDValue(); 6790 } 6791 6792 // isTruncateOf - If N is a truncate of some other value, return true, record 6793 // the value being truncated in Op and which of Op's bits are zero in KnownZero. 6794 // This function computes KnownZero to avoid a duplicated call to 6795 // computeKnownBits in the caller. 6796 static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, 6797 APInt &KnownZero) { 6798 APInt KnownOne; 6799 if (N->getOpcode() == ISD::TRUNCATE) { 6800 Op = N->getOperand(0); 6801 DAG.computeKnownBits(Op, KnownZero, KnownOne); 6802 return true; 6803 } 6804 6805 if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 || 6806 cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE) 6807 return false; 6808 6809 SDValue Op0 = N->getOperand(0); 6810 SDValue Op1 = N->getOperand(1); 6811 assert(Op0.getValueType() == Op1.getValueType()); 6812 6813 if (isNullConstant(Op0)) 6814 Op = Op1; 6815 else if (isNullConstant(Op1)) 6816 Op = Op0; 6817 else 6818 return false; 6819 6820 DAG.computeKnownBits(Op, KnownZero, KnownOne); 6821 6822 if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue()) 6823 return false; 6824 6825 return true; 6826 } 6827 6828 SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) { 6829 SDValue N0 = N->getOperand(0); 6830 EVT VT = N->getValueType(0); 6831 6832 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 6833 LegalOperations)) 6834 return SDValue(Res, 0); 6835 6836 // fold (zext (zext x)) -> (zext x) 6837 // fold (zext (aext x)) -> (zext x) 6838 if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 6839 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, 6840 N0.getOperand(0)); 6841 6842 // fold (zext (truncate x)) -> (zext x) or 6843 // (zext (truncate x)) -> (truncate x) 6844 // This is valid when the truncated bits of x are already zero. 
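// E.g. (i32 zext (i8 trunc (i32 x))) is just x when the top 24 bits of x are
// known zero; if they are not known zero, this fold does not apply and the
// (and x, mask) path below handles the zext instead.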
6845 // FIXME: We should extend this to work for vectors too. 6846 SDValue Op; 6847 APInt KnownZero; 6848 if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) { 6849 APInt TruncatedBits = 6850 (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ? 6851 APInt(Op.getValueSizeInBits(), 0) : 6852 APInt::getBitsSet(Op.getValueSizeInBits(), 6853 N0.getValueSizeInBits(), 6854 std::min(Op.getValueSizeInBits(), 6855 VT.getSizeInBits())); 6856 if (TruncatedBits == (KnownZero & TruncatedBits)) { 6857 if (VT.bitsGT(Op.getValueType())) 6858 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Op); 6859 if (VT.bitsLT(Op.getValueType())) 6860 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 6861 6862 return Op; 6863 } 6864 } 6865 6866 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 6867 // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n))) 6868 if (N0.getOpcode() == ISD::TRUNCATE) { 6869 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 6870 SDNode *oye = N0.getOperand(0).getNode(); 6871 if (NarrowLoad.getNode() != N0.getNode()) { 6872 CombineTo(N0.getNode(), NarrowLoad); 6873 // CombineTo deleted the truncate, if needed, but not what's under it. 6874 AddToWorklist(oye); 6875 } 6876 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6877 } 6878 } 6879 6880 // fold (zext (truncate x)) -> (and x, mask) 6881 if (N0.getOpcode() == ISD::TRUNCATE) { 6882 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 6883 // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n))) 6884 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 6885 SDNode *oye = N0.getOperand(0).getNode(); 6886 if (NarrowLoad.getNode() != N0.getNode()) { 6887 CombineTo(N0.getNode(), NarrowLoad); 6888 // CombineTo deleted the truncate, if needed, but not what's under it. 6889 AddToWorklist(oye); 6890 } 6891 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6892 } 6893 6894 EVT SrcVT = N0.getOperand(0).getValueType(); 6895 EVT MinVT = N0.getValueType(); 6896 6897 // Try to mask before the extension to avoid having to generate a larger mask, 6898 // possibly over several sub-vectors. 6899 if (SrcVT.bitsLT(VT)) { 6900 if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) && 6901 TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) { 6902 SDValue Op = N0.getOperand(0); 6903 Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType()); 6904 AddToWorklist(Op.getNode()); 6905 return DAG.getZExtOrTrunc(Op, SDLoc(N), VT); 6906 } 6907 } 6908 6909 if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) { 6910 SDValue Op = N0.getOperand(0); 6911 if (SrcVT.bitsLT(VT)) { 6912 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op); 6913 AddToWorklist(Op.getNode()); 6914 } else if (SrcVT.bitsGT(VT)) { 6915 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 6916 AddToWorklist(Op.getNode()); 6917 } 6918 return DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType()); 6919 } 6920 } 6921 6922 // Fold (zext (and (trunc x), cst)) -> (and x, cst), 6923 // if either of the casts is not free. 
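// E.g. with an i64 x: (i64 zext (i32 and (i32 trunc x), 255)) can become
// (i64 and x, 255) directly, saving the trunc/zext pair whenever one of the
// two casts would cost an instruction.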
6924 if (N0.getOpcode() == ISD::AND && 6925 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 6926 N0.getOperand(1).getOpcode() == ISD::Constant && 6927 (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 6928 N0.getValueType()) || 6929 !TLI.isZExtFree(N0.getValueType(), VT))) { 6930 SDValue X = N0.getOperand(0).getOperand(0); 6931 if (X.getValueType().bitsLT(VT)) { 6932 X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(X), VT, X); 6933 } else if (X.getValueType().bitsGT(VT)) { 6934 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); 6935 } 6936 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 6937 Mask = Mask.zext(VT.getSizeInBits()); 6938 SDLoc DL(N); 6939 return DAG.getNode(ISD::AND, DL, VT, 6940 X, DAG.getConstant(Mask, DL, VT)); 6941 } 6942 6943 // fold (zext (load x)) -> (zext (truncate (zextload x))) 6944 // Only generate vector extloads when 1) they're legal, and 2) they are 6945 // deemed desirable by the target. 6946 if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 6947 ((!LegalOperations && !VT.isVector() && 6948 !cast<LoadSDNode>(N0)->isVolatile()) || 6949 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()))) { 6950 bool DoXform = true; 6951 SmallVector<SDNode*, 4> SetCCs; 6952 if (!N0.hasOneUse()) 6953 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI); 6954 if (VT.isVector()) 6955 DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0)); 6956 if (DoXform) { 6957 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6958 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 6959 LN0->getChain(), 6960 LN0->getBasePtr(), N0.getValueType(), 6961 LN0->getMemOperand()); 6962 CombineTo(N, ExtLoad); 6963 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 6964 N0.getValueType(), ExtLoad); 6965 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 6966 6967 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 6968 ISD::ZERO_EXTEND); 6969 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6970 } 6971 } 6972 6973 // fold (zext (load x)) to multiple smaller zextloads. 6974 // Only on illegal but splittable vectors. 6975 if (SDValue ExtLoad = CombineExtLoad(N)) 6976 return ExtLoad; 6977 6978 // fold (zext (and/or/xor (load x), cst)) -> 6979 // (and/or/xor (zextload x), (zext cst)) 6980 // Unless (and (load x) cst) will match as a zextload already and has 6981 // additional users. 
6982 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 6983 N0.getOpcode() == ISD::XOR) && 6984 isa<LoadSDNode>(N0.getOperand(0)) && 6985 N0.getOperand(1).getOpcode() == ISD::Constant && 6986 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()) && 6987 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 6988 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 6989 if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) { 6990 bool DoXform = true; 6991 SmallVector<SDNode*, 4> SetCCs; 6992 if (!N0.hasOneUse()) { 6993 if (N0.getOpcode() == ISD::AND) { 6994 auto *AndC = cast<ConstantSDNode>(N0.getOperand(1)); 6995 auto NarrowLoad = false; 6996 EVT LoadResultTy = AndC->getValueType(0); 6997 EVT ExtVT, LoadedVT; 6998 if (isAndLoadExtLoad(AndC, LN0, LoadResultTy, ExtVT, LoadedVT, 6999 NarrowLoad)) 7000 DoXform = false; 7001 } 7002 if (DoXform) 7003 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), 7004 ISD::ZERO_EXTEND, SetCCs, TLI); 7005 } 7006 if (DoXform) { 7007 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT, 7008 LN0->getChain(), LN0->getBasePtr(), 7009 LN0->getMemoryVT(), 7010 LN0->getMemOperand()); 7011 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 7012 Mask = Mask.zext(VT.getSizeInBits()); 7013 SDLoc DL(N); 7014 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT, 7015 ExtLoad, DAG.getConstant(Mask, DL, VT)); 7016 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 7017 SDLoc(N0.getOperand(0)), 7018 N0.getOperand(0).getValueType(), ExtLoad); 7019 CombineTo(N, And); 7020 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 7021 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, 7022 ISD::ZERO_EXTEND); 7023 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7024 } 7025 } 7026 } 7027 7028 // fold (zext (zextload x)) -> (zext (truncate (zextload x))) 7029 // fold (zext ( extload x)) -> (zext (truncate (zextload x))) 7030 if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 7031 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 7032 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7033 EVT MemVT = LN0->getMemoryVT(); 7034 if ((!LegalOperations && !LN0->isVolatile()) || 7035 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT)) { 7036 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 7037 LN0->getChain(), 7038 LN0->getBasePtr(), MemVT, 7039 LN0->getMemOperand()); 7040 CombineTo(N, ExtLoad); 7041 CombineTo(N0.getNode(), 7042 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), 7043 ExtLoad), 7044 ExtLoad.getValue(1)); 7045 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7046 } 7047 } 7048 7049 if (N0.getOpcode() == ISD::SETCC) { 7050 // Only do this before legalize for now. 7051 if (!LegalOperations && VT.isVector() && 7052 N0.getValueType().getVectorElementType() == MVT::i1) { 7053 EVT N00VT = N0.getOperand(0).getValueType(); 7054 if (getSetCCResultType(N00VT) == N0.getValueType()) 7055 return SDValue(); 7056 7057 // We know that the # elements of the results is the same as the # 7058 // elements of the compare (and the # elements of the compare result for 7059 // that matter). Check to see that they are the same size. If so, we know 7060 // that the element size of the sext'd result matches the element size of 7061 // the compare operands. 7062 SDLoc DL(N); 7063 SDValue VecOnes = DAG.getConstant(1, DL, VT); 7064 if (VT.getSizeInBits() == N00VT.getSizeInBits()) { 7065 // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors. 
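// E.g. on a target with ZeroOrNegativeOne boolean contents, the vsetcc
// built below yields 0 or -1 per lane; masking it with the splat of 1 in
// VecOnes produces the 0/1 lanes that zext(setcc) requires.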
7066 SDValue VSetCC = DAG.getNode(ISD::SETCC, DL, VT, N0.getOperand(0), 7067 N0.getOperand(1), N0.getOperand(2)); 7068 return DAG.getNode(ISD::AND, DL, VT, VSetCC, VecOnes); 7069 } 7070 7071 // If the desired elements are smaller or larger than the source 7072 // elements we can use a matching integer vector type and then 7073 // truncate/sign extend. 7074 EVT MatchingElementType = EVT::getIntegerVT( 7075 *DAG.getContext(), N00VT.getScalarSizeInBits()); 7076 EVT MatchingVectorType = EVT::getVectorVT( 7077 *DAG.getContext(), MatchingElementType, N00VT.getVectorNumElements()); 7078 SDValue VsetCC = 7079 DAG.getNode(ISD::SETCC, DL, MatchingVectorType, N0.getOperand(0), 7080 N0.getOperand(1), N0.getOperand(2)); 7081 return DAG.getNode(ISD::AND, DL, VT, DAG.getSExtOrTrunc(VsetCC, DL, VT), 7082 VecOnes); 7083 } 7084 7085 // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 7086 SDLoc DL(N); 7087 if (SDValue SCC = SimplifySelectCC( 7088 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT), 7089 DAG.getConstant(0, DL, VT), 7090 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true)) 7091 return SCC; 7092 } 7093 7094 // (zext (shl (zext x), cst)) -> (shl (zext x), cst) 7095 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) && 7096 isa<ConstantSDNode>(N0.getOperand(1)) && 7097 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND && 7098 N0.hasOneUse()) { 7099 SDValue ShAmt = N0.getOperand(1); 7100 unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 7101 if (N0.getOpcode() == ISD::SHL) { 7102 SDValue InnerZExt = N0.getOperand(0); 7103 // If the original shl may be shifting out bits, do not perform this 7104 // transformation. 7105 unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() - 7106 InnerZExt.getOperand(0).getValueSizeInBits(); 7107 if (ShAmtVal > KnownZeroBits) 7108 return SDValue(); 7109 } 7110 7111 SDLoc DL(N); 7112 7113 // Ensure that the shift amount is wide enough for the shifted value. 7114 if (VT.getSizeInBits() >= 256) 7115 ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt); 7116 7117 return DAG.getNode(N0.getOpcode(), DL, VT, 7118 DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)), 7119 ShAmt); 7120 } 7121 7122 return SDValue(); 7123 } 7124 7125 SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) { 7126 SDValue N0 = N->getOperand(0); 7127 EVT VT = N->getValueType(0); 7128 7129 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7130 LegalOperations)) 7131 return SDValue(Res, 0); 7132 7133 // fold (aext (aext x)) -> (aext x) 7134 // fold (aext (zext x)) -> (zext x) 7135 // fold (aext (sext x)) -> (sext x) 7136 if (N0.getOpcode() == ISD::ANY_EXTEND || 7137 N0.getOpcode() == ISD::ZERO_EXTEND || 7138 N0.getOpcode() == ISD::SIGN_EXTEND) 7139 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0)); 7140 7141 // fold (aext (truncate (load x))) -> (aext (smaller load x)) 7142 // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n))) 7143 if (N0.getOpcode() == ISD::TRUNCATE) { 7144 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 7145 SDNode *oye = N0.getOperand(0).getNode(); 7146 if (NarrowLoad.getNode() != N0.getNode()) { 7147 CombineTo(N0.getNode(), NarrowLoad); 7148 // CombineTo deleted the truncate, if needed, but not what's under it. 7149 AddToWorklist(oye); 7150 } 7151 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
7152 } 7153 } 7154 7155 // fold (aext (truncate x)) 7156 if (N0.getOpcode() == ISD::TRUNCATE) { 7157 SDValue TruncOp = N0.getOperand(0); 7158 if (TruncOp.getValueType() == VT) 7159 return TruncOp; // x iff x size == zext size. 7160 if (TruncOp.getValueType().bitsGT(VT)) 7161 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, TruncOp); 7162 return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, TruncOp); 7163 } 7164 7165 // Fold (aext (and (trunc x), cst)) -> (and x, cst) 7166 // if the trunc is not free. 7167 if (N0.getOpcode() == ISD::AND && 7168 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 7169 N0.getOperand(1).getOpcode() == ISD::Constant && 7170 !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 7171 N0.getValueType())) { 7172 SDLoc DL(N); 7173 SDValue X = N0.getOperand(0).getOperand(0); 7174 if (X.getValueType().bitsLT(VT)) { 7175 X = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X); 7176 } else if (X.getValueType().bitsGT(VT)) { 7177 X = DAG.getNode(ISD::TRUNCATE, DL, VT, X); 7178 } 7179 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 7180 Mask = Mask.zext(VT.getSizeInBits()); 7181 return DAG.getNode(ISD::AND, DL, VT, 7182 X, DAG.getConstant(Mask, DL, VT)); 7183 } 7184 7185 // fold (aext (load x)) -> (aext (truncate (extload x))) 7186 // None of the supported targets knows how to perform load and any_ext 7187 // on vectors in one instruction. We only perform this transformation on 7188 // scalars. 7189 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 7190 ISD::isUNINDEXEDLoad(N0.getNode()) && 7191 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) { 7192 bool DoXform = true; 7193 SmallVector<SDNode*, 4> SetCCs; 7194 if (!N0.hasOneUse()) 7195 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI); 7196 if (DoXform) { 7197 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7198 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, 7199 LN0->getChain(), 7200 LN0->getBasePtr(), N0.getValueType(), 7201 LN0->getMemOperand()); 7202 CombineTo(N, ExtLoad); 7203 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 7204 N0.getValueType(), ExtLoad); 7205 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 7206 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 7207 ISD::ANY_EXTEND); 7208 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7209 } 7210 } 7211 7212 // fold (aext (zextload x)) -> (aext (truncate (zextload x))) 7213 // fold (aext (sextload x)) -> (aext (truncate (sextload x))) 7214 // fold (aext ( extload x)) -> (aext (truncate (extload x))) 7215 if (N0.getOpcode() == ISD::LOAD && 7216 !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 7217 N0.hasOneUse()) { 7218 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7219 ISD::LoadExtType ExtType = LN0->getExtensionType(); 7220 EVT MemVT = LN0->getMemoryVT(); 7221 if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) { 7222 SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N), 7223 VT, LN0->getChain(), LN0->getBasePtr(), 7224 MemVT, LN0->getMemOperand()); 7225 CombineTo(N, ExtLoad); 7226 CombineTo(N0.getNode(), 7227 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 7228 N0.getValueType(), ExtLoad), 7229 ExtLoad.getValue(1)); 7230 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7231 } 7232 } 7233 7234 if (N0.getOpcode() == ISD::SETCC) { 7235 // For vectors: 7236 // aext(setcc) -> vsetcc 7237 // aext(setcc) -> truncate(vsetcc) 7238 // aext(setcc) -> aext(vsetcc) 7239 // Only do this before legalize for now. 
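// E.g. (v4i32 aext (v4i1 setcc (v4i32 a, v4i32 b, cc))) can simply become
// (v4i32 setcc a, b, cc), since any_extend leaves the high bits unspecified
// and the vsetcc's result width already matches the operands here.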
7240 if (VT.isVector() && !LegalOperations) {
7241 EVT N0VT = N0.getOperand(0).getValueType();
7242 // We know that the # elements of the results is the same as the
7243 // # elements of the compare (and the # elements of the compare result
7244 // for that matter). Check to see that they are the same size. If so,
7245 // we know that the element size of the extended result matches the
7246 // element size of the compare operands.
7247 if (VT.getSizeInBits() == N0VT.getSizeInBits())
7248 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
7249 N0.getOperand(1),
7250 cast<CondCodeSDNode>(N0.getOperand(2))->get());
7251 // If the desired elements are smaller or larger than the source
7252 // elements we can use a matching integer vector type and then
7253 // truncate/any extend
7254 else {
7255 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger();
7256 SDValue VsetCC =
7257 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
7258 N0.getOperand(1),
7259 cast<CondCodeSDNode>(N0.getOperand(2))->get());
7260 return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
7261 }
7262 }
7263
7264 // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
7265 SDLoc DL(N);
7266 if (SDValue SCC = SimplifySelectCC(
7267 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
7268 DAG.getConstant(0, DL, VT),
7269 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
7270 return SCC;
7271 }
7272
7273 return SDValue();
7274 }
7275
7276 /// See if the specified operand can be simplified with the knowledge that only
7277 /// the bits specified by Mask are used. If so, return the simpler operand,
7278 /// otherwise return a null SDValue.
7279 SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
7280 switch (V.getOpcode()) {
7281 default: break;
7282 case ISD::Constant: {
7283 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
7284 assert(CV && "Const value should be ConstSDNode.");
7285 const APInt &CVal = CV->getAPIntValue();
7286 APInt NewVal = CVal & Mask;
7287 if (NewVal != CVal)
7288 return DAG.getConstant(NewVal, SDLoc(V), V.getValueType());
7289 break;
7290 }
7291 case ISD::OR:
7292 case ISD::XOR:
7293 // If the LHS or RHS don't contribute bits to the or/xor, drop them.
7294 if (DAG.MaskedValueIsZero(V.getOperand(0), Mask))
7295 return V.getOperand(1);
7296 if (DAG.MaskedValueIsZero(V.getOperand(1), Mask))
7297 return V.getOperand(0);
7298 break;
7299 case ISD::SRL:
7300 // Only look at single-use SRLs.
7301 if (!V.getNode()->hasOneUse())
7302 break;
7303 if (ConstantSDNode *RHSC = getAsNonOpaqueConstant(V.getOperand(1))) {
7304 // See if we can recursively simplify the LHS.
7305 unsigned Amt = RHSC->getZExtValue();
7306
7307 // Watch out for shift count overflow though.
7308 if (Amt >= Mask.getBitWidth()) break;
7309 APInt NewMask = Mask << Amt;
7310 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask))
7311 return DAG.getNode(ISD::SRL, SDLoc(V), V.getValueType(),
7312 SimplifyLHS, V.getOperand(1));
7313 }
7314 }
7315 return SDValue();
7316 }
7317
7318 /// If the result of a wider load is shifted right by N bits and then
7319 /// truncated to a narrower type, and N is a multiple of the number of bits in
7320 /// the narrower type, transform the node into a narrower load from address +
7321 /// N / (number of bits in the new type). If the result is to be extended, also
7322 /// fold the extension to form an extending load.
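/// For example (illustrative, little endian): (i8 trunc (i32 srl
/// (i32 load x), 16)) becomes (i8 load (x + 2)); the shift amount 16 is a
/// multiple of the 8-bit result width, so only the byte at offset 2 is needed.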
7323 SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
7324 unsigned Opc = N->getOpcode();
7325
7326 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
7327 SDValue N0 = N->getOperand(0);
7328 EVT VT = N->getValueType(0);
7329 EVT ExtVT = VT;
7330
7331 // This transformation isn't valid for vector loads.
7332 if (VT.isVector())
7333 return SDValue();
7334
7335 // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
7336 // extended to VT.
7337 if (Opc == ISD::SIGN_EXTEND_INREG) {
7338 ExtType = ISD::SEXTLOAD;
7339 ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7340 } else if (Opc == ISD::SRL) {
7341 // Another special-case: SRL is basically zero-extending a narrower value.
7342 ExtType = ISD::ZEXTLOAD;
7343 N0 = SDValue(N, 0);
7344 ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7345 if (!N01) return SDValue();
7346 ExtVT = EVT::getIntegerVT(*DAG.getContext(),
7347 VT.getSizeInBits() - N01->getZExtValue());
7348 }
7349 if (LegalOperations && !TLI.isLoadExtLegal(ExtType, VT, ExtVT))
7350 return SDValue();
7351
7352 unsigned EVTBits = ExtVT.getSizeInBits();
7353
7354 // Do not generate loads of non-round integer types since these can
7355 // be expensive (and would be wrong if the type is not byte sized).
7356 if (!ExtVT.isRound())
7357 return SDValue();
7358
7359 unsigned ShAmt = 0;
7360 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
7361 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
7362 ShAmt = N01->getZExtValue();
7363 // Is the shift amount a multiple of the size of ExtVT?
7364 if ((ShAmt & (EVTBits-1)) == 0) {
7365 N0 = N0.getOperand(0);
7366 // Is the load width a multiple of the size of ExtVT?
7367 if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0)
7368 return SDValue();
7369 }
7370
7371 // At this point, we must have a load or else we can't do the transform.
7372 if (!isa<LoadSDNode>(N0)) return SDValue();
7373
7374 // Because a SRL must be assumed to *need* to zero-extend the high bits
7375 // (as opposed to anyext the high bits), we can't combine the zextload
7376 // lowering of SRL and an sextload.
7377 if (cast<LoadSDNode>(N0)->getExtensionType() == ISD::SEXTLOAD)
7378 return SDValue();
7379
7380 // If the shift amount is larger than the input type then we're not
7381 // accessing any of the loaded bytes. If the load was a zextload/extload
7382 // then the result of the shift+trunc is zero/undef (handled elsewhere).
7383 if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits())
7384 return SDValue();
7385 }
7386 }
7387
7388 // If the load is shifted left (and the result isn't shifted back right),
7389 // we can fold the truncate through the shift.
7390 unsigned ShLeftAmt = 0;
7391 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
7392 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
7393 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
7394 ShLeftAmt = N01->getZExtValue();
7395 N0 = N0.getOperand(0);
7396 }
7397 }
7398
7399 // If we haven't found a load, we can't narrow it. Don't transform one with
7400 // multiple uses either; that would require adding a new load.
7401 if (!isa<LoadSDNode>(N0) || !N0.hasOneUse())
7402 return SDValue();
7403
7404 // Don't change the width of a volatile load.
7405 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
7406 if (LN0->isVolatile())
7407 return SDValue();
7408
7409 // Verify that we are actually reducing a load width here.
7410 if (LN0->getMemoryVT().getSizeInBits() < EVTBits) 7411 return SDValue(); 7412 7413 // For the transform to be legal, the load must produce only two values 7414 // (the value loaded and the chain). Don't transform a pre-increment 7415 // load, for example, which produces an extra value. Otherwise the 7416 // transformation is not equivalent, and the downstream logic to replace 7417 // uses gets things wrong. 7418 if (LN0->getNumValues() > 2) 7419 return SDValue(); 7420 7421 // If the load that we're shrinking is an extload and we're not just 7422 // discarding the extension we can't simply shrink the load. Bail. 7423 // TODO: It would be possible to merge the extensions in some cases. 7424 if (LN0->getExtensionType() != ISD::NON_EXTLOAD && 7425 LN0->getMemoryVT().getSizeInBits() < ExtVT.getSizeInBits() + ShAmt) 7426 return SDValue(); 7427 7428 if (!TLI.shouldReduceLoadWidth(LN0, ExtType, ExtVT)) 7429 return SDValue(); 7430 7431 EVT PtrType = N0.getOperand(1).getValueType(); 7432 7433 if (PtrType == MVT::Untyped || PtrType.isExtended()) 7434 // It's not possible to generate a constant of extended or untyped type. 7435 return SDValue(); 7436 7437 // For big endian targets, we need to adjust the offset to the pointer to 7438 // load the correct bytes. 7439 if (DAG.getDataLayout().isBigEndian()) { 7440 unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits(); 7441 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits(); 7442 ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; 7443 } 7444 7445 uint64_t PtrOff = ShAmt / 8; 7446 unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff); 7447 SDLoc DL(LN0); 7448 // The original load itself didn't wrap, so an offset within it doesn't. 7449 SDNodeFlags Flags; 7450 Flags.setNoUnsignedWrap(true); 7451 SDValue NewPtr = DAG.getNode(ISD::ADD, DL, 7452 PtrType, LN0->getBasePtr(), 7453 DAG.getConstant(PtrOff, DL, PtrType), 7454 &Flags); 7455 AddToWorklist(NewPtr.getNode()); 7456 7457 SDValue Load; 7458 if (ExtType == ISD::NON_EXTLOAD) 7459 Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr, 7460 LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign, 7461 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 7462 else 7463 Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(), NewPtr, 7464 LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT, 7465 NewAlign, LN0->getMemOperand()->getFlags(), 7466 LN0->getAAInfo()); 7467 7468 // Replace the old load's chain with the new load's chain. 7469 WorklistRemover DeadNodes(*this); 7470 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 7471 7472 // Shift the result left, if we've swallowed a left shift. 7473 SDValue Result = Load; 7474 if (ShLeftAmt != 0) { 7475 EVT ShImmTy = getShiftAmountTy(Result.getValueType()); 7476 if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt)) 7477 ShImmTy = VT; 7478 // If the shift amount is as large as the result size (but, presumably, 7479 // no larger than the source) then the useful bits of the result are 7480 // zero; we can't simply return the shortened shift, because the result 7481 // of that operation is undefined. 7482 SDLoc DL(N0); 7483 if (ShLeftAmt >= VT.getSizeInBits()) 7484 Result = DAG.getConstant(0, DL, VT); 7485 else 7486 Result = DAG.getNode(ISD::SHL, DL, VT, 7487 Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy)); 7488 } 7489 7490 // Return the new loaded value. 
7491 return Result; 7492 } 7493 7494 SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { 7495 SDValue N0 = N->getOperand(0); 7496 SDValue N1 = N->getOperand(1); 7497 EVT VT = N->getValueType(0); 7498 EVT EVT = cast<VTSDNode>(N1)->getVT(); 7499 unsigned VTBits = VT.getScalarSizeInBits(); 7500 unsigned EVTBits = EVT.getScalarSizeInBits(); 7501 7502 if (N0.isUndef()) 7503 return DAG.getUNDEF(VT); 7504 7505 // fold (sext_in_reg c1) -> c1 7506 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 7507 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1); 7508 7509 // If the input is already sign extended, just drop the extension. 7510 if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1) 7511 return N0; 7512 7513 // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2 7514 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 7515 EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) 7516 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 7517 N0.getOperand(0), N1); 7518 7519 // fold (sext_in_reg (sext x)) -> (sext x) 7520 // fold (sext_in_reg (aext x)) -> (sext x) 7521 // if x is small enough. 7522 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) { 7523 SDValue N00 = N0.getOperand(0); 7524 if (N00.getScalarValueSizeInBits() <= EVTBits && 7525 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 7526 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1); 7527 } 7528 7529 // fold (sext_in_reg (zext x)) -> (sext x) 7530 // iff we are extending the source sign bit. 7531 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 7532 SDValue N00 = N0.getOperand(0); 7533 if (N00.getScalarValueSizeInBits() == EVTBits && 7534 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 7535 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1); 7536 } 7537 7538 // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero. 7539 if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits))) 7540 return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT.getScalarType()); 7541 7542 // fold operands of sext_in_reg based on knowledge that the top bits are not 7543 // demanded. 7544 if (SimplifyDemandedBits(SDValue(N, 0))) 7545 return SDValue(N, 0); 7546 7547 // fold (sext_in_reg (load x)) -> (smaller sextload x) 7548 // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits)) 7549 if (SDValue NarrowLoad = ReduceLoadWidth(N)) 7550 return NarrowLoad; 7551 7552 // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24) 7553 // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible. 7554 // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above. 7555 if (N0.getOpcode() == ISD::SRL) { 7556 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) 7557 if (ShAmt->getZExtValue()+EVTBits <= VTBits) { 7558 // We can turn this into an SRA iff the input to the SRL is already sign 7559 // extended enough. 
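// E.g. with i32 X and an i8 field: ShAmt == 24 gives 32 - (24 + 8) == 0,
// which any value satisfies, so (srl X, 24) always becomes (sra X, 24);
// ShAmt == 23 gives 1, so X must have at least two known sign bits for the
// fold to be safe.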
7560 unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0)); 7561 if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits) 7562 return DAG.getNode(ISD::SRA, SDLoc(N), VT, 7563 N0.getOperand(0), N0.getOperand(1)); 7564 } 7565 } 7566 7567 // fold (sext_inreg (extload x)) -> (sextload x) 7568 if (ISD::isEXTLoad(N0.getNode()) && 7569 ISD::isUNINDEXEDLoad(N0.getNode()) && 7570 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 7571 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 7572 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { 7573 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7574 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 7575 LN0->getChain(), 7576 LN0->getBasePtr(), EVT, 7577 LN0->getMemOperand()); 7578 CombineTo(N, ExtLoad); 7579 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 7580 AddToWorklist(ExtLoad.getNode()); 7581 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7582 } 7583 // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use 7584 if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 7585 N0.hasOneUse() && 7586 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 7587 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 7588 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { 7589 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7590 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 7591 LN0->getChain(), 7592 LN0->getBasePtr(), EVT, 7593 LN0->getMemOperand()); 7594 CombineTo(N, ExtLoad); 7595 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 7596 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7597 } 7598 7599 // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16)) 7600 if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) { 7601 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 7602 N0.getOperand(1), false)) 7603 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 7604 BSwap, N1); 7605 } 7606 7607 return SDValue(); 7608 } 7609 7610 SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) { 7611 SDValue N0 = N->getOperand(0); 7612 EVT VT = N->getValueType(0); 7613 7614 if (N0.isUndef()) 7615 return DAG.getUNDEF(VT); 7616 7617 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7618 LegalOperations)) 7619 return SDValue(Res, 0); 7620 7621 return SDValue(); 7622 } 7623 7624 SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) { 7625 SDValue N0 = N->getOperand(0); 7626 EVT VT = N->getValueType(0); 7627 7628 if (N0.isUndef()) 7629 return DAG.getUNDEF(VT); 7630 7631 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7632 LegalOperations)) 7633 return SDValue(Res, 0); 7634 7635 return SDValue(); 7636 } 7637 7638 SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { 7639 SDValue N0 = N->getOperand(0); 7640 EVT VT = N->getValueType(0); 7641 bool isLE = DAG.getDataLayout().isLittleEndian(); 7642 7643 // noop truncate 7644 if (N0.getValueType() == N->getValueType(0)) 7645 return N0; 7646 // fold (truncate c1) -> c1 7647 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 7648 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0); 7649 // fold (truncate (truncate x)) -> (truncate x) 7650 if (N0.getOpcode() == ISD::TRUNCATE) 7651 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0)); 7652 // fold (truncate (ext x)) -> (ext x) or (truncate x) or x 7653 if (N0.getOpcode() == ISD::ZERO_EXTEND || 7654 N0.getOpcode() == ISD::SIGN_EXTEND || 7655 N0.getOpcode() == ISD::ANY_EXTEND) { 7656 // if the source is 
smaller than the dest, we still need an extend.
7657     if (N0.getOperand(0).getValueType().bitsLT(VT))
7658       return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
7659     // if the source is larger than the dest, then we just need the truncate.
7660     if (N0.getOperand(0).getValueType().bitsGT(VT))
7661       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
7662     // if the source and dest are the same type, we can drop both the extend
7663     // and the truncate.
7664     return N0.getOperand(0);
7665   }
7666
7667   // If this is anyext(trunc), don't fold it, allow ourselves to be folded.
7668   if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND))
7669     return SDValue();
7670
7671   // Fold extract-and-trunc into a narrow extract. For example:
7672   //   i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
7673   //   i32 y = TRUNCATE(i64 x)
7674   //        -- becomes --
7675   //   v16i8 b = BITCAST (v2i64 val)
7676   //   i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
7677   //
7678   // Note: We only run this optimization after type legalization (which often
7679   // creates this pattern) and before operation legalization after which
7680   // we need to be more careful about the vector instructions that we generate.
7681   if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7682       LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
7683
7684     EVT VecTy = N0.getOperand(0).getValueType();
7685     EVT ExTy = N0.getValueType();
7686     EVT TrTy = N->getValueType(0);
7687
7688     unsigned NumElem = VecTy.getVectorNumElements();
7689     unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
7690
7691     EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
7692     assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
7693
7694     SDValue EltNo = N0->getOperand(1);
7695     if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
7696       int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
7697       EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7698       int Index = isLE ?
(Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
7699
7700       SDLoc DL(N);
7701       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TrTy,
7702                          DAG.getBitcast(NVT, N0.getOperand(0)),
7703                          DAG.getConstant(Index, DL, IndexTy));
7704     }
7705   }
7706
7707   // trunc (select c, a, b) -> select c, (trunc a), (trunc b)
7708   if (N0.getOpcode() == ISD::SELECT && N0.hasOneUse()) {
7709     EVT SrcVT = N0.getValueType();
7710     if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
7711         TLI.isTruncateFree(SrcVT, VT)) {
7712       SDLoc SL(N0);
7713       SDValue Cond = N0.getOperand(0);
7714       SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
7715       SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
7716       return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
7717     }
7718   }
7719
7720   // trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits()
7721   if (N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
7722       (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::SHL, VT)) &&
7723       TLI.isTypeDesirableForOp(ISD::SHL, VT)) {
7724     if (const ConstantSDNode *CAmt = isConstOrConstSplat(N0.getOperand(1))) {
7725       uint64_t Amt = CAmt->getZExtValue();
7726       unsigned Size = VT.getScalarSizeInBits();
7727
7728       if (Amt < Size) {
7729         SDLoc SL(N);
7730         EVT AmtVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
7731
7732         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
7733         return DAG.getNode(ISD::SHL, SL, VT, Trunc,
7734                            DAG.getConstant(Amt, SL, AmtVT));
7735       }
7736     }
7737   }
7738
7739   // Fold a series of buildvector, bitcast, and truncate if possible.
7740   // For example fold
7741   // (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
7742   // (2xi32 (buildvector x, y)).
7743   if (Level == AfterLegalizeVectorOps && VT.isVector() &&
7744       N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
7745       N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
7746       N0.getOperand(0).hasOneUse()) {
7747
7748     SDValue BuildVect = N0.getOperand(0);
7749     EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
7750     EVT TruncVecEltTy = VT.getVectorElementType();
7751
7752     // Check that the element types match.
7753     if (BuildVectEltTy == TruncVecEltTy) {
7754       // Now we only need to compute the offset of the truncated elements.
7755       unsigned BuildVecNumElts = BuildVect.getNumOperands();
7756       unsigned TruncVecNumElts = VT.getVectorNumElements();
7757       unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;
7758
7759       assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
7760              "Invalid number of elements");
7761
7762       SmallVector<SDValue, 8> Opnds;
7763       for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
7764         Opnds.push_back(BuildVect.getOperand(i));
7765
7766       return DAG.getBuildVector(VT, SDLoc(N), Opnds);
7767     }
7768   }
7769
7770   // See if we can simplify the input to this truncate through knowledge that
7771   // only the low bits are being used.
7772   // For example "trunc (or (shl x, 8), y)" -> trunc y
7773   // Currently we only perform this optimization on scalars because vectors
7774   // may have different active low bits.
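  // E.g. for (i8 (trunc (or (shl i32 x, 8), y))), only the low 8 bits of the
  // OR are demanded; the shifted operand contributes nothing to them, so the
  // input simplifies to y and the truncate becomes (i8 (trunc y)).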
7775   if (!VT.isVector()) {
7776     if (SDValue Shorter =
7777             GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
7778                                                      VT.getSizeInBits())))
7779       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
7780   }
7781   // fold (truncate (load x)) -> (smaller load x)
7782   // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
7783   if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
7784     if (SDValue Reduced = ReduceLoadWidth(N))
7785       return Reduced;
7786
7787     // Handle the case where the load remains an extending load even
7788     // after truncation.
7789     if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
7790       LoadSDNode *LN0 = cast<LoadSDNode>(N0);
7791       if (!LN0->isVolatile() &&
7792           LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
7793         SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
7794                                          VT, LN0->getChain(), LN0->getBasePtr(),
7795                                          LN0->getMemoryVT(),
7796                                          LN0->getMemOperand());
7797         DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
7798         return NewLoad;
7799       }
7800     }
7801   }
7802   // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...),
7803   // where ... are all 'undef'.
7804   if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
7805     SmallVector<EVT, 8> VTs;
7806     SDValue V;
7807     unsigned Idx = 0;
7808     unsigned NumDefs = 0;
7809
7810     for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
7811       SDValue X = N0.getOperand(i);
7812       if (!X.isUndef()) {
7813         V = X;
7814         Idx = i;
7815         NumDefs++;
7816       }
7817       // Stop if more than one member is non-undef.
7818       if (NumDefs > 1)
7819         break;
7820       VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
7821                                      VT.getVectorElementType(),
7822                                      X.getValueType().getVectorNumElements()));
7823     }
7824
7825     if (NumDefs == 0)
7826       return DAG.getUNDEF(VT);
7827
7828     if (NumDefs == 1) {
7829       assert(V.getNode() && "The single defined operand is empty!");
7830       SmallVector<SDValue, 8> Opnds;
7831       for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
7832         if (i != Idx) {
7833           Opnds.push_back(DAG.getUNDEF(VTs[i]));
7834           continue;
7835         }
7836         SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
7837         AddToWorklist(NV.getNode());
7838         Opnds.push_back(NV);
7839       }
7840       return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
7841     }
7842   }
7843
7844   // Fold truncate of a bitcast of a vector to an extract of the low vector
7845   // element.
7846   //
7847   // e.g. trunc (i64 (bitcast v2i32:x)) -> extract_vector_elt v2i32:x, 0
7848   if (N0.getOpcode() == ISD::BITCAST && !VT.isVector()) {
7849     SDValue VecSrc = N0.getOperand(0);
7850     EVT SrcVT = VecSrc.getValueType();
7851     if (SrcVT.isVector() && SrcVT.getScalarType() == VT &&
7852         (!LegalOperations ||
7853          TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, SrcVT))) {
7854       SDLoc SL(N);
7855
7856       EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
7857       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, VT,
7858                          VecSrc, DAG.getConstant(0, SL, IdxVT));
7859     }
7860   }
7861
7862   // Simplify the operands using demanded-bits information.
7863   if (!VT.isVector() &&
7864       SimplifyDemandedBits(SDValue(N, 0)))
7865     return SDValue(N, 0);
7866
7867   return SDValue();
7868 }
7869
7870 static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
7871   SDValue Elt = N->getOperand(i);
7872   if (Elt.getOpcode() != ISD::MERGE_VALUES)
7873     return Elt.getNode();
7874   return Elt.getOperand(Elt.getResNo()).getNode();
7875 }
7876
7877 /// build_pair (load, load) -> load
7878 /// if load locations are consecutive.
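/// E.g. an i64 assembled from two i32 loads folds to one i64 load when the
/// second element's load reads exactly four bytes past the first and the
/// wider load is legal and sufficiently aligned (all checked below).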
7879 SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) { 7880 assert(N->getOpcode() == ISD::BUILD_PAIR); 7881 7882 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0)); 7883 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1)); 7884 if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() || 7885 LD1->getAddressSpace() != LD2->getAddressSpace()) 7886 return SDValue(); 7887 EVT LD1VT = LD1->getValueType(0); 7888 unsigned LD1Bytes = LD1VT.getSizeInBits() / 8; 7889 if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() && 7890 DAG.areNonVolatileConsecutiveLoads(LD2, LD1, LD1Bytes, 1)) { 7891 unsigned Align = LD1->getAlignment(); 7892 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( 7893 VT.getTypeForEVT(*DAG.getContext())); 7894 7895 if (NewAlign <= Align && 7896 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) 7897 return DAG.getLoad(VT, SDLoc(N), LD1->getChain(), LD1->getBasePtr(), 7898 LD1->getPointerInfo(), Align); 7899 } 7900 7901 return SDValue(); 7902 } 7903 7904 static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) { 7905 // On little-endian machines, bitcasting from ppcf128 to i128 does swap the Hi 7906 // and Lo parts; on big-endian machines it doesn't. 7907 return DAG.getDataLayout().isBigEndian() ? 1 : 0; 7908 } 7909 7910 static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG, 7911 const TargetLowering &TLI) { 7912 // If this is not a bitcast to an FP type or if the target doesn't have 7913 // IEEE754-compliant FP logic, we're done. 7914 EVT VT = N->getValueType(0); 7915 if (!VT.isFloatingPoint() || !TLI.hasBitPreservingFPLogic(VT)) 7916 return SDValue(); 7917 7918 // TODO: Use splat values for the constant-checking below and remove this 7919 // restriction. 7920 SDValue N0 = N->getOperand(0); 7921 EVT SourceVT = N0.getValueType(); 7922 if (SourceVT.isVector()) 7923 return SDValue(); 7924 7925 unsigned FPOpcode; 7926 APInt SignMask; 7927 switch (N0.getOpcode()) { 7928 case ISD::AND: 7929 FPOpcode = ISD::FABS; 7930 SignMask = ~APInt::getSignBit(SourceVT.getSizeInBits()); 7931 break; 7932 case ISD::XOR: 7933 FPOpcode = ISD::FNEG; 7934 SignMask = APInt::getSignBit(SourceVT.getSizeInBits()); 7935 break; 7936 // TODO: ISD::OR --> ISD::FNABS? 7937 default: 7938 return SDValue(); 7939 } 7940 7941 // Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X 7942 // Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X 7943 SDValue LogicOp0 = N0.getOperand(0); 7944 ConstantSDNode *LogicOp1 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 7945 if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask && 7946 LogicOp0.getOpcode() == ISD::BITCAST && 7947 LogicOp0->getOperand(0).getValueType() == VT) 7948 return DAG.getNode(FPOpcode, SDLoc(N), VT, LogicOp0->getOperand(0)); 7949 7950 return SDValue(); 7951 } 7952 7953 SDValue DAGCombiner::visitBITCAST(SDNode *N) { 7954 SDValue N0 = N->getOperand(0); 7955 EVT VT = N->getValueType(0); 7956 7957 // If the input is a BUILD_VECTOR with all constant elements, fold this now. 7958 // Only do this before legalize, since afterward the target may be depending 7959 // on the bitconvert. 7960 // First check to see if this is all constant. 
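  // E.g. a v2i64 build_vector of constants bitcast to v4i32 folds right here
  // into a v4i32 build_vector of the recomputed 32-bit constant halves.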
7961   if (!LegalTypes &&
7962       N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
7963       VT.isVector()) {
7964     bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();
7965
7966     EVT DestEltVT = N->getValueType(0).getVectorElementType();
7967     assert(!DestEltVT.isVector() &&
7968            "Element type of vector ValueType must not be vector!");
7969     if (isSimple)
7970       return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
7971   }
7972
7973   // If the input is a constant, let getNode fold it.
7974   if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
7975     // If we can't allow illegal operations, we need to check that this is just
7976     // an fp -> int or int -> fp conversion and that the resulting operation
7977     // will be legal.
7978     if (!LegalOperations ||
7979         (isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
7980          TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
7981         (isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
7982          TLI.isOperationLegal(ISD::Constant, VT)))
7983       return DAG.getBitcast(VT, N0);
7984   }
7985
7986   // (conv (conv x, t1), t2) -> (conv x, t2)
7987   if (N0.getOpcode() == ISD::BITCAST)
7988     return DAG.getBitcast(VT, N0.getOperand(0));
7989
7990   // fold (conv (load x)) -> (load (conv*)x)
7991   // If the resultant load doesn't need a higher alignment than the original!
7992   if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
7993       // Do not change the width of a volatile load.
7994       !cast<LoadSDNode>(N0)->isVolatile() &&
7995       // Do not remove the cast if the types differ in endian layout.
7996       TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
7997           TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
7998       (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
7999       TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
8000     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
8001     unsigned OrigAlign = LN0->getAlignment();
8002
8003     bool Fast = false;
8004     if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
8005                                LN0->getAddressSpace(), OrigAlign, &Fast) &&
8006         Fast) {
8007       SDValue Load =
8008           DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
8009                       LN0->getPointerInfo(), OrigAlign,
8010                       LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
8011       DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
8012       return Load;
8013     }
8014   }
8015
8016   if (SDValue V = foldBitcastedFPLogic(N, DAG, TLI))
8017     return V;
8018
8019   // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8020   // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8021   //
8022   // For ppc_fp128:
8023   // fold (bitcast (fneg x)) ->
8024   //     flipbit = signbit
8025   //     (xor (bitcast x) (build_pair flipbit, flipbit))
8026   //
8027   // fold (bitcast (fabs x)) ->
8028   //     flipbit = (and (extract_element (bitcast x), 0), signbit)
8029   //     (xor (bitcast x) (build_pair flipbit, flipbit))
8030   // This often reduces constant pool loads.
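  // E.g. for f64, fneg becomes an i64 xor with 0x8000000000000000 and fabs
  // an i64 and with 0x7FFFFFFFFFFFFFFF once the value is viewed as i64.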
8031 if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) || 8032 (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) && 8033 N0.getNode()->hasOneUse() && VT.isInteger() && 8034 !VT.isVector() && !N0.getValueType().isVector()) { 8035 SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0)); 8036 AddToWorklist(NewConv.getNode()); 8037 8038 SDLoc DL(N); 8039 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { 8040 assert(VT.getSizeInBits() == 128); 8041 SDValue SignBit = DAG.getConstant( 8042 APInt::getSignBit(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64); 8043 SDValue FlipBit; 8044 if (N0.getOpcode() == ISD::FNEG) { 8045 FlipBit = SignBit; 8046 AddToWorklist(FlipBit.getNode()); 8047 } else { 8048 assert(N0.getOpcode() == ISD::FABS); 8049 SDValue Hi = 8050 DAG.getNode(ISD::EXTRACT_ELEMENT, SDLoc(NewConv), MVT::i64, NewConv, 8051 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG), 8052 SDLoc(NewConv))); 8053 AddToWorklist(Hi.getNode()); 8054 FlipBit = DAG.getNode(ISD::AND, SDLoc(N0), MVT::i64, Hi, SignBit); 8055 AddToWorklist(FlipBit.getNode()); 8056 } 8057 SDValue FlipBits = 8058 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit); 8059 AddToWorklist(FlipBits.getNode()); 8060 return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits); 8061 } 8062 APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); 8063 if (N0.getOpcode() == ISD::FNEG) 8064 return DAG.getNode(ISD::XOR, DL, VT, 8065 NewConv, DAG.getConstant(SignBit, DL, VT)); 8066 assert(N0.getOpcode() == ISD::FABS); 8067 return DAG.getNode(ISD::AND, DL, VT, 8068 NewConv, DAG.getConstant(~SignBit, DL, VT)); 8069 } 8070 8071 // fold (bitconvert (fcopysign cst, x)) -> 8072 // (or (and (bitconvert x), sign), (and cst, (not sign))) 8073 // Note that we don't handle (copysign x, cst) because this can always be 8074 // folded to an fneg or fabs. 8075 // 8076 // For ppc_fp128: 8077 // fold (bitcast (fcopysign cst, x)) -> 8078 // flipbit = (and (extract_element 8079 // (xor (bitcast cst), (bitcast x)), 0), 8080 // signbit) 8081 // (xor (bitcast cst) (build_pair flipbit, flipbit)) 8082 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() && 8083 isa<ConstantFPSDNode>(N0.getOperand(0)) && 8084 VT.isInteger() && !VT.isVector()) { 8085 unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits(); 8086 EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth); 8087 if (isTypeLegal(IntXVT)) { 8088 SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1)); 8089 AddToWorklist(X.getNode()); 8090 8091 // If X has a different width than the result/lhs, sext it or truncate it. 8092 unsigned VTWidth = VT.getSizeInBits(); 8093 if (OrigXWidth < VTWidth) { 8094 X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X); 8095 AddToWorklist(X.getNode()); 8096 } else if (OrigXWidth > VTWidth) { 8097 // To get the sign bit in the right place, we have to shift it right 8098 // before truncating. 
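      // E.g. for an i64 X and an i32 result, shift right by 64-32 = 32 so the
      // sign bit lands in bit 31 of the truncated value.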
8099 SDLoc DL(X); 8100 X = DAG.getNode(ISD::SRL, DL, 8101 X.getValueType(), X, 8102 DAG.getConstant(OrigXWidth-VTWidth, DL, 8103 X.getValueType())); 8104 AddToWorklist(X.getNode()); 8105 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); 8106 AddToWorklist(X.getNode()); 8107 } 8108 8109 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { 8110 APInt SignBit = APInt::getSignBit(VT.getSizeInBits() / 2); 8111 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0)); 8112 AddToWorklist(Cst.getNode()); 8113 SDValue X = DAG.getBitcast(VT, N0.getOperand(1)); 8114 AddToWorklist(X.getNode()); 8115 SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X); 8116 AddToWorklist(XorResult.getNode()); 8117 SDValue XorResult64 = DAG.getNode( 8118 ISD::EXTRACT_ELEMENT, SDLoc(XorResult), MVT::i64, XorResult, 8119 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG), 8120 SDLoc(XorResult))); 8121 AddToWorklist(XorResult64.getNode()); 8122 SDValue FlipBit = 8123 DAG.getNode(ISD::AND, SDLoc(XorResult64), MVT::i64, XorResult64, 8124 DAG.getConstant(SignBit, SDLoc(XorResult64), MVT::i64)); 8125 AddToWorklist(FlipBit.getNode()); 8126 SDValue FlipBits = 8127 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit); 8128 AddToWorklist(FlipBits.getNode()); 8129 return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits); 8130 } 8131 APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); 8132 X = DAG.getNode(ISD::AND, SDLoc(X), VT, 8133 X, DAG.getConstant(SignBit, SDLoc(X), VT)); 8134 AddToWorklist(X.getNode()); 8135 8136 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0)); 8137 Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT, 8138 Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT)); 8139 AddToWorklist(Cst.getNode()); 8140 8141 return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst); 8142 } 8143 } 8144 8145 // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive. 8146 if (N0.getOpcode() == ISD::BUILD_PAIR) 8147 if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT)) 8148 return CombineLD; 8149 8150 // Remove double bitcasts from shuffles - this is often a legacy of 8151 // XformToShuffleWithZero being used to combine bitmaskings (of 8152 // float vectors bitcast to integer vectors) into shuffles. 8153 // bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1) 8154 if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() && 8155 N0->getOpcode() == ISD::VECTOR_SHUFFLE && 8156 VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() && 8157 !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) { 8158 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0); 8159 8160 // If operands are a bitcast, peek through if it casts the original VT. 8161 // If operands are a constant, just bitcast back to original VT. 
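    // E.g. for (v4f32 bitcast (shuffle (v2i64 bitcast v4f32:s0),
    //                                  (v2i64 bitcast v4f32:s1))), both peeks
    // recover s0 and s1, and the mask is rescaled by 4/2 = 2 below to address
    // the f32 lanes directly.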
8162     auto PeekThroughBitcast = [&](SDValue Op) {
8163       if (Op.getOpcode() == ISD::BITCAST &&
8164           Op.getOperand(0).getValueType() == VT)
8165         return SDValue(Op.getOperand(0));
8166       if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
8167           ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
8168         return DAG.getBitcast(VT, Op);
8169       return SDValue();
8170     };
8171
8172     SDValue SV0 = PeekThroughBitcast(N0->getOperand(0));
8173     SDValue SV1 = PeekThroughBitcast(N0->getOperand(1));
8174     if (!(SV0 && SV1))
8175       return SDValue();
8176
8177     int MaskScale =
8178         VT.getVectorNumElements() / N0.getValueType().getVectorNumElements();
8179     SmallVector<int, 8> NewMask;
8180     for (int M : SVN->getMask())
8181       for (int i = 0; i != MaskScale; ++i)
8182         NewMask.push_back(M < 0 ? -1 : M * MaskScale + i);
8183
8184     bool LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
8185     if (!LegalMask) {
8186       std::swap(SV0, SV1);
8187       ShuffleVectorSDNode::commuteMask(NewMask);
8188       LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
8189     }
8190
8191     if (LegalMask)
8192       return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask);
8193   }
8194
8195   return SDValue();
8196 }
8197
8198 SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
8199   EVT VT = N->getValueType(0);
8200   return CombineConsecutiveLoads(N, VT);
8201 }
8202
8203 /// We know that BV is a build_vector node with Constant, ConstantFP or Undef
8204 /// operands. DstEltVT indicates the destination element value type.
8205 SDValue DAGCombiner::
8206 ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
8207   EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
8208
8209   // If this is already the right type, we're done.
8210   if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
8211
8212   unsigned SrcBitSize = SrcEltVT.getSizeInBits();
8213   unsigned DstBitSize = DstEltVT.getSizeInBits();
8214
8215   // If this is a conversion of N elements of one type to N elements of another
8216   // type, convert each element. This handles FP<->INT cases.
8217   if (SrcBitSize == DstBitSize) {
8218     EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
8219                               BV->getValueType(0).getVectorNumElements());
8220
8221     // Due to the FP element handling below calling this routine recursively,
8222     // we can end up with a scalar-to-vector node here.
8223     if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
8224       return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
8225                          DAG.getBitcast(DstEltVT, BV->getOperand(0)));
8226
8227     SmallVector<SDValue, 8> Ops;
8228     for (SDValue Op : BV->op_values()) {
8229       // If the vector element type is not legal, the BUILD_VECTOR operands
8230       // are promoted and implicitly truncated. Make that explicit here.
8231       if (Op.getValueType() != SrcEltVT)
8232         Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
8233       Ops.push_back(DAG.getBitcast(DstEltVT, Op));
8234       AddToWorklist(Ops.back().getNode());
8235     }
8236     return DAG.getBuildVector(VT, SDLoc(BV), Ops);
8237   }
8238
8239   // Otherwise, we're growing or shrinking the elements. To avoid having to
8240   // handle annoying details of growing/shrinking FP values, we convert them to
8241   // int first.
8242   if (SrcEltVT.isFloatingPoint()) {
8243     // Convert the input float vector to an int vector where the elements are
8244     // the same size.
8245     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
8246     BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
8247     SrcEltVT = IntVT;
8248   }
8249
8250   // Now we know the input is an integer vector.
If the output is a FP type, 8251 // convert to integer first, then to FP of the right size. 8252 if (DstEltVT.isFloatingPoint()) { 8253 EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits()); 8254 SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode(); 8255 8256 // Next, convert to FP elements of the same size. 8257 return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT); 8258 } 8259 8260 SDLoc DL(BV); 8261 8262 // Okay, we know the src/dst types are both integers of differing types. 8263 // Handling growing first. 8264 assert(SrcEltVT.isInteger() && DstEltVT.isInteger()); 8265 if (SrcBitSize < DstBitSize) { 8266 unsigned NumInputsPerOutput = DstBitSize/SrcBitSize; 8267 8268 SmallVector<SDValue, 8> Ops; 8269 for (unsigned i = 0, e = BV->getNumOperands(); i != e; 8270 i += NumInputsPerOutput) { 8271 bool isLE = DAG.getDataLayout().isLittleEndian(); 8272 APInt NewBits = APInt(DstBitSize, 0); 8273 bool EltIsUndef = true; 8274 for (unsigned j = 0; j != NumInputsPerOutput; ++j) { 8275 // Shift the previously computed bits over. 8276 NewBits <<= SrcBitSize; 8277 SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j)); 8278 if (Op.isUndef()) continue; 8279 EltIsUndef = false; 8280 8281 NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue(). 8282 zextOrTrunc(SrcBitSize).zext(DstBitSize); 8283 } 8284 8285 if (EltIsUndef) 8286 Ops.push_back(DAG.getUNDEF(DstEltVT)); 8287 else 8288 Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT)); 8289 } 8290 8291 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size()); 8292 return DAG.getBuildVector(VT, DL, Ops); 8293 } 8294 8295 // Finally, this must be the case where we are shrinking elements: each input 8296 // turns into multiple outputs. 8297 unsigned NumOutputsPerInput = SrcBitSize/DstBitSize; 8298 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, 8299 NumOutputsPerInput*BV->getNumOperands()); 8300 SmallVector<SDValue, 8> Ops; 8301 8302 for (const SDValue &Op : BV->op_values()) { 8303 if (Op.isUndef()) { 8304 Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT)); 8305 continue; 8306 } 8307 8308 APInt OpVal = cast<ConstantSDNode>(Op)-> 8309 getAPIntValue().zextOrTrunc(SrcBitSize); 8310 8311 for (unsigned j = 0; j != NumOutputsPerInput; ++j) { 8312 APInt ThisVal = OpVal.trunc(DstBitSize); 8313 Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT)); 8314 OpVal = OpVal.lshr(DstBitSize); 8315 } 8316 8317 // For big endian targets, swap the order of the pieces of each element. 8318 if (DAG.getDataLayout().isBigEndian()) 8319 std::reverse(Ops.end()-NumOutputsPerInput, Ops.end()); 8320 } 8321 8322 return DAG.getBuildVector(VT, DL, Ops); 8323 } 8324 8325 /// Try to perform FMA combining on a given FADD node. 8326 SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) { 8327 SDValue N0 = N->getOperand(0); 8328 SDValue N1 = N->getOperand(1); 8329 EVT VT = N->getValueType(0); 8330 SDLoc SL(N); 8331 8332 const TargetOptions &Options = DAG.getTarget().Options; 8333 bool AllowFusion = 8334 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath); 8335 8336 // Floating-point multiply-add with intermediate rounding. 8337 bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 8338 8339 // Floating-point multiply-add without intermediate rounding. 8340 bool HasFMA = 8341 AllowFusion && TLI.isFMAFasterThanFMulAndFAdd(VT) && 8342 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 8343 8344 // No valid opcode, do not combine. 
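  // Either form is enough to fuse the add with the multiply: FMAD keeps the
  // intermediate rounding of the product, FMA omits it.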
8345   if (!HasFMAD && !HasFMA)
8346     return SDValue();
8347
8348   const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
8350   if (AllowFusion && STI && STI->generateFMAsInMachineCombiner(OptLevel))
8351     return SDValue();
8352
8353   // Always prefer FMAD to FMA for precision.
8354   unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
8355   bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
8356   bool LookThroughFPExt = TLI.isFPExtFree(VT);
8357
8358   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
8359   // prefer to fold the multiply with fewer uses.
8360   if (Aggressive && N0.getOpcode() == ISD::FMUL &&
8361       N1.getOpcode() == ISD::FMUL) {
8362     if (N0.getNode()->use_size() > N1.getNode()->use_size())
8363       std::swap(N0, N1);
8364   }
8365
8366   // fold (fadd (fmul x, y), z) -> (fma x, y, z)
8367   if (N0.getOpcode() == ISD::FMUL &&
8368       (Aggressive || N0->hasOneUse())) {
8369     return DAG.getNode(PreferredFusedOpcode, SL, VT,
8370                        N0.getOperand(0), N0.getOperand(1), N1);
8371   }
8372
8373   // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
8374   // Note: Commutes FADD operands.
8375   if (N1.getOpcode() == ISD::FMUL &&
8376       (Aggressive || N1->hasOneUse())) {
8377     return DAG.getNode(PreferredFusedOpcode, SL, VT,
8378                        N1.getOperand(0), N1.getOperand(1), N0);
8379   }
8380
8381   // Look through FP_EXTEND nodes to do more combining.
8382   if (AllowFusion && LookThroughFPExt) {
8383     // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
8384     if (N0.getOpcode() == ISD::FP_EXTEND) {
8385       SDValue N00 = N0.getOperand(0);
8386       if (N00.getOpcode() == ISD::FMUL)
8387         return DAG.getNode(PreferredFusedOpcode, SL, VT,
8388                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8389                                        N00.getOperand(0)),
8390                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8391                                        N00.getOperand(1)), N1);
8392     }
8393
8394     // fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
8395     // Note: Commutes FADD operands.
8396     if (N1.getOpcode() == ISD::FP_EXTEND) {
8397       SDValue N10 = N1.getOperand(0);
8398       if (N10.getOpcode() == ISD::FMUL)
8399         return DAG.getNode(PreferredFusedOpcode, SL, VT,
8400                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8401                                        N10.getOperand(0)),
8402                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8403                                        N10.getOperand(1)), N0);
8404     }
8405   }
8406
8407   // More folding opportunities when target permits.
8408   if (Aggressive) {
8409     // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
8410     // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF
8411     // are currently only supported on binary nodes.
8412     if (Options.UnsafeFPMath &&
8413         N0.getOpcode() == PreferredFusedOpcode &&
8414         N0.getOperand(2).getOpcode() == ISD::FMUL &&
8415         N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
8416       return DAG.getNode(PreferredFusedOpcode, SL, VT,
8417                          N0.getOperand(0), N0.getOperand(1),
8418                          DAG.getNode(PreferredFusedOpcode, SL, VT,
8419                                      N0.getOperand(2).getOperand(0),
8420                                      N0.getOperand(2).getOperand(1),
8421                                      N1));
8422     }
8423
8424     // fold (fadd x, (fma y, z, (fmul u, v))) -> (fma y, z, (fma u, v, x))
8425     // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF
8426     // are currently only supported on binary nodes.
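    // Reassociating this way fuses the remaining fmul into a second fused
    // node, at the cost of moving a rounding point; hence the UnsafeFPMath
    // guard.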
8427     if (Options.UnsafeFPMath &&
8428         N1->getOpcode() == PreferredFusedOpcode &&
8429         N1.getOperand(2).getOpcode() == ISD::FMUL &&
8430         N1->hasOneUse() && N1.getOperand(2)->hasOneUse()) {
8431       return DAG.getNode(PreferredFusedOpcode, SL, VT,
8432                          N1.getOperand(0), N1.getOperand(1),
8433                          DAG.getNode(PreferredFusedOpcode, SL, VT,
8434                                      N1.getOperand(2).getOperand(0),
8435                                      N1.getOperand(2).getOperand(1),
8436                                      N0));
8437     }
8438
8439     if (AllowFusion && LookThroughFPExt) {
8440       // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
8441       //   -> (fma x, y, (fma (fpext u), (fpext v), z))
8442       auto FoldFAddFMAFPExtFMul = [&] (
8443           SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) {
8444         return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
8445                            DAG.getNode(PreferredFusedOpcode, SL, VT,
8446                                        DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
8447                                        DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
8448                                        Z));
8449       };
8450       if (N0.getOpcode() == PreferredFusedOpcode) {
8451         SDValue N02 = N0.getOperand(2);
8452         if (N02.getOpcode() == ISD::FP_EXTEND) {
8453           SDValue N020 = N02.getOperand(0);
8454           if (N020.getOpcode() == ISD::FMUL)
8455             return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
8456                                         N020.getOperand(0), N020.getOperand(1),
8457                                         N1);
8458         }
8459       }
8460
8461       // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
8462       //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
8463       // FIXME: This turns two single-precision and one double-precision
8464       // operation into two double-precision operations, which might not be
8465       // interesting for all targets, especially GPUs.
8466       auto FoldFAddFPExtFMAFMul = [&] (
8467           SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) {
8468         return DAG.getNode(PreferredFusedOpcode, SL, VT,
8469                            DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
8470                            DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
8471                            DAG.getNode(PreferredFusedOpcode, SL, VT,
8472                                        DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
8473                                        DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
8474                                        Z));
8475       };
8476       if (N0.getOpcode() == ISD::FP_EXTEND) {
8477         SDValue N00 = N0.getOperand(0);
8478         if (N00.getOpcode() == PreferredFusedOpcode) {
8479           SDValue N002 = N00.getOperand(2);
8480           if (N002.getOpcode() == ISD::FMUL)
8481             return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
8482                                         N002.getOperand(0), N002.getOperand(1),
8483                                         N1);
8484         }
8485       }
8486
8487       // fold (fadd x, (fma y, z, (fpext (fmul u, v))))
8488       //   -> (fma y, z, (fma (fpext u), (fpext v), x))
8489       if (N1.getOpcode() == PreferredFusedOpcode) {
8490         SDValue N12 = N1.getOperand(2);
8491         if (N12.getOpcode() == ISD::FP_EXTEND) {
8492           SDValue N120 = N12.getOperand(0);
8493           if (N120.getOpcode() == ISD::FMUL)
8494             return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
8495                                         N120.getOperand(0), N120.getOperand(1),
8496                                         N0);
8497         }
8498       }
8499
8500       // fold (fadd x, (fpext (fma y, z, (fmul u, v))))
8501       //   -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
8502       // FIXME: This turns two single-precision and one double-precision
8503       // operation into two double-precision operations, which might not be
8504       // interesting for all targets, especially GPUs.
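      // E.g. f32 multiplies feeding an f64 add all become f64 fused
      // operations here, trading narrow multiplies for wide ones.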
8505       if (N1.getOpcode() == ISD::FP_EXTEND) {
8506         SDValue N10 = N1.getOperand(0);
8507         if (N10.getOpcode() == PreferredFusedOpcode) {
8508           SDValue N102 = N10.getOperand(2);
8509           if (N102.getOpcode() == ISD::FMUL)
8510             return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
8511                                         N102.getOperand(0), N102.getOperand(1),
8512                                         N0);
8513         }
8514       }
8515     }
8516   }
8517
8518   return SDValue();
8519 }
8520
8521 /// Try to perform FMA combining on a given FSUB node.
8522 SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
8523   SDValue N0 = N->getOperand(0);
8524   SDValue N1 = N->getOperand(1);
8525   EVT VT = N->getValueType(0);
8526   SDLoc SL(N);
8527
8528   const TargetOptions &Options = DAG.getTarget().Options;
8529   bool AllowFusion =
8530       (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
8531
8532   // Floating-point multiply-add with intermediate rounding.
8533   bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
8534
8535   // Floating-point multiply-add without intermediate rounding.
8536   bool HasFMA =
8537       AllowFusion && TLI.isFMAFasterThanFMulAndFAdd(VT) &&
8538       (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
8539
8540   // No valid opcode, do not combine.
8541   if (!HasFMAD && !HasFMA)
8542     return SDValue();
8543
8544   const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
8545   if (AllowFusion && STI && STI->generateFMAsInMachineCombiner(OptLevel))
8546     return SDValue();
8547
8548   // Always prefer FMAD to FMA for precision.
8549   unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
8550   bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
8551   bool LookThroughFPExt = TLI.isFPExtFree(VT);
8552
8553   // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
8554   if (N0.getOpcode() == ISD::FMUL &&
8555       (Aggressive || N0->hasOneUse())) {
8556     return DAG.getNode(PreferredFusedOpcode, SL, VT,
8557                        N0.getOperand(0), N0.getOperand(1),
8558                        DAG.getNode(ISD::FNEG, SL, VT, N1));
8559   }
8560
8561   // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
8562   // Note: Commutes FSUB operands.
8563   if (N1.getOpcode() == ISD::FMUL &&
8564       (Aggressive || N1->hasOneUse()))
8565     return DAG.getNode(PreferredFusedOpcode, SL, VT,
8566                        DAG.getNode(ISD::FNEG, SL, VT,
8567                                    N1.getOperand(0)),
8568                        N1.getOperand(1), N0);
8569
8570   // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
8571   if (N0.getOpcode() == ISD::FNEG &&
8572       N0.getOperand(0).getOpcode() == ISD::FMUL &&
8573       (Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) {
8574     SDValue N00 = N0.getOperand(0).getOperand(0);
8575     SDValue N01 = N0.getOperand(0).getOperand(1);
8576     return DAG.getNode(PreferredFusedOpcode, SL, VT,
8577                        DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
8578                        DAG.getNode(ISD::FNEG, SL, VT, N1));
8579   }
8580
8581   // Look through FP_EXTEND nodes to do more combining.
8582   if (AllowFusion && LookThroughFPExt) {
8583     // fold (fsub (fpext (fmul x, y)), z)
8584     //   -> (fma (fpext x), (fpext y), (fneg z))
8585     if (N0.getOpcode() == ISD::FP_EXTEND) {
8586       SDValue N00 = N0.getOperand(0);
8587       if (N00.getOpcode() == ISD::FMUL)
8588         return DAG.getNode(PreferredFusedOpcode, SL, VT,
8589                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8590                                        N00.getOperand(0)),
8591                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8592                                        N00.getOperand(1)),
8593                            DAG.getNode(ISD::FNEG, SL, VT, N1));
8594     }
8595
8596     // fold (fsub x, (fpext (fmul y, z)))
8597     //   -> (fma (fneg (fpext y)), (fpext z), x)
8598     // Note: Commutes FSUB operands.
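    // E.g. (fsub f64:x, (fpext (fmul f32:y, f32:z))) is fused at f64 here;
    // LookThroughFPExt (TLI.isFPExtFree) ensures the widenings are free.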
8599     if (N1.getOpcode() == ISD::FP_EXTEND) {
8600       SDValue N10 = N1.getOperand(0);
8601       if (N10.getOpcode() == ISD::FMUL)
8602         return DAG.getNode(PreferredFusedOpcode, SL, VT,
8603                            DAG.getNode(ISD::FNEG, SL, VT,
8604                                        DAG.getNode(ISD::FP_EXTEND, SL, VT,
8605                                                    N10.getOperand(0))),
8606                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8607                                        N10.getOperand(1)),
8608                            N0);
8609     }
8610
8611     // fold (fsub (fpext (fneg (fmul x, y))), z)
8612     //   -> (fneg (fma (fpext x), (fpext y), z))
8613     // Note: This could be removed with appropriate canonicalization of the
8614     // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
8615     // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
8616     // us from implementing the canonicalization in visitFSUB.
8617     if (N0.getOpcode() == ISD::FP_EXTEND) {
8618       SDValue N00 = N0.getOperand(0);
8619       if (N00.getOpcode() == ISD::FNEG) {
8620         SDValue N000 = N00.getOperand(0);
8621         if (N000.getOpcode() == ISD::FMUL) {
8622           return DAG.getNode(ISD::FNEG, SL, VT,
8623                              DAG.getNode(PreferredFusedOpcode, SL, VT,
8624                                          DAG.getNode(ISD::FP_EXTEND, SL, VT,
8625                                                      N000.getOperand(0)),
8626                                          DAG.getNode(ISD::FP_EXTEND, SL, VT,
8627                                                      N000.getOperand(1)),
8628                                          N1));
8629         }
8630       }
8631     }
8632
8633     // fold (fsub (fneg (fpext (fmul x, y))), z)
8634     //   -> (fneg (fma (fpext x), (fpext y), z))
8635     // Note: This could be removed with appropriate canonicalization of the
8636     // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
8637     // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
8638     // us from implementing the canonicalization in visitFSUB.
8639     if (N0.getOpcode() == ISD::FNEG) {
8640       SDValue N00 = N0.getOperand(0);
8641       if (N00.getOpcode() == ISD::FP_EXTEND) {
8642         SDValue N000 = N00.getOperand(0);
8643         if (N000.getOpcode() == ISD::FMUL) {
8644           return DAG.getNode(ISD::FNEG, SL, VT,
8645                              DAG.getNode(PreferredFusedOpcode, SL, VT,
8646                                          DAG.getNode(ISD::FP_EXTEND, SL, VT,
8647                                                      N000.getOperand(0)),
8648                                          DAG.getNode(ISD::FP_EXTEND, SL, VT,
8649                                                      N000.getOperand(1)),
8650                                          N1));
8651         }
8652       }
8653     }
8654
8655   }
8656
8657   // More folding opportunities when target permits.
8658   if (Aggressive) {
8659     // fold (fsub (fma x, y, (fmul u, v)), z)
8660     //   -> (fma x, y, (fma u, v, (fneg z)))
8661     // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF
8662     // are currently only supported on binary nodes.
8663     if (Options.UnsafeFPMath &&
8664         N0.getOpcode() == PreferredFusedOpcode &&
8665         N0.getOperand(2).getOpcode() == ISD::FMUL &&
8666         N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
8667       return DAG.getNode(PreferredFusedOpcode, SL, VT,
8668                          N0.getOperand(0), N0.getOperand(1),
8669                          DAG.getNode(PreferredFusedOpcode, SL, VT,
8670                                      N0.getOperand(2).getOperand(0),
8671                                      N0.getOperand(2).getOperand(1),
8672                                      DAG.getNode(ISD::FNEG, SL, VT,
8673                                                  N1)));
8674     }
8675
8676     // fold (fsub x, (fma y, z, (fmul u, v)))
8677     //   -> (fma (fneg y), z, (fma (fneg u), v, x))
8678     // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF
8679     // are currently only supported on binary nodes.
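    // Negating y and u preserves the value:
    //   x - (y*z + u*v) == (-y)*z + ((-u)*v + x).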
8680     if (Options.UnsafeFPMath &&
8681         N1.getOpcode() == PreferredFusedOpcode &&
8682         N1.getOperand(2).getOpcode() == ISD::FMUL) {
8683       SDValue N20 = N1.getOperand(2).getOperand(0);
8684       SDValue N21 = N1.getOperand(2).getOperand(1);
8685       return DAG.getNode(PreferredFusedOpcode, SL, VT,
8686                          DAG.getNode(ISD::FNEG, SL, VT,
8687                                      N1.getOperand(0)),
8688                          N1.getOperand(1),
8689                          DAG.getNode(PreferredFusedOpcode, SL, VT,
8690                                      DAG.getNode(ISD::FNEG, SL, VT, N20),
8692                                      N21, N0));
8693     }
8694
8695     if (AllowFusion && LookThroughFPExt) {
8696       // fold (fsub (fma x, y, (fpext (fmul u, v))), z)
8697       //   -> (fma x, y, (fma (fpext u), (fpext v), (fneg z)))
8698       if (N0.getOpcode() == PreferredFusedOpcode) {
8699         SDValue N02 = N0.getOperand(2);
8700         if (N02.getOpcode() == ISD::FP_EXTEND) {
8701           SDValue N020 = N02.getOperand(0);
8702           if (N020.getOpcode() == ISD::FMUL)
8703             return DAG.getNode(PreferredFusedOpcode, SL, VT,
8704                                N0.getOperand(0), N0.getOperand(1),
8705                                DAG.getNode(PreferredFusedOpcode, SL, VT,
8706                                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8707                                                        N020.getOperand(0)),
8708                                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8709                                                        N020.getOperand(1)),
8710                                            DAG.getNode(ISD::FNEG, SL, VT,
8711                                                        N1)));
8712         }
8713       }
8714
8715       // fold (fsub (fpext (fma x, y, (fmul u, v))), z)
8716       //   -> (fma (fpext x), (fpext y),
8717       //           (fma (fpext u), (fpext v), (fneg z)))
8718       // FIXME: This turns two single-precision and one double-precision
8719       // operation into two double-precision operations, which might not be
8720       // interesting for all targets, especially GPUs.
8721       if (N0.getOpcode() == ISD::FP_EXTEND) {
8722         SDValue N00 = N0.getOperand(0);
8723         if (N00.getOpcode() == PreferredFusedOpcode) {
8724           SDValue N002 = N00.getOperand(2);
8725           if (N002.getOpcode() == ISD::FMUL)
8726             return DAG.getNode(PreferredFusedOpcode, SL, VT,
8727                                DAG.getNode(ISD::FP_EXTEND, SL, VT,
8728                                            N00.getOperand(0)),
8729                                DAG.getNode(ISD::FP_EXTEND, SL, VT,
8730                                            N00.getOperand(1)),
8731                                DAG.getNode(PreferredFusedOpcode, SL, VT,
8732                                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8733                                                        N002.getOperand(0)),
8734                                            DAG.getNode(ISD::FP_EXTEND, SL, VT,
8735                                                        N002.getOperand(1)),
8736                                            DAG.getNode(ISD::FNEG, SL, VT,
8737                                                        N1)));
8738         }
8739       }
8740
8741       // fold (fsub x, (fma y, z, (fpext (fmul u, v))))
8742       //   -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x))
8743       if (N1.getOpcode() == PreferredFusedOpcode &&
8744           N1.getOperand(2).getOpcode() == ISD::FP_EXTEND) {
8745         SDValue N120 = N1.getOperand(2).getOperand(0);
8746         if (N120.getOpcode() == ISD::FMUL) {
8747           SDValue N1200 = N120.getOperand(0);
8748           SDValue N1201 = N120.getOperand(1);
8749           return DAG.getNode(PreferredFusedOpcode, SL, VT,
8750                              DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)),
8751                              N1.getOperand(1),
8752                              DAG.getNode(PreferredFusedOpcode, SL, VT,
8753                                          DAG.getNode(ISD::FNEG, SL, VT,
8754                                                      DAG.getNode(ISD::FP_EXTEND, SL,
8755                                                                  VT, N1200)),
8756                                          DAG.getNode(ISD::FP_EXTEND, SL, VT,
8757                                                      N1201),
8758                                          N0));
8759         }
8760       }
8761
8762       // fold (fsub x, (fpext (fma y, z, (fmul u, v))))
8763       //   -> (fma (fneg (fpext y)), (fpext z),
8764       //           (fma (fneg (fpext u)), (fpext v), x))
8765       // FIXME: This turns two single-precision and one double-precision
8766       // operation into two double-precision operations, which might not be
8767       // interesting for all targets, especially GPUs.
8768 if (N1.getOpcode() == ISD::FP_EXTEND && 8769 N1.getOperand(0).getOpcode() == PreferredFusedOpcode) { 8770 SDValue N100 = N1.getOperand(0).getOperand(0); 8771 SDValue N101 = N1.getOperand(0).getOperand(1); 8772 SDValue N102 = N1.getOperand(0).getOperand(2); 8773 if (N102.getOpcode() == ISD::FMUL) { 8774 SDValue N1020 = N102.getOperand(0); 8775 SDValue N1021 = N102.getOperand(1); 8776 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8777 DAG.getNode(ISD::FNEG, SL, VT, 8778 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8779 N100)), 8780 DAG.getNode(ISD::FP_EXTEND, SL, VT, N101), 8781 DAG.getNode(PreferredFusedOpcode, SL, VT, 8782 DAG.getNode(ISD::FNEG, SL, VT, 8783 DAG.getNode(ISD::FP_EXTEND, SL, 8784 VT, N1020)), 8785 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8786 N1021), 8787 N0)); 8788 } 8789 } 8790 } 8791 } 8792 8793 return SDValue(); 8794 } 8795 8796 /// Try to perform FMA combining on a given FMUL node based on the distributive 8797 /// law x * (y + 1) = x * y + x and variants thereof (commuted versions, 8798 /// subtraction instead of addition). 8799 SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) { 8800 SDValue N0 = N->getOperand(0); 8801 SDValue N1 = N->getOperand(1); 8802 EVT VT = N->getValueType(0); 8803 SDLoc SL(N); 8804 8805 assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation"); 8806 8807 const TargetOptions &Options = DAG.getTarget().Options; 8808 8809 // The transforms below are incorrect when x == 0 and y == inf, because the 8810 // intermediate multiplication produces a nan. 8811 if (!Options.NoInfsFPMath) 8812 return SDValue(); 8813 8814 // Floating-point multiply-add without intermediate rounding. 8815 bool HasFMA = 8816 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) && 8817 TLI.isFMAFasterThanFMulAndFAdd(VT) && 8818 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 8819 8820 // Floating-point multiply-add with intermediate rounding. This can result 8821 // in a less precise result due to the changed rounding order. 8822 bool HasFMAD = Options.UnsafeFPMath && 8823 (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 8824 8825 // No valid opcode, do not combine. 8826 if (!HasFMAD && !HasFMA) 8827 return SDValue(); 8828 8829 // Always prefer FMAD to FMA for precision. 8830 unsigned PreferredFusedOpcode = HasFMAD ? 
ISD::FMAD : ISD::FMA; 8831 bool Aggressive = TLI.enableAggressiveFMAFusion(VT); 8832 8833 // fold (fmul (fadd x, +1.0), y) -> (fma x, y, y) 8834 // fold (fmul (fadd x, -1.0), y) -> (fma x, y, (fneg y)) 8835 auto FuseFADD = [&](SDValue X, SDValue Y) { 8836 if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) { 8837 auto XC1 = isConstOrConstSplatFP(X.getOperand(1)); 8838 if (XC1 && XC1->isExactlyValue(+1.0)) 8839 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, Y); 8840 if (XC1 && XC1->isExactlyValue(-1.0)) 8841 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, 8842 DAG.getNode(ISD::FNEG, SL, VT, Y)); 8843 } 8844 return SDValue(); 8845 }; 8846 8847 if (SDValue FMA = FuseFADD(N0, N1)) 8848 return FMA; 8849 if (SDValue FMA = FuseFADD(N1, N0)) 8850 return FMA; 8851 8852 // fold (fmul (fsub +1.0, x), y) -> (fma (fneg x), y, y) 8853 // fold (fmul (fsub -1.0, x), y) -> (fma (fneg x), y, (fneg y)) 8854 // fold (fmul (fsub x, +1.0), y) -> (fma x, y, (fneg y)) 8855 // fold (fmul (fsub x, -1.0), y) -> (fma x, y, y) 8856 auto FuseFSUB = [&](SDValue X, SDValue Y) { 8857 if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) { 8858 auto XC0 = isConstOrConstSplatFP(X.getOperand(0)); 8859 if (XC0 && XC0->isExactlyValue(+1.0)) 8860 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8861 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y, 8862 Y); 8863 if (XC0 && XC0->isExactlyValue(-1.0)) 8864 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8865 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y, 8866 DAG.getNode(ISD::FNEG, SL, VT, Y)); 8867 8868 auto XC1 = isConstOrConstSplatFP(X.getOperand(1)); 8869 if (XC1 && XC1->isExactlyValue(+1.0)) 8870 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, 8871 DAG.getNode(ISD::FNEG, SL, VT, Y)); 8872 if (XC1 && XC1->isExactlyValue(-1.0)) 8873 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, Y); 8874 } 8875 return SDValue(); 8876 }; 8877 8878 if (SDValue FMA = FuseFSUB(N0, N1)) 8879 return FMA; 8880 if (SDValue FMA = FuseFSUB(N1, N0)) 8881 return FMA; 8882 8883 return SDValue(); 8884 } 8885 8886 SDValue DAGCombiner::visitFADD(SDNode *N) { 8887 SDValue N0 = N->getOperand(0); 8888 SDValue N1 = N->getOperand(1); 8889 bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0); 8890 bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1); 8891 EVT VT = N->getValueType(0); 8892 SDLoc DL(N); 8893 const TargetOptions &Options = DAG.getTarget().Options; 8894 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 8895 8896 // fold vector ops 8897 if (VT.isVector()) 8898 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 8899 return FoldedVOp; 8900 8901 // fold (fadd c1, c2) -> c1 + c2 8902 if (N0CFP && N1CFP) 8903 return DAG.getNode(ISD::FADD, DL, VT, N0, N1, Flags); 8904 8905 // canonicalize constant to RHS 8906 if (N0CFP && !N1CFP) 8907 return DAG.getNode(ISD::FADD, DL, VT, N1, N0, Flags); 8908 8909 // fold (fadd A, (fneg B)) -> (fsub A, B) 8910 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 8911 isNegatibleForFree(N1, LegalOperations, TLI, &Options) == 2) 8912 return DAG.getNode(ISD::FSUB, DL, VT, N0, 8913 GetNegatedExpression(N1, DAG, LegalOperations), Flags); 8914 8915 // fold (fadd (fneg A), B) -> (fsub B, A) 8916 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 8917 isNegatibleForFree(N0, LegalOperations, TLI, &Options) == 2) 8918 return DAG.getNode(ISD::FSUB, DL, VT, N1, 8919 GetNegatedExpression(N0, DAG, LegalOperations), 
Flags); 8920 8921 // FIXME: Auto-upgrade the target/function-level option. 8922 if (Options.UnsafeFPMath || N->getFlags()->hasNoSignedZeros()) { 8923 // fold (fadd A, 0) -> A 8924 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1)) 8925 if (N1C->isZero()) 8926 return N0; 8927 } 8928 8929 // If 'unsafe math' is enabled, fold lots of things. 8930 if (Options.UnsafeFPMath) { 8931 // No FP constant should be created after legalization as Instruction 8932 // Selection pass has a hard time dealing with FP constants. 8933 bool AllowNewConst = (Level < AfterLegalizeDAG); 8934 8935 // fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2)) 8936 if (N1CFP && N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() && 8937 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) 8938 return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), 8939 DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1, 8940 Flags), 8941 Flags); 8942 8943 // If allowed, fold (fadd (fneg x), x) -> 0.0 8944 if (AllowNewConst && N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) 8945 return DAG.getConstantFP(0.0, DL, VT); 8946 8947 // If allowed, fold (fadd x, (fneg x)) -> 0.0 8948 if (AllowNewConst && N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) 8949 return DAG.getConstantFP(0.0, DL, VT); 8950 8951 // We can fold chains of FADD's of the same value into multiplications. 8952 // This transform is not safe in general because we are reducing the number 8953 // of rounding steps. 8954 if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) { 8955 if (N0.getOpcode() == ISD::FMUL) { 8956 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0)); 8957 bool CFP01 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(1)); 8958 8959 // (fadd (fmul x, c), x) -> (fmul x, c+1) 8960 if (CFP01 && !CFP00 && N0.getOperand(0) == N1) { 8961 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), 8962 DAG.getConstantFP(1.0, DL, VT), Flags); 8963 return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP, Flags); 8964 } 8965 8966 // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2) 8967 if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD && 8968 N1.getOperand(0) == N1.getOperand(1) && 8969 N0.getOperand(0) == N1.getOperand(0)) { 8970 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), 8971 DAG.getConstantFP(2.0, DL, VT), Flags); 8972 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP, Flags); 8973 } 8974 } 8975 8976 if (N1.getOpcode() == ISD::FMUL) { 8977 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0)); 8978 bool CFP11 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(1)); 8979 8980 // (fadd x, (fmul x, c)) -> (fmul x, c+1) 8981 if (CFP11 && !CFP10 && N1.getOperand(0) == N0) { 8982 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1), 8983 DAG.getConstantFP(1.0, DL, VT), Flags); 8984 return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP, Flags); 8985 } 8986 8987 // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2) 8988 if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD && 8989 N0.getOperand(0) == N0.getOperand(1) && 8990 N1.getOperand(0) == N0.getOperand(0)) { 8991 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1), 8992 DAG.getConstantFP(2.0, DL, VT), Flags); 8993 return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP, Flags); 8994 } 8995 } 8996 8997 if (N0.getOpcode() == ISD::FADD && AllowNewConst) { 8998 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0)); 8999 // (fadd (fadd x, x), x) -> (fmul x, 3.0) 9000 if (!CFP00 && 
N0.getOperand(0) == N0.getOperand(1) && 9001 (N0.getOperand(0) == N1)) { 9002 return DAG.getNode(ISD::FMUL, DL, VT, 9003 N1, DAG.getConstantFP(3.0, DL, VT), Flags); 9004 } 9005 } 9006 9007 if (N1.getOpcode() == ISD::FADD && AllowNewConst) { 9008 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0)); 9009 // (fadd x, (fadd x, x)) -> (fmul x, 3.0) 9010 if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) && 9011 N1.getOperand(0) == N0) { 9012 return DAG.getNode(ISD::FMUL, DL, VT, 9013 N0, DAG.getConstantFP(3.0, DL, VT), Flags); 9014 } 9015 } 9016 9017 // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0) 9018 if (AllowNewConst && 9019 N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD && 9020 N0.getOperand(0) == N0.getOperand(1) && 9021 N1.getOperand(0) == N1.getOperand(1) && 9022 N0.getOperand(0) == N1.getOperand(0)) { 9023 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), 9024 DAG.getConstantFP(4.0, DL, VT), Flags); 9025 } 9026 } 9027 } // enable-unsafe-fp-math 9028 9029 // FADD -> FMA combines: 9030 if (SDValue Fused = visitFADDForFMACombine(N)) { 9031 AddToWorklist(Fused.getNode()); 9032 return Fused; 9033 } 9034 return SDValue(); 9035 } 9036 9037 SDValue DAGCombiner::visitFSUB(SDNode *N) { 9038 SDValue N0 = N->getOperand(0); 9039 SDValue N1 = N->getOperand(1); 9040 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 9041 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 9042 EVT VT = N->getValueType(0); 9043 SDLoc DL(N); 9044 const TargetOptions &Options = DAG.getTarget().Options; 9045 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 9046 9047 // fold vector ops 9048 if (VT.isVector()) 9049 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 9050 return FoldedVOp; 9051 9052 // fold (fsub c1, c2) -> c1-c2 9053 if (N0CFP && N1CFP) 9054 return DAG.getNode(ISD::FSUB, DL, VT, N0, N1, Flags); 9055 9056 // fold (fsub A, (fneg B)) -> (fadd A, B) 9057 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options)) 9058 return DAG.getNode(ISD::FADD, DL, VT, N0, 9059 GetNegatedExpression(N1, DAG, LegalOperations), Flags); 9060 9061 // FIXME: Auto-upgrade the target/function-level option. 9062 if (Options.UnsafeFPMath || N->getFlags()->hasNoSignedZeros()) { 9063 // (fsub 0, B) -> -B 9064 if (N0CFP && N0CFP->isZero()) { 9065 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options)) 9066 return GetNegatedExpression(N1, DAG, LegalOperations); 9067 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 9068 return DAG.getNode(ISD::FNEG, DL, VT, N1, Flags); 9069 } 9070 } 9071 9072 // If 'unsafe math' is enabled, fold lots of things. 
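  // E.g. the (fsub x, x) -> 0.0 fold below is wrong for x = NaN or infinity
  // under strict semantics, which is why it sits behind UnsafeFPMath.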
9073   if (Options.UnsafeFPMath) {
9074     // (fsub A, 0) -> A
9075     if (N1CFP && N1CFP->isZero())
9076       return N0;
9077
9078     // (fsub x, x) -> 0.0
9079     if (N0 == N1)
9080       return DAG.getConstantFP(0.0f, DL, VT);
9081
9082     // (fsub x, (fadd x, y)) -> (fneg y)
9083     // (fsub x, (fadd y, x)) -> (fneg y)
9084     if (N1.getOpcode() == ISD::FADD) {
9085       SDValue N10 = N1->getOperand(0);
9086       SDValue N11 = N1->getOperand(1);
9087
9088       if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, &Options))
9089         return GetNegatedExpression(N11, DAG, LegalOperations);
9090
9091       if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, &Options))
9092         return GetNegatedExpression(N10, DAG, LegalOperations);
9093     }
9094   }
9095
9096   // FSUB -> FMA combines:
9097   if (SDValue Fused = visitFSUBForFMACombine(N)) {
9098     AddToWorklist(Fused.getNode());
9099     return Fused;
9100   }
9101
9102   return SDValue();
9103 }
9104
9105 SDValue DAGCombiner::visitFMUL(SDNode *N) {
9106   SDValue N0 = N->getOperand(0);
9107   SDValue N1 = N->getOperand(1);
9108   ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
9109   ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
9110   EVT VT = N->getValueType(0);
9111   SDLoc DL(N);
9112   const TargetOptions &Options = DAG.getTarget().Options;
9113   const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
9114
9115   // fold vector ops
9116   if (VT.isVector()) {
9117     // This just handles C1 * C2 for vectors. Other vector folds are below.
9118     if (SDValue FoldedVOp = SimplifyVBinOp(N))
9119       return FoldedVOp;
9120   }
9121
9122   // fold (fmul c1, c2) -> c1*c2
9123   if (N0CFP && N1CFP)
9124     return DAG.getNode(ISD::FMUL, DL, VT, N0, N1, Flags);
9125
9126   // canonicalize constant to RHS
9127   if (isConstantFPBuildVectorOrConstantFP(N0) &&
9128       !isConstantFPBuildVectorOrConstantFP(N1))
9129     return DAG.getNode(ISD::FMUL, DL, VT, N1, N0, Flags);
9130
9131   // fold (fmul A, 1.0) -> A
9132   if (N1CFP && N1CFP->isExactlyValue(1.0))
9133     return N0;
9134
9135   if (Options.UnsafeFPMath) {
9136     // fold (fmul A, 0) -> 0
9137     if (N1CFP && N1CFP->isZero())
9138       return N1;
9139
9140     // fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
9141     if (N0.getOpcode() == ISD::FMUL) {
9142       // Fold scalars or any vector constants (not just splats).
9143       // This fold is done in general by InstCombine, but extra fmul insts
9144       // may have been generated during lowering.
9145       SDValue N00 = N0.getOperand(0);
9146       SDValue N01 = N0.getOperand(1);
9147       auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
9148       auto *BV00 = dyn_cast<BuildVectorSDNode>(N00);
9149       auto *BV01 = dyn_cast<BuildVectorSDNode>(N01);
9150
9151       // Check 1: Make sure that the first operand of the inner multiply is NOT
9152       // a constant. Otherwise, we may induce infinite looping.
9153       if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) {
9154         // Check 2: Make sure that the second operand of the inner multiply and
9155         // the second operand of the outer multiply are constants.
9156         if ((N1CFP && isConstOrConstSplatFP(N01)) ||
9157             (BV1 && BV01 && BV1->isConstant() && BV01->isConstant())) {
9158           SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1, Flags);
9159           return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts, Flags);
9160         }
9161       }
9162     }
9163
9164     // fold (fmul (fadd x, x), c) -> (fmul x, (fmul 2.0, c))
9165     // Undo the fmul 2.0, x -> fadd x, x transformation, since if it occurs
9166     // during an early run of DAGCombiner it can prevent folding with fmuls
9167     // inserted during lowering.
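    // E.g. (fmul (fadd x, x), c) regains the shape (fmul x, (fmul 2.0, c)),
    // and the inner constant product can fold on a later visit.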
9168 if (N0.getOpcode() == ISD::FADD && 9169 (N0.getOperand(0) == N0.getOperand(1)) && 9170 N0.hasOneUse()) { 9171 const SDValue Two = DAG.getConstantFP(2.0, DL, VT); 9172 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1, Flags); 9173 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts, Flags); 9174 } 9175 } 9176 9177 // fold (fmul X, 2.0) -> (fadd X, X) 9178 if (N1CFP && N1CFP->isExactlyValue(+2.0)) 9179 return DAG.getNode(ISD::FADD, DL, VT, N0, N0, Flags); 9180 9181 // fold (fmul X, -1.0) -> (fneg X) 9182 if (N1CFP && N1CFP->isExactlyValue(-1.0)) 9183 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 9184 return DAG.getNode(ISD::FNEG, DL, VT, N0); 9185 9186 // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y) 9187 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) { 9188 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) { 9189 // Both can be negated for free, check to see if at least one is cheaper 9190 // negated. 9191 if (LHSNeg == 2 || RHSNeg == 2) 9192 return DAG.getNode(ISD::FMUL, DL, VT, 9193 GetNegatedExpression(N0, DAG, LegalOperations), 9194 GetNegatedExpression(N1, DAG, LegalOperations), 9195 Flags); 9196 } 9197 } 9198 9199 // FMUL -> FMA combines: 9200 if (SDValue Fused = visitFMULForFMADistributiveCombine(N)) { 9201 AddToWorklist(Fused.getNode()); 9202 return Fused; 9203 } 9204 9205 return SDValue(); 9206 } 9207 9208 SDValue DAGCombiner::visitFMA(SDNode *N) { 9209 SDValue N0 = N->getOperand(0); 9210 SDValue N1 = N->getOperand(1); 9211 SDValue N2 = N->getOperand(2); 9212 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9213 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9214 EVT VT = N->getValueType(0); 9215 SDLoc DL(N); 9216 const TargetOptions &Options = DAG.getTarget().Options; 9217 9218 // Constant fold FMA. 9219 if (isa<ConstantFPSDNode>(N0) && 9220 isa<ConstantFPSDNode>(N1) && 9221 isa<ConstantFPSDNode>(N2)) { 9222 return DAG.getNode(ISD::FMA, DL, VT, N0, N1, N2); 9223 } 9224 9225 if (Options.UnsafeFPMath) { 9226 if (N0CFP && N0CFP->isZero()) 9227 return N2; 9228 if (N1CFP && N1CFP->isZero()) 9229 return N2; 9230 } 9231 // TODO: The FMA node should have flags that propagate to these nodes. 9232 if (N0CFP && N0CFP->isExactlyValue(1.0)) 9233 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2); 9234 if (N1CFP && N1CFP->isExactlyValue(1.0)) 9235 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2); 9236 9237 // Canonicalize (fma c, x, y) -> (fma x, c, y) 9238 if (isConstantFPBuildVectorOrConstantFP(N0) && 9239 !isConstantFPBuildVectorOrConstantFP(N1)) 9240 return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2); 9241 9242 // TODO: FMA nodes should have flags that propagate to the created nodes. 9243 // For now, create a Flags object for use with all unsafe math transforms. 
9244 SDNodeFlags Flags; 9245 Flags.setUnsafeAlgebra(true); 9246 9247 if (Options.UnsafeFPMath) { 9248 // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2) 9249 if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) && 9250 isConstantFPBuildVectorOrConstantFP(N1) && 9251 isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) { 9252 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9253 DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1), 9254 &Flags), &Flags); 9255 } 9256 9257 // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y) 9258 if (N0.getOpcode() == ISD::FMUL && 9259 isConstantFPBuildVectorOrConstantFP(N1) && 9260 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) { 9261 return DAG.getNode(ISD::FMA, DL, VT, 9262 N0.getOperand(0), 9263 DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1), 9264 &Flags), 9265 N2); 9266 } 9267 } 9268 9269 // (fma x, 1, y) -> (fadd x, y) 9270 // (fma x, -1, y) -> (fadd (fneg x), y) 9271 if (N1CFP) { 9272 if (N1CFP->isExactlyValue(1.0)) 9273 // TODO: The FMA node should have flags that propagate to this node. 9274 return DAG.getNode(ISD::FADD, DL, VT, N0, N2); 9275 9276 if (N1CFP->isExactlyValue(-1.0) && 9277 (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) { 9278 SDValue RHSNeg = DAG.getNode(ISD::FNEG, DL, VT, N0); 9279 AddToWorklist(RHSNeg.getNode()); 9280 // TODO: The FMA node should have flags that propagate to this node. 9281 return DAG.getNode(ISD::FADD, DL, VT, N2, RHSNeg); 9282 } 9283 } 9284 9285 if (Options.UnsafeFPMath) { 9286 // (fma x, c, x) -> (fmul x, (c+1)) 9287 if (N1CFP && N0 == N2) { 9288 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9289 DAG.getNode(ISD::FADD, DL, VT, N1, 9290 DAG.getConstantFP(1.0, DL, VT), &Flags), 9291 &Flags); 9292 } 9293 9294 // (fma x, c, (fneg x)) -> (fmul x, (c-1)) 9295 if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) { 9296 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9297 DAG.getNode(ISD::FADD, DL, VT, N1, 9298 DAG.getConstantFP(-1.0, DL, VT), &Flags), 9299 &Flags); 9300 } 9301 } 9302 9303 return SDValue(); 9304 } 9305 9306 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 9307 // reciprocal. 9308 // E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip) 9309 // Notice that this is not always beneficial. One reason is different targets 9310 // may have different costs for FDIV and FMUL, so sometimes the cost of two 9311 // FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason 9312 // is the critical path is increased from "one FDIV" to "one FDIV + one FMUL". 9313 SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) { 9314 bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath; 9315 const SDNodeFlags *Flags = N->getFlags(); 9316 if (!UnsafeMath && !Flags->hasAllowReciprocal()) 9317 return SDValue(); 9318 9319 // Skip if current node is a reciprocal. 9320 SDValue N0 = N->getOperand(0); 9321 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9322 if (N0CFP && N0CFP->isExactlyValue(1.0)) 9323 return SDValue(); 9324 9325 // Exit early if the target does not want this transform or if there can't 9326 // possibly be enough uses of the divisor to make the transform worthwhile. 9327 SDValue N1 = N->getOperand(1); 9328 unsigned MinUses = TLI.combineRepeatedFPDivisors(); 9329 if (!MinUses || N1->use_size() < MinUses) 9330 return SDValue(); 9331 9332 // Find all FDIV users of the same divisor. 9333 // Use a set because duplicates may be present in the user list. 
9334 SetVector<SDNode *> Users; 9335 for (auto *U : N1->uses()) { 9336 if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) { 9337 // This division is eligible for optimization only if global unsafe math 9338 // is enabled or if this division allows reciprocal formation. 9339 if (UnsafeMath || U->getFlags()->hasAllowReciprocal()) 9340 Users.insert(U); 9341 } 9342 } 9343 9344 // Now that we have the actual number of divisor uses, make sure it meets 9345 // the minimum threshold specified by the target. 9346 if (Users.size() < MinUses) 9347 return SDValue(); 9348 9349 EVT VT = N->getValueType(0); 9350 SDLoc DL(N); 9351 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT); 9352 SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1, Flags); 9353 9354 // Dividend / Divisor -> Dividend * Reciprocal 9355 for (auto *U : Users) { 9356 SDValue Dividend = U->getOperand(0); 9357 if (Dividend != FPOne) { 9358 SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend, 9359 Reciprocal, Flags); 9360 CombineTo(U, NewNode); 9361 } else if (U != Reciprocal.getNode()) { 9362 // In the absence of fast-math-flags, this user node is always the 9363 // same node as Reciprocal, but with FMF they may be different nodes. 9364 CombineTo(U, Reciprocal); 9365 } 9366 } 9367 return SDValue(N, 0); // N was replaced. 9368 } 9369 9370 SDValue DAGCombiner::visitFDIV(SDNode *N) { 9371 SDValue N0 = N->getOperand(0); 9372 SDValue N1 = N->getOperand(1); 9373 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9374 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9375 EVT VT = N->getValueType(0); 9376 SDLoc DL(N); 9377 const TargetOptions &Options = DAG.getTarget().Options; 9378 SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 9379 9380 // fold vector ops 9381 if (VT.isVector()) 9382 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 9383 return FoldedVOp; 9384 9385 // fold (fdiv c1, c2) -> c1/c2 9386 if (N0CFP && N1CFP) 9387 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1, Flags); 9388 9389 if (Options.UnsafeFPMath) { 9390 // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable. 9391 if (N1CFP) { 9392 // Compute the reciprocal 1.0 / c2. 9393 const APFloat &N1APF = N1CFP->getValueAPF(); 9394 APFloat Recip(N1APF.getSemantics(), 1); // 1.0 9395 APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven); 9396 // Only do the transform if the reciprocal is a legal fp immediate that 9397 // isn't too nasty (eg NaN, denormal, ...). 9398 if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty 9399 (!LegalOperations || 9400 // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM 9401 // backend)... we should handle this gracefully after Legalize. 9402 // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) || 9403 TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) || 9404 TLI.isFPImmLegal(Recip, VT))) 9405 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9406 DAG.getConstantFP(Recip, DL, VT), Flags); 9407 } 9408 9409 // If this FDIV is part of a reciprocal square root, it may be folded 9410 // into a target-specific square root estimate instruction. 
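// For example, (fdiv x, (fsqrt y)) becomes (fmul x, (rsqrt y)), where the
// rsqrt is typically a cheap hardware estimate refined by a few
// Newton-Raphson iterations inside buildRsqrtEstimate. The fp_extend and
// fp_round cases below perform the same fold through a conversion of the
// sqrt operand.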
9411 if (N1.getOpcode() == ISD::FSQRT) { 9412 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags)) { 9413 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9414 } 9415 } else if (N1.getOpcode() == ISD::FP_EXTEND && 9416 N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9417 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0), 9418 Flags)) { 9419 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV); 9420 AddToWorklist(RV.getNode()); 9421 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9422 } 9423 } else if (N1.getOpcode() == ISD::FP_ROUND && 9424 N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9425 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0), 9426 Flags)) { 9427 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1)); 9428 AddToWorklist(RV.getNode()); 9429 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9430 } 9431 } else if (N1.getOpcode() == ISD::FMUL) { 9432 // Look through an FMUL. Even though this won't remove the FDIV directly, 9433 // it's still worthwhile to get rid of the FSQRT if possible. 9434 SDValue SqrtOp; 9435 SDValue OtherOp; 9436 if (N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9437 SqrtOp = N1.getOperand(0); 9438 OtherOp = N1.getOperand(1); 9439 } else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) { 9440 SqrtOp = N1.getOperand(1); 9441 OtherOp = N1.getOperand(0); 9442 } 9443 if (SqrtOp.getNode()) { 9444 // We found a FSQRT, so try to make this fold: 9445 // x / (y * sqrt(z)) -> x * (rsqrt(z) / y) 9446 if (SDValue RV = buildRsqrtEstimate(SqrtOp.getOperand(0), Flags)) { 9447 RV = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, RV, OtherOp, Flags); 9448 AddToWorklist(RV.getNode()); 9449 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9450 } 9451 } 9452 } 9453 9454 // Fold into a reciprocal estimate and multiply instead of a real divide. 9455 if (SDValue RV = BuildReciprocalEstimate(N1, Flags)) { 9456 AddToWorklist(RV.getNode()); 9457 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9458 } 9459 } 9460 9461 // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y) 9462 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) { 9463 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) { 9464 // Both can be negated for free, check to see if at least one is cheaper 9465 // negated. 9466 if (LHSNeg == 2 || RHSNeg == 2) 9467 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, 9468 GetNegatedExpression(N0, DAG, LegalOperations), 9469 GetNegatedExpression(N1, DAG, LegalOperations), 9470 Flags); 9471 } 9472 } 9473 9474 if (SDValue CombineRepeatedDivisors = combineRepeatedFPDivisors(N)) 9475 return CombineRepeatedDivisors; 9476 9477 return SDValue(); 9478 } 9479 9480 SDValue DAGCombiner::visitFREM(SDNode *N) { 9481 SDValue N0 = N->getOperand(0); 9482 SDValue N1 = N->getOperand(1); 9483 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9484 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9485 EVT VT = N->getValueType(0); 9486 9487 // fold (frem c1, c2) -> fmod(c1,c2) 9488 if (N0CFP && N1CFP) 9489 return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1, 9490 &cast<BinaryWithFlagsSDNode>(N)->Flags); 9491 9492 return SDValue(); 9493 } 9494 9495 SDValue DAGCombiner::visitFSQRT(SDNode *N) { 9496 if (!DAG.getTarget().Options.UnsafeFPMath) 9497 return SDValue(); 9498 9499 SDValue N0 = N->getOperand(0); 9500 if (TLI.isFsqrtCheap(N0, DAG)) 9501 return SDValue(); 9502 9503 // TODO: FSQRT nodes should have flags that propagate to the created nodes. 
9504 // For now, create a Flags object for use with all unsafe math transforms. 9505 SDNodeFlags Flags; 9506 Flags.setUnsafeAlgebra(true); 9507 return buildSqrtEstimate(N0, &Flags); 9508 } 9509 9510 /// copysign(x, fp_extend(y)) -> copysign(x, y) 9511 /// copysign(x, fp_round(y)) -> copysign(x, y) 9512 static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) { 9513 SDValue N1 = N->getOperand(1); 9514 if ((N1.getOpcode() == ISD::FP_EXTEND || 9515 N1.getOpcode() == ISD::FP_ROUND)) { 9516 // Do not optimize out type conversion of f128 type yet. 9517 // For some targets like x86_64, configuration is changed to keep one f128 9518 // value in one SSE register, but instruction selection cannot handle 9519 // FCOPYSIGN on SSE registers yet. 9520 EVT N1VT = N1->getValueType(0); 9521 EVT N1Op0VT = N1->getOperand(0)->getValueType(0); 9522 return (N1VT == N1Op0VT || N1Op0VT != MVT::f128); 9523 } 9524 return false; 9525 } 9526 9527 SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { 9528 SDValue N0 = N->getOperand(0); 9529 SDValue N1 = N->getOperand(1); 9530 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9531 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9532 EVT VT = N->getValueType(0); 9533 9534 if (N0CFP && N1CFP) // Constant fold 9535 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1); 9536 9537 if (N1CFP) { 9538 const APFloat &V = N1CFP->getValueAPF(); 9539 // copysign(x, c1) -> fabs(x) iff ispos(c1) 9540 // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1) 9541 if (!V.isNegative()) { 9542 if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT)) 9543 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9544 } else { 9545 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 9546 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, 9547 DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0)); 9548 } 9549 } 9550 9551 // copysign(fabs(x), y) -> copysign(x, y) 9552 // copysign(fneg(x), y) -> copysign(x, y) 9553 // copysign(copysign(x,z), y) -> copysign(x, y) 9554 if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG || 9555 N0.getOpcode() == ISD::FCOPYSIGN) 9556 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0.getOperand(0), N1); 9557 9558 // copysign(x, abs(y)) -> abs(x) 9559 if (N1.getOpcode() == ISD::FABS) 9560 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9561 9562 // copysign(x, copysign(y,z)) -> copysign(x, z) 9563 if (N1.getOpcode() == ISD::FCOPYSIGN) 9564 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(1)); 9565 9566 // copysign(x, fp_extend(y)) -> copysign(x, y) 9567 // copysign(x, fp_round(y)) -> copysign(x, y) 9568 if (CanCombineFCOPYSIGN_EXTEND_ROUND(N)) 9569 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(0)); 9570 9571 return SDValue(); 9572 } 9573 9574 SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) { 9575 SDValue N0 = N->getOperand(0); 9576 EVT VT = N->getValueType(0); 9577 EVT OpVT = N0.getValueType(); 9578 9579 // fold (sint_to_fp c1) -> c1fp 9580 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 9581 // ...but only if the target supports immediate floating-point values 9582 (!LegalOperations || 9583 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) 9584 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0); 9585 9586 // If the input is a legal type, and SINT_TO_FP is not legal on this target, 9587 // but UINT_TO_FP is legal on this target, try to convert. 
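// For example, if the sign bit of an i32 input is known zero, the value
// lies in [0, 2^31), where the signed and unsigned interpretations agree,
// so either conversion opcode produces the same result.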
9588 if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
9589 TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
9590 // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
9591 if (DAG.SignBitIsZero(N0))
9592 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
9593 }
9594
9595 // The next optimizations are desirable only if SELECT_CC can be lowered.
9596 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
9597 // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
9598 if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
9599 !VT.isVector() &&
9600 (!LegalOperations ||
9601 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
9602 SDLoc DL(N);
9603 SDValue Ops[] =
9604 { N0.getOperand(0), N0.getOperand(1),
9605 DAG.getConstantFP(-1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
9606 N0.getOperand(2) };
9607 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
9608 }
9609
9610 // fold (sint_to_fp (zext (setcc x, y, cc))) ->
9611 // (select_cc x, y, 1.0, 0.0, cc)
9612 if (N0.getOpcode() == ISD::ZERO_EXTEND &&
9613 N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
9614 (!LegalOperations ||
9615 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
9616 SDLoc DL(N);
9617 SDValue Ops[] =
9618 { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
9619 DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
9620 N0.getOperand(0).getOperand(2) };
9621 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
9622 }
9623 }
9624
9625 return SDValue();
9626 }
9627
9628 SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
9629 SDValue N0 = N->getOperand(0);
9630 EVT VT = N->getValueType(0);
9631 EVT OpVT = N0.getValueType();
9632
9633 // fold (uint_to_fp c1) -> c1fp
9634 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
9635 // ...but only if the target supports immediate floating-point values
9636 (!LegalOperations ||
9637 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
9638 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
9639
9640 // If the input is a legal type, and UINT_TO_FP is not legal on this target,
9641 // but SINT_TO_FP is legal on this target, try to convert.
9642 if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
9643 TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
9644 // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
9645 if (DAG.SignBitIsZero(N0))
9646 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
9647 }
9648
9649 // The next optimizations are desirable only if SELECT_CC can be lowered.
9650 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
9651 // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
9652
9653 if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
9654 (!LegalOperations ||
9655 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
9656 SDLoc DL(N);
9657 SDValue Ops[] =
9658 { N0.getOperand(0), N0.getOperand(1),
9659 DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
9660 N0.getOperand(2) };
9661 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
9662 }
9663 }
9664
9665 return SDValue();
9666 }
9667
9668 // Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
9669 static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
9670 SDValue N0 = N->getOperand(0);
9671 EVT VT = N->getValueType(0);
9672
9673 if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
9674 return SDValue();
9675
9676 SDValue Src = N0.getOperand(0);
9677 EVT SrcVT = Src.getValueType();
9678 bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
9679 bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;
9680
9681 // We can safely assume the conversion won't overflow the output range,
9682 // because (for example) (uint8_t)18293.f is undefined behavior.
9683
9684 // Since we can assume the conversion won't overflow, our decision as to
9685 // whether the input will fit in the float should depend on the minimum
9686 // of the input range and output range.
9687
9688 // This means this is also safe for a signed input and unsigned output, since
9689 // a negative input would lead to undefined behavior.
9690 unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned;
9691 unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned;
9692 unsigned ActualSize = std::min(InputSize, OutputSize);
9693 const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType());
9694
9695 // We can only fold away the float conversion if the input range can be
9696 // represented exactly in the float range.
9697 if (APFloat::semanticsPrecision(sem) >= ActualSize) {
9698 if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) {
9699 unsigned ExtOp = IsInputSigned && IsOutputSigned ?
ISD::SIGN_EXTEND
9700 : ISD::ZERO_EXTEND;
9701 return DAG.getNode(ExtOp, SDLoc(N), VT, Src);
9702 }
9703 if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits())
9704 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src);
9705 return DAG.getBitcast(VT, Src);
9706 }
9707 return SDValue();
9708 }
9709
9710 SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
9711 SDValue N0 = N->getOperand(0);
9712 EVT VT = N->getValueType(0);
9713
9714 // fold (fp_to_sint c1fp) -> c1
9715 if (isConstantFPBuildVectorOrConstantFP(N0))
9716 return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);
9717
9718 return FoldIntToFPToInt(N, DAG);
9719 }
9720
9721 SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
9722 SDValue N0 = N->getOperand(0);
9723 EVT VT = N->getValueType(0);
9724
9725 // fold (fp_to_uint c1fp) -> c1
9726 if (isConstantFPBuildVectorOrConstantFP(N0))
9727 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);
9728
9729 return FoldIntToFPToInt(N, DAG);
9730 }
9731
9732 SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
9733 SDValue N0 = N->getOperand(0);
9734 SDValue N1 = N->getOperand(1);
9735 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
9736 EVT VT = N->getValueType(0);
9737
9738 // fold (fp_round c1fp) -> c1fp
9739 if (N0CFP)
9740 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);
9741
9742 // fold (fp_round (fp_extend x)) -> x
9743 if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
9744 return N0.getOperand(0);
9745
9746 // fold (fp_round (fp_round x)) -> (fp_round x)
9747 if (N0.getOpcode() == ISD::FP_ROUND) {
9748 const bool NIsTrunc = N->getConstantOperandVal(1) == 1;
9749 const bool N0IsTrunc = N0.getConstantOperandVal(1) == 1;
9750
9751 // Skip this folding if it results in an fp_round from f80 to f16.
9752 //
9753 // f80 to f16 always generates an expensive (and as yet, unimplemented)
9754 // libcall to __truncxfhf2 instead of selecting native f16 conversion
9755 // instructions from f32 or f64. Moreover, the first (value-preserving)
9756 // fp_round from f80 to either f32 or f64 may become a NOP on platforms like
9757 // x86.
9758 if (N0.getOperand(0).getValueType() == MVT::f80 && VT == MVT::f16)
9759 return SDValue();
9760
9761 // If the first fp_round isn't a value preserving truncation, it might
9762 // introduce a tie in the second fp_round, which wouldn't occur in the
9763 // single-step fp_round we want to fold to.
9764 // In other words, double rounding isn't the same as rounding.
9765 // Also, this is a value preserving truncation iff both fp_round's are.
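// A decimal sketch of the double-rounding hazard: rounding 1.2349999 to
// three digits gives 1.235, and rounding that to two digits gives 1.24,
// whereas rounding 1.2349999 to two digits directly gives 1.23. A
// value-preserving first truncation cannot create such a tie, hence the
// N0IsTrunc test below.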
9766 if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) { 9767 SDLoc DL(N); 9768 return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0), 9769 DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL)); 9770 } 9771 } 9772 9773 // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y) 9774 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) { 9775 SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT, 9776 N0.getOperand(0), N1); 9777 AddToWorklist(Tmp.getNode()); 9778 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, 9779 Tmp, N0.getOperand(1)); 9780 } 9781 9782 return SDValue(); 9783 } 9784 9785 SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { 9786 SDValue N0 = N->getOperand(0); 9787 EVT VT = N->getValueType(0); 9788 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 9789 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9790 9791 // fold (fp_round_inreg c1fp) -> c1fp 9792 if (N0CFP && isTypeLegal(EVT)) { 9793 SDLoc DL(N); 9794 SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), DL, EVT); 9795 return DAG.getNode(ISD::FP_EXTEND, DL, VT, Round); 9796 } 9797 9798 return SDValue(); 9799 } 9800 9801 SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) { 9802 SDValue N0 = N->getOperand(0); 9803 EVT VT = N->getValueType(0); 9804 9805 // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. 9806 if (N->hasOneUse() && 9807 N->use_begin()->getOpcode() == ISD::FP_ROUND) 9808 return SDValue(); 9809 9810 // fold (fp_extend c1fp) -> c1fp 9811 if (isConstantFPBuildVectorOrConstantFP(N0)) 9812 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0); 9813 9814 // fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op) 9815 if (N0.getOpcode() == ISD::FP16_TO_FP && 9816 TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal) 9817 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0)); 9818 9819 // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the 9820 // value of X. 9821 if (N0.getOpcode() == ISD::FP_ROUND 9822 && N0.getConstantOperandVal(1) == 1) { 9823 SDValue In = N0.getOperand(0); 9824 if (In.getValueType() == VT) return In; 9825 if (VT.bitsLT(In.getValueType())) 9826 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, 9827 In, N0.getOperand(1)); 9828 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In); 9829 } 9830 9831 // fold (fpext (load x)) -> (fpext (fptrunc (extload x))) 9832 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && 9833 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) { 9834 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 9835 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, 9836 LN0->getChain(), 9837 LN0->getBasePtr(), N0.getValueType(), 9838 LN0->getMemOperand()); 9839 CombineTo(N, ExtLoad); 9840 CombineTo(N0.getNode(), 9841 DAG.getNode(ISD::FP_ROUND, SDLoc(N0), 9842 N0.getValueType(), ExtLoad, 9843 DAG.getIntPtrConstant(1, SDLoc(N0))), 9844 ExtLoad.getValue(1)); 9845 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
9846 } 9847 9848 return SDValue(); 9849 } 9850 9851 SDValue DAGCombiner::visitFCEIL(SDNode *N) { 9852 SDValue N0 = N->getOperand(0); 9853 EVT VT = N->getValueType(0); 9854 9855 // fold (fceil c1) -> fceil(c1) 9856 if (isConstantFPBuildVectorOrConstantFP(N0)) 9857 return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0); 9858 9859 return SDValue(); 9860 } 9861 9862 SDValue DAGCombiner::visitFTRUNC(SDNode *N) { 9863 SDValue N0 = N->getOperand(0); 9864 EVT VT = N->getValueType(0); 9865 9866 // fold (ftrunc c1) -> ftrunc(c1) 9867 if (isConstantFPBuildVectorOrConstantFP(N0)) 9868 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0); 9869 9870 return SDValue(); 9871 } 9872 9873 SDValue DAGCombiner::visitFFLOOR(SDNode *N) { 9874 SDValue N0 = N->getOperand(0); 9875 EVT VT = N->getValueType(0); 9876 9877 // fold (ffloor c1) -> ffloor(c1) 9878 if (isConstantFPBuildVectorOrConstantFP(N0)) 9879 return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0); 9880 9881 return SDValue(); 9882 } 9883 9884 // FIXME: FNEG and FABS have a lot in common; refactor. 9885 SDValue DAGCombiner::visitFNEG(SDNode *N) { 9886 SDValue N0 = N->getOperand(0); 9887 EVT VT = N->getValueType(0); 9888 9889 // Constant fold FNEG. 9890 if (isConstantFPBuildVectorOrConstantFP(N0)) 9891 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0); 9892 9893 if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(), 9894 &DAG.getTarget().Options)) 9895 return GetNegatedExpression(N0, DAG, LegalOperations); 9896 9897 // Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading 9898 // constant pool values. 9899 if (!TLI.isFNegFree(VT) && 9900 N0.getOpcode() == ISD::BITCAST && 9901 N0.getNode()->hasOneUse()) { 9902 SDValue Int = N0.getOperand(0); 9903 EVT IntVT = Int.getValueType(); 9904 if (IntVT.isInteger() && !IntVT.isVector()) { 9905 APInt SignMask; 9906 if (N0.getValueType().isVector()) { 9907 // For a vector, get a mask such as 0x80... per scalar element 9908 // and splat it. 9909 SignMask = APInt::getSignBit(N0.getScalarValueSizeInBits()); 9910 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask); 9911 } else { 9912 // For a scalar, just generate 0x80... 
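// For example, for an f32 carried in an i32 the mask is 0x80000000, so
// fneg(x) becomes bitcast(xor (bitcast x), 0x80000000).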
9913 SignMask = APInt::getSignBit(IntVT.getSizeInBits()); 9914 } 9915 SDLoc DL0(N0); 9916 Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int, 9917 DAG.getConstant(SignMask, DL0, IntVT)); 9918 AddToWorklist(Int.getNode()); 9919 return DAG.getBitcast(VT, Int); 9920 } 9921 } 9922 9923 // (fneg (fmul c, x)) -> (fmul -c, x) 9924 if (N0.getOpcode() == ISD::FMUL && 9925 (N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) { 9926 ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 9927 if (CFP1) { 9928 APFloat CVal = CFP1->getValueAPF(); 9929 CVal.changeSign(); 9930 if (Level >= AfterLegalizeDAG && 9931 (TLI.isFPImmLegal(CVal, VT) || 9932 TLI.isOperationLegal(ISD::ConstantFP, VT))) 9933 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0.getOperand(0), 9934 DAG.getNode(ISD::FNEG, SDLoc(N), VT, 9935 N0.getOperand(1)), 9936 &cast<BinaryWithFlagsSDNode>(N0)->Flags); 9937 } 9938 } 9939 9940 return SDValue(); 9941 } 9942 9943 SDValue DAGCombiner::visitFMINNUM(SDNode *N) { 9944 SDValue N0 = N->getOperand(0); 9945 SDValue N1 = N->getOperand(1); 9946 EVT VT = N->getValueType(0); 9947 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 9948 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 9949 9950 if (N0CFP && N1CFP) { 9951 const APFloat &C0 = N0CFP->getValueAPF(); 9952 const APFloat &C1 = N1CFP->getValueAPF(); 9953 return DAG.getConstantFP(minnum(C0, C1), SDLoc(N), VT); 9954 } 9955 9956 // Canonicalize to constant on RHS. 9957 if (isConstantFPBuildVectorOrConstantFP(N0) && 9958 !isConstantFPBuildVectorOrConstantFP(N1)) 9959 return DAG.getNode(ISD::FMINNUM, SDLoc(N), VT, N1, N0); 9960 9961 return SDValue(); 9962 } 9963 9964 SDValue DAGCombiner::visitFMAXNUM(SDNode *N) { 9965 SDValue N0 = N->getOperand(0); 9966 SDValue N1 = N->getOperand(1); 9967 EVT VT = N->getValueType(0); 9968 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 9969 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 9970 9971 if (N0CFP && N1CFP) { 9972 const APFloat &C0 = N0CFP->getValueAPF(); 9973 const APFloat &C1 = N1CFP->getValueAPF(); 9974 return DAG.getConstantFP(maxnum(C0, C1), SDLoc(N), VT); 9975 } 9976 9977 // Canonicalize to constant on RHS. 9978 if (isConstantFPBuildVectorOrConstantFP(N0) && 9979 !isConstantFPBuildVectorOrConstantFP(N1)) 9980 return DAG.getNode(ISD::FMAXNUM, SDLoc(N), VT, N1, N0); 9981 9982 return SDValue(); 9983 } 9984 9985 SDValue DAGCombiner::visitFABS(SDNode *N) { 9986 SDValue N0 = N->getOperand(0); 9987 EVT VT = N->getValueType(0); 9988 9989 // fold (fabs c1) -> fabs(c1) 9990 if (isConstantFPBuildVectorOrConstantFP(N0)) 9991 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9992 9993 // fold (fabs (fabs x)) -> (fabs x) 9994 if (N0.getOpcode() == ISD::FABS) 9995 return N->getOperand(0); 9996 9997 // fold (fabs (fneg x)) -> (fabs x) 9998 // fold (fabs (fcopysign x, y)) -> (fabs x) 9999 if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN) 10000 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0)); 10001 10002 // Transform fabs(bitconvert(x)) -> bitconvert(x & ~sign) to avoid loading 10003 // constant pool values. 10004 if (!TLI.isFAbsFree(VT) && 10005 N0.getOpcode() == ISD::BITCAST && 10006 N0.getNode()->hasOneUse()) { 10007 SDValue Int = N0.getOperand(0); 10008 EVT IntVT = Int.getValueType(); 10009 if (IntVT.isInteger() && !IntVT.isVector()) { 10010 APInt SignMask; 10011 if (N0.getValueType().isVector()) { 10012 // For a vector, get a mask such as 0x7f... per scalar element 10013 // and splat it. 
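// For example, a v4f32 carried in an i128 gets four copies of 0x7FFFFFFF,
// one per lane, so fabs clears each element's sign bit.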
10014 SignMask = ~APInt::getSignBit(N0.getScalarValueSizeInBits());
10015 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
10016 } else {
10017 // For a scalar, just generate 0x7f...
10018 SignMask = ~APInt::getSignBit(IntVT.getSizeInBits());
10019 }
10020 SDLoc DL(N0);
10021 Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
10022 DAG.getConstant(SignMask, DL, IntVT));
10023 AddToWorklist(Int.getNode());
10024 return DAG.getBitcast(N->getValueType(0), Int);
10025 }
10026 }
10027
10028 return SDValue();
10029 }
10030
10031 SDValue DAGCombiner::visitBRCOND(SDNode *N) {
10032 SDValue Chain = N->getOperand(0);
10033 SDValue N1 = N->getOperand(1);
10034 SDValue N2 = N->getOperand(2);
10035
10036 // If N is a constant we could fold this into a fallthrough or unconditional
10037 // branch. However, that doesn't happen very often in normal code, because
10038 // Instcombine/SimplifyCFG should have handled the available opportunities.
10039 // If we did this folding here, it would be necessary to update the
10040 // MachineBasicBlock CFG, which is awkward.
10041
10042 // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
10043 // on the target.
10044 if (N1.getOpcode() == ISD::SETCC &&
10045 TLI.isOperationLegalOrCustom(ISD::BR_CC,
10046 N1.getOperand(0).getValueType())) {
10047 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
10048 Chain, N1.getOperand(2),
10049 N1.getOperand(0), N1.getOperand(1), N2);
10050 }
10051
10052 if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
10053 ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
10054 (N1.getOperand(0).hasOneUse() &&
10055 N1.getOperand(0).getOpcode() == ISD::SRL))) {
10056 SDNode *Trunc = nullptr;
10057 if (N1.getOpcode() == ISD::TRUNCATE) {
10058 // Look past the truncate.
10059 Trunc = N1.getNode();
10060 N1 = N1.getOperand(0);
10061 }
10062
10063 // Match this pattern so that we can generate simpler code:
10064 //
10065 // %a = ...
10066 // %b = and i32 %a, 2
10067 // %c = srl i32 %b, 1
10068 // brcond i32 %c ...
10069 //
10070 // into
10071 //
10072 // %a = ...
10073 // %b = and i32 %a, 2
10074 // %c = setcc ne %b, 0
10075 // brcond %c ...
10076 //
10077 // This applies only when the AND constant value has one bit set and the
10078 // SRL constant is equal to the log2 of the AND constant. The back-end is
10079 // smart enough to convert the result into a TEST/JMP sequence.
10080 SDValue Op0 = N1.getOperand(0);
10081 SDValue Op1 = N1.getOperand(1);
10082
10083 if (Op0.getOpcode() == ISD::AND &&
10084 Op1.getOpcode() == ISD::Constant) {
10085 SDValue AndOp1 = Op0.getOperand(1);
10086
10087 if (AndOp1.getOpcode() == ISD::Constant) {
10088 const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
10089
10090 if (AndConst.isPowerOf2() &&
10091 cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
10092 SDLoc DL(N);
10093 SDValue SetCC =
10094 DAG.getSetCC(DL,
10095 getSetCCResultType(Op0.getValueType()),
10096 Op0, DAG.getConstant(0, DL, Op0.getValueType()),
10097 ISD::SETNE);
10098
10099 SDValue NewBRCond = DAG.getNode(ISD::BRCOND, DL,
10100 MVT::Other, Chain, SetCC, N2);
10101 // Don't add the new BRCond into the worklist or else SimplifySelectCC
10102 // will convert it back to (X & C1) >> C2.
10103 CombineTo(N, NewBRCond, false);
10104 // Truncate is dead.
10105 if (Trunc) 10106 deleteAndRecombine(Trunc); 10107 // Replace the uses of SRL with SETCC 10108 WorklistRemover DeadNodes(*this); 10109 DAG.ReplaceAllUsesOfValueWith(N1, SetCC); 10110 deleteAndRecombine(N1.getNode()); 10111 return SDValue(N, 0); // Return N so it doesn't get rechecked! 10112 } 10113 } 10114 } 10115 10116 if (Trunc) 10117 // Restore N1 if the above transformation doesn't match. 10118 N1 = N->getOperand(1); 10119 } 10120 10121 // Transform br(xor(x, y)) -> br(x != y) 10122 // Transform br(xor(xor(x,y), 1)) -> br (x == y) 10123 if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) { 10124 SDNode *TheXor = N1.getNode(); 10125 SDValue Op0 = TheXor->getOperand(0); 10126 SDValue Op1 = TheXor->getOperand(1); 10127 if (Op0.getOpcode() == Op1.getOpcode()) { 10128 // Avoid missing important xor optimizations. 10129 if (SDValue Tmp = visitXOR(TheXor)) { 10130 if (Tmp.getNode() != TheXor) { 10131 DEBUG(dbgs() << "\nReplacing.8 "; 10132 TheXor->dump(&DAG); 10133 dbgs() << "\nWith: "; 10134 Tmp.getNode()->dump(&DAG); 10135 dbgs() << '\n'); 10136 WorklistRemover DeadNodes(*this); 10137 DAG.ReplaceAllUsesOfValueWith(N1, Tmp); 10138 deleteAndRecombine(TheXor); 10139 return DAG.getNode(ISD::BRCOND, SDLoc(N), 10140 MVT::Other, Chain, Tmp, N2); 10141 } 10142 10143 // visitXOR has changed XOR's operands or replaced the XOR completely, 10144 // bail out. 10145 return SDValue(N, 0); 10146 } 10147 } 10148 10149 if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) { 10150 bool Equal = false; 10151 if (isOneConstant(Op0) && Op0.hasOneUse() && 10152 Op0.getOpcode() == ISD::XOR) { 10153 TheXor = Op0.getNode(); 10154 Equal = true; 10155 } 10156 10157 EVT SetCCVT = N1.getValueType(); 10158 if (LegalTypes) 10159 SetCCVT = getSetCCResultType(SetCCVT); 10160 SDValue SetCC = DAG.getSetCC(SDLoc(TheXor), 10161 SetCCVT, 10162 Op0, Op1, 10163 Equal ? ISD::SETEQ : ISD::SETNE); 10164 // Replace the uses of XOR with SETCC 10165 WorklistRemover DeadNodes(*this); 10166 DAG.ReplaceAllUsesOfValueWith(N1, SetCC); 10167 deleteAndRecombine(N1.getNode()); 10168 return DAG.getNode(ISD::BRCOND, SDLoc(N), 10169 MVT::Other, Chain, SetCC, N2); 10170 } 10171 } 10172 10173 return SDValue(); 10174 } 10175 10176 // Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB. 10177 // 10178 SDValue DAGCombiner::visitBR_CC(SDNode *N) { 10179 CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1)); 10180 SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3); 10181 10182 // If N is a constant we could fold this into a fallthrough or unconditional 10183 // branch. However that doesn't happen very often in normal code, because 10184 // Instcombine/SimplifyCFG should have handled the available opportunities. 10185 // If we did this folding here, it would be necessary to update the 10186 // MachineBasicBlock CFG, which is awkward. 10187 10188 // Use SimplifySetCC to simplify SETCC's. 
10189 SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()), 10190 CondLHS, CondRHS, CC->get(), SDLoc(N), 10191 false); 10192 if (Simp.getNode()) AddToWorklist(Simp.getNode()); 10193 10194 // fold to a simpler setcc 10195 if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC) 10196 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other, 10197 N->getOperand(0), Simp.getOperand(2), 10198 Simp.getOperand(0), Simp.getOperand(1), 10199 N->getOperand(4)); 10200 10201 return SDValue(); 10202 } 10203 10204 /// Return true if 'Use' is a load or a store that uses N as its base pointer 10205 /// and that N may be folded in the load / store addressing mode. 10206 static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, 10207 SelectionDAG &DAG, 10208 const TargetLowering &TLI) { 10209 EVT VT; 10210 unsigned AS; 10211 10212 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) { 10213 if (LD->isIndexed() || LD->getBasePtr().getNode() != N) 10214 return false; 10215 VT = LD->getMemoryVT(); 10216 AS = LD->getAddressSpace(); 10217 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) { 10218 if (ST->isIndexed() || ST->getBasePtr().getNode() != N) 10219 return false; 10220 VT = ST->getMemoryVT(); 10221 AS = ST->getAddressSpace(); 10222 } else 10223 return false; 10224 10225 TargetLowering::AddrMode AM; 10226 if (N->getOpcode() == ISD::ADD) { 10227 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); 10228 if (Offset) 10229 // [reg +/- imm] 10230 AM.BaseOffs = Offset->getSExtValue(); 10231 else 10232 // [reg +/- reg] 10233 AM.Scale = 1; 10234 } else if (N->getOpcode() == ISD::SUB) { 10235 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); 10236 if (Offset) 10237 // [reg +/- imm] 10238 AM.BaseOffs = -Offset->getSExtValue(); 10239 else 10240 // [reg +/- reg] 10241 AM.Scale = 1; 10242 } else 10243 return false; 10244 10245 return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, 10246 VT.getTypeForEVT(*DAG.getContext()), AS); 10247 } 10248 10249 /// Try turning a load/store into a pre-indexed load/store when the base 10250 /// pointer is an add or subtract and it has other uses besides the load/store. 10251 /// After the transformation, the new indexed load/store has effectively folded 10252 /// the add/subtract in and all of its other uses are redirected to the 10253 /// new load/store. 10254 bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { 10255 if (Level < AfterLegalizeDAG) 10256 return false; 10257 10258 bool isLoad = true; 10259 SDValue Ptr; 10260 EVT VT; 10261 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 10262 if (LD->isIndexed()) 10263 return false; 10264 VT = LD->getMemoryVT(); 10265 if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) && 10266 !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT)) 10267 return false; 10268 Ptr = LD->getBasePtr(); 10269 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 10270 if (ST->isIndexed()) 10271 return false; 10272 VT = ST->getMemoryVT(); 10273 if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) && 10274 !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT)) 10275 return false; 10276 Ptr = ST->getBasePtr(); 10277 isLoad = false; 10278 } else { 10279 return false; 10280 } 10281 10282 // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail 10283 // out. There is no reason to make this a preinc/predec. 10284 if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) || 10285 Ptr.getNode()->hasOneUse()) 10286 return false; 10287 10288 // Ask the target to do addressing mode selection. 
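// For example, on a target with pre-indexed addressing such as AArch64,
// add x1, x1, #8
// ldr x0, [x1]
// can fold into the single writeback form
// ldr x0, [x1, #8]!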
10289 SDValue BasePtr;
10290 SDValue Offset;
10291 ISD::MemIndexedMode AM = ISD::UNINDEXED;
10292 if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
10293 return false;
10294
10295 // Backends without true r+i pre-indexed forms may need to pass a
10296 // constant base with a variable offset so that constant coercion
10297 // will work with the patterns in canonical form.
10298 bool Swapped = false;
10299 if (isa<ConstantSDNode>(BasePtr)) {
10300 std::swap(BasePtr, Offset);
10301 Swapped = true;
10302 }
10303
10304 // Don't create an indexed load / store with zero offset.
10305 if (isNullConstant(Offset))
10306 return false;
10307
10308 // Try turning it into a pre-indexed load / store except when:
10309 // 1) The new base ptr is a frame index.
10310 // 2) If N is a store and the new base ptr is either the same as or is a
10311 // predecessor of the value being stored.
10312 // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
10313 // that would create a cycle.
10314 // 4) All uses are load / store ops that use it as old base ptr.
10315
10316 // Check #1. Preinc'ing a frame index would require copying the stack pointer
10317 // (plus the implicit offset) to a register to preinc anyway.
10318 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
10319 return false;
10320
10321 // Check #2.
10322 if (!isLoad) {
10323 SDValue Val = cast<StoreSDNode>(N)->getValue();
10324 if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
10325 return false;
10326 }
10327
10328 // Caches for hasPredecessorHelper.
10329 SmallPtrSet<const SDNode *, 32> Visited;
10330 SmallVector<const SDNode *, 16> Worklist;
10331 Worklist.push_back(N);
10332
10333 // If the offset is a constant, there may be other adds of constants that
10334 // can be folded with this one. We should do this to avoid having to keep
10335 // a copy of the original base pointer.
10336 SmallVector<SDNode *, 16> OtherUses;
10337 if (isa<ConstantSDNode>(Offset))
10338 for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(),
10339 UE = BasePtr.getNode()->use_end();
10340 UI != UE; ++UI) {
10341 SDUse &Use = UI.getUse();
10342 // Skip the use that is Ptr and uses of other results from BasePtr's
10343 // node (important for nodes that return multiple results).
10344 if (Use.getUser() == Ptr.getNode() || Use != BasePtr)
10345 continue;
10346
10347 if (SDNode::hasPredecessorHelper(Use.getUser(), Visited, Worklist))
10348 continue;
10349
10350 if (Use.getUser()->getOpcode() != ISD::ADD &&
10351 Use.getUser()->getOpcode() != ISD::SUB) {
10352 OtherUses.clear();
10353 break;
10354 }
10355
10356 SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1);
10357 if (!isa<ConstantSDNode>(Op1)) {
10358 OtherUses.clear();
10359 break;
10360 }
10361
10362 // FIXME: In some cases, we can be smarter about this.
10363 if (Op1.getValueType() != Offset.getValueType()) {
10364 OtherUses.clear();
10365 break;
10366 }
10367
10368 OtherUses.push_back(Use.getUser());
10369 }
10370
10371 if (Swapped)
10372 std::swap(BasePtr, Offset);
10373
10374 // Now check for #3 and #4.
10375 bool RealUse = false;
10376
10377 for (SDNode *Use : Ptr.getNode()->uses()) {
10378 if (Use == N)
10379 continue;
10380 if (SDNode::hasPredecessorHelper(Use, Visited, Worklist))
10381 return false;
10382
10383 // If Ptr may be folded in addressing mode of other use, then it's
10384 // not profitable to do this transformation.
10385 if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
10386 RealUse = true;
10387 }
10388
10389 if (!RealUse)
10390 return false;
10391
10392 SDValue Result;
10393 if (isLoad)
10394 Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
10395 BasePtr, Offset, AM);
10396 else
10397 Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
10398 BasePtr, Offset, AM);
10399 ++PreIndexedNodes;
10400 ++NodesCombined;
10401 DEBUG(dbgs() << "\nReplacing.4 ";
10402 N->dump(&DAG);
10403 dbgs() << "\nWith: ";
10404 Result.getNode()->dump(&DAG);
10405 dbgs() << '\n');
10406 WorklistRemover DeadNodes(*this);
10407 if (isLoad) {
10408 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
10409 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
10410 } else {
10411 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
10412 }
10413
10414 // Finally, since the node is now dead, remove it from the graph.
10415 deleteAndRecombine(N);
10416
10417 if (Swapped)
10418 std::swap(BasePtr, Offset);
10419
10420 // Replace other uses of BasePtr that can be updated to use Ptr
10421 for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
10422 unsigned OffsetIdx = 1;
10423 if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
10424 OffsetIdx = 0;
10425 assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
10426 BasePtr.getNode() && "Expected BasePtr operand");
10427
10428 // We need to replace ptr0 in the following expression:
10429 // x0 * offset0 + y0 * ptr0 = t0
10430 // knowing that
10431 // x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
10432 //
10433 // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
10434 // indexed load/store and the expression that needs to be re-written.
10435 //
10436 // Therefore, we have:
10437 // t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
10438
10439 ConstantSDNode *CN =
10440 cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
10441 int X0, X1, Y0, Y1;
10442 const APInt &Offset0 = CN->getAPIntValue();
10443 APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
10444
10445 X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
10446 Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
10447 X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
10448 Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
10449
10450 unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
10451
10452 APInt CNV = Offset0;
10453 if (X0 < 0) CNV = -CNV;
10454 if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
10455 else CNV = CNV - Offset1;
10456
10457 SDLoc DL(OtherUses[i]);
10458
10459 // We can now generate the new expression.
10460 SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0));
10461 SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);
10462
10463 SDValue NewUse = DAG.getNode(Opcode,
10464 DL,
10465 OtherUses[i]->getValueType(0), NewOp1, NewOp2);
10466 DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
10467 deleteAndRecombine(OtherUses[i]);
10468 }
10469
10470 // Replace the uses of Ptr with uses of the updated base value.
10471 DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
10472 deleteAndRecombine(Ptr.getNode());
10473
10474 return true;
10475 }
10476
10477 /// Try to combine a load/store with an add/sub of the base pointer node into
10478 /// a post-indexed load/store. The transformation effectively folds the
10479 /// add/subtract into the new indexed load/store, and all of the base
10480 /// pointer's other uses are redirected to the new load/store.
10481 bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
10482 if (Level < AfterLegalizeDAG)
10483 return false;
10484
10485 bool isLoad = true;
10486 SDValue Ptr;
10487 EVT VT;
10488 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
10489 if (LD->isIndexed())
10490 return false;
10491 VT = LD->getMemoryVT();
10492 if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
10493 !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
10494 return false;
10495 Ptr = LD->getBasePtr();
10496 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
10497 if (ST->isIndexed())
10498 return false;
10499 VT = ST->getMemoryVT();
10500 if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
10501 !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
10502 return false;
10503 Ptr = ST->getBasePtr();
10504 isLoad = false;
10505 } else {
10506 return false;
10507 }
10508
10509 if (Ptr.getNode()->hasOneUse())
10510 return false;
10511
10512 for (SDNode *Op : Ptr.getNode()->uses()) {
10513 if (Op == N ||
10514 (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
10515 continue;
10516
10517 SDValue BasePtr;
10518 SDValue Offset;
10519 ISD::MemIndexedMode AM = ISD::UNINDEXED;
10520 if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
10521 // Don't create an indexed load / store with zero offset.
10522 if (isNullConstant(Offset))
10523 continue;
10524
10525 // Try turning it into a post-indexed load / store except when
10526 // 1) All uses are load / store ops that use it as base ptr (and
10527 // it may be folded as addressing mode).
10528 // 2) Op must be independent of N, i.e. Op is neither a predecessor
10529 // nor a successor of N. Otherwise, if Op is folded that would
10530 // create a cycle.
10531
10532 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
10533 continue;
10534
10535 // Check for #1.
10536 bool TryNext = false;
10537 for (SDNode *Use : BasePtr.getNode()->uses()) {
10538 if (Use == Ptr.getNode())
10539 continue;
10540
10541 // If all the uses are load / store addresses, then don't do the
10542 // transformation.
10543 if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) {
10544 bool RealUse = false;
10545 for (SDNode *UseUse : Use->uses()) {
10546 if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
10547 RealUse = true;
10548 }
10549
10550 if (!RealUse) {
10551 TryNext = true;
10552 break;
10553 }
10554 }
10555 }
10556
10557 if (TryNext)
10558 continue;
10559
10560 // Check for #2
10561 if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
10562 SDValue Result = isLoad
10563 ? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
10564 BasePtr, Offset, AM)
10565 : DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
10566 BasePtr, Offset, AM);
10567 ++PostIndexedNodes;
10568 ++NodesCombined;
10569 DEBUG(dbgs() << "\nReplacing.5 ";
10570 N->dump(&DAG);
10571 dbgs() << "\nWith: ";
10572 Result.getNode()->dump(&DAG);
10573 dbgs() << '\n');
10574 WorklistRemover DeadNodes(*this);
10575 if (isLoad) {
10576 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
10577 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
10578 } else {
10579 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
10580 }
10581
10582 // Finally, since the node is now dead, remove it from the graph.
10583 deleteAndRecombine(N);
10584
10585 // Replace the uses of Use with uses of the updated base value.
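// (On AArch64, for example, the pair "ldr x0, [x1]; add x1, x1, #8" has at
// this point become the post-indexed form "ldr x0, [x1], #8".)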
10586 DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0), 10587 Result.getValue(isLoad ? 1 : 0)); 10588 deleteAndRecombine(Op); 10589 return true; 10590 } 10591 } 10592 } 10593 10594 return false; 10595 } 10596 10597 /// \brief Return the base-pointer arithmetic from an indexed \p LD. 10598 SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) { 10599 ISD::MemIndexedMode AM = LD->getAddressingMode(); 10600 assert(AM != ISD::UNINDEXED); 10601 SDValue BP = LD->getOperand(1); 10602 SDValue Inc = LD->getOperand(2); 10603 10604 // Some backends use TargetConstants for load offsets, but don't expect 10605 // TargetConstants in general ADD nodes. We can convert these constants into 10606 // regular Constants (if the constant is not opaque). 10607 assert((Inc.getOpcode() != ISD::TargetConstant || 10608 !cast<ConstantSDNode>(Inc)->isOpaque()) && 10609 "Cannot split out indexing using opaque target constants"); 10610 if (Inc.getOpcode() == ISD::TargetConstant) { 10611 ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc); 10612 Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc), 10613 ConstInc->getValueType(0)); 10614 } 10615 10616 unsigned Opc = 10617 (AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB); 10618 return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc); 10619 } 10620 10621 SDValue DAGCombiner::visitLOAD(SDNode *N) { 10622 LoadSDNode *LD = cast<LoadSDNode>(N); 10623 SDValue Chain = LD->getChain(); 10624 SDValue Ptr = LD->getBasePtr(); 10625 10626 // If load is not volatile and there are no uses of the loaded value (and 10627 // the updated indexed value in case of indexed loads), change uses of the 10628 // chain value into uses of the chain input (i.e. delete the dead load). 10629 if (!LD->isVolatile()) { 10630 if (N->getValueType(1) == MVT::Other) { 10631 // Unindexed loads. 10632 if (!N->hasAnyUseOfValue(0)) { 10633 // It's not safe to use the two value CombineTo variant here. e.g. 10634 // v1, chain2 = load chain1, loc 10635 // v2, chain3 = load chain2, loc 10636 // v3 = add v2, c 10637 // Now we replace use of chain2 with chain1. This makes the second load 10638 // isomorphic to the one we are deleting, and thus makes this load live. 10639 DEBUG(dbgs() << "\nReplacing.6 "; 10640 N->dump(&DAG); 10641 dbgs() << "\nWith chain: "; 10642 Chain.getNode()->dump(&DAG); 10643 dbgs() << "\n"); 10644 WorklistRemover DeadNodes(*this); 10645 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain); 10646 10647 if (N->use_empty()) 10648 deleteAndRecombine(N); 10649 10650 return SDValue(N, 0); // Return N so it doesn't get rechecked! 10651 } 10652 } else { 10653 // Indexed loads. 10654 assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?"); 10655 10656 // If this load has an opaque TargetConstant offset, then we cannot split 10657 // the indexing into an add/sub directly (that TargetConstant may not be 10658 // valid for a different type of node, and we cannot convert an opaque 10659 // target constant into a regular constant). 
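// For example, if only the incremented pointer result of
// val, ptr', ch = pre_inc load [ptr], #8
// is used, the load itself is dead and ptr' can be rebuilt as (add ptr, 8),
// which may then fold into nearby address computations.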
10660 bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
10661 cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();
10662
10663 if (!N->hasAnyUseOfValue(0) &&
10664 ((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
10665 SDValue Undef = DAG.getUNDEF(N->getValueType(0));
10666 SDValue Index;
10667 if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
10668 Index = SplitIndexingFromLoad(LD);
10669 // Try to fold the base pointer arithmetic into subsequent loads and
10670 // stores.
10671 AddUsersToWorklist(N);
10672 } else
10673 Index = DAG.getUNDEF(N->getValueType(1));
10674 DEBUG(dbgs() << "\nReplacing.7 ";
10675 N->dump(&DAG);
10676 dbgs() << "\nWith: ";
10677 Undef.getNode()->dump(&DAG);
10678 dbgs() << " and 2 other values\n");
10679 WorklistRemover DeadNodes(*this);
10680 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
10681 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
10682 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
10683 deleteAndRecombine(N);
10684 return SDValue(N, 0); // Return N so it doesn't get rechecked!
10685 }
10686 }
10687 }
10688
10689 // If this load is directly stored, replace the load value with the stored
10690 // value.
10691 // TODO: Handle store large -> read small portion.
10692 // TODO: Handle TRUNCSTORE/LOADEXT
10693 if (OptLevel != CodeGenOpt::None &&
10694 ISD::isNormalLoad(N) && !LD->isVolatile()) {
10695 if (ISD::isNON_TRUNCStore(Chain.getNode())) {
10696 StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
10697 if (PrevST->getBasePtr() == Ptr &&
10698 PrevST->getValue().getValueType() == N->getValueType(0))
10699 return CombineTo(N, Chain.getOperand(1), Chain);
10700 }
10701 }
10702
10703 // Try to infer better alignment information than the load already has.
10704 if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
10705 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
10706 if (Align > LD->getMemOperand()->getBaseAlignment()) {
10707 SDValue NewLoad = DAG.getExtLoad(
10708 LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr,
10709 LD->getPointerInfo(), LD->getMemoryVT(), Align,
10710 LD->getMemOperand()->getFlags(), LD->getAAInfo());
10711 if (NewLoad.getNode() != N)
10712 return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true);
10713 }
10714 }
10715 }
10716
10717 bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
10718 : DAG.getSubtarget().useAA();
10719 #ifndef NDEBUG
10720 if (CombinerAAOnlyFunc.getNumOccurrences() &&
10721 CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
10722 UseAA = false;
10723 #endif
10724 if (UseAA && LD->isUnindexed()) {
10725 // Walk up chain skipping non-aliasing memory nodes.
10726 SDValue BetterChain = FindBetterChain(N, Chain);
10727
10728 // If there is a better chain.
10729 if (Chain != BetterChain) {
10730 SDValue ReplLoad;
10731
10732 // Replace the chain to avoid the dependency.
10733 if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
10734 ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
10735 BetterChain, Ptr, LD->getMemOperand());
10736 } else {
10737 ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
10738 LD->getValueType(0),
10739 BetterChain, Ptr, LD->getMemoryVT(),
10740 LD->getMemOperand());
10741 }
10742
10743 // Create token factor to keep old chain connected.
10744 SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
10745 MVT::Other, Chain, ReplLoad.getValue(1));
10746
10747 // Make sure the new and old chains are cleaned up.
10748 AddToWorklist(Token.getNode());
10749
10750 // Replace uses with load result and token factor. Don't add users
10751 // to work list.
10752 return CombineTo(N, ReplLoad.getValue(0), Token, false);
10753 }
10754 }
10755
10756 // Try transforming N to an indexed load.
10757 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
10758 return SDValue(N, 0);
10759
10760 // Try to slice up N into more direct loads if the slices are mapped to
10761 // different register banks or pairing can take place.
10762 if (SliceUpLoad(N))
10763 return SDValue(N, 0);
10764
10765 return SDValue();
10766 }
10767
10768 namespace {
10769 /// \brief Helper structure used to slice a load into smaller loads.
10770 /// Basically a slice is obtained from the following sequence:
10771 /// Origin = load Ty1, Base
10772 /// Shift = srl Ty1 Origin, CstTy Amount
10773 /// Inst = trunc Shift to Ty2
10774 ///
10775 /// Then, it will be rewritten into:
10776 /// Slice = load SliceTy, Base + SliceOffset
10777 /// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
10778 ///
10779 /// SliceTy is deduced from the number of bits that are actually used to
10780 /// build Inst.
10781 struct LoadedSlice {
10782 /// \brief Helper structure used to compute the cost of a slice.
10783 struct Cost {
10784 /// Are we optimizing for code size.
10785 bool ForCodeSize;
10786 /// Various costs.
10787 unsigned Loads;
10788 unsigned Truncates;
10789 unsigned CrossRegisterBanksCopies;
10790 unsigned ZExts;
10791 unsigned Shift;
10792
10793 Cost(bool ForCodeSize = false)
10794 : ForCodeSize(ForCodeSize), Loads(0), Truncates(0),
10795 CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {}
10796
10797 /// \brief Get the cost of one isolated slice.
10798 Cost(const LoadedSlice &LS, bool ForCodeSize = false)
10799 : ForCodeSize(ForCodeSize), Loads(1), Truncates(0),
10800 CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {
10801 EVT TruncType = LS.Inst->getValueType(0);
10802 EVT LoadedType = LS.getLoadedType();
10803 if (TruncType != LoadedType &&
10804 !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
10805 ZExts = 1;
10806 }
10807
10808 /// \brief Account for slicing gain in the current cost.
10809 /// Slicing provides a few gains, like removing a shift or a
10810 /// truncate. This method grows the cost of the original
10811 /// load with the gain from this slice.
10812 void addSliceGain(const LoadedSlice &LS) {
10813 // Each slice saves a truncate.
10814 const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
10815 if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(),
10816 LS.Inst->getValueType(0)))
10817 ++Truncates;
10818 // If there is a shift amount, this slice gets rid of it.
10819 if (LS.Shift)
10820 ++Shift;
10821 // If this slice can merge a cross register bank copy, account for it.
10822 if (LS.canMergeExpensiveCrossRegisterBankCopy())
10823 ++CrossRegisterBanksCopies;
10824 }
10825
10826 Cost &operator+=(const Cost &RHS) {
10827 Loads += RHS.Loads;
10828 Truncates += RHS.Truncates;
10829 CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
10830 ZExts += RHS.ZExts;
10831 Shift += RHS.Shift;
10832 return *this;
10833 }
10834
10835 bool operator==(const Cost &RHS) const {
10836 return Loads == RHS.Loads && Truncates == RHS.Truncates &&
10837 CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
10838 ZExts == RHS.ZExts && Shift == RHS.Shift;
10839 }
10840
10841 bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
10842
10843 bool operator<(const Cost &RHS) const {
10844 // Assume cross-register-bank copies are as expensive as loads.
10845 // FIXME: Do we want some more target hooks?
10846 unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
10847 unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
10848 // Unless we are optimizing for code size, consider the
10849 // expensive operation first.
10850 if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
10851 return ExpensiveOpsLHS < ExpensiveOpsRHS;
10852 return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
10853 (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
10854 }
10855
10856 bool operator>(const Cost &RHS) const { return RHS < *this; }
10857
10858 bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
10859
10860 bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
10861 };
10862 // The last instruction that represents the slice. This should be a
10863 // truncate instruction.
10864 SDNode *Inst;
10865 // The original load instruction.
10866 LoadSDNode *Origin;
10867 // The right shift amount in bits from the original load.
10868 unsigned Shift;
10869 // The DAG from which Origin comes.
10870 // This is used to get some contextual information about legal types, etc.
10871 SelectionDAG *DAG;
10872
10873 LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
10874 unsigned Shift = 0, SelectionDAG *DAG = nullptr)
10875 : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
10876
10877 /// \brief Get the bits used in a chunk of bits \p BitWidth large.
10878 /// \return Result is \p BitWidth bits wide, with used bits set to 1 and
10879 /// unused bits set to 0.
10880 APInt getUsedBits() const {
10881 // Reproduce the trunc(lshr) sequence:
10882 // - Start from the truncated value.
10883 // - Zero extend to the desired bit width.
10884 // - Shift left.
10885 assert(Origin && "No original load to compare against.");
10886 unsigned BitWidth = Origin->getValueSizeInBits(0);
10887 assert(Inst && "This slice is not bound to an instruction");
10888 assert(Inst->getValueSizeInBits(0) <= BitWidth &&
10889 "Extracted slice is bigger than the whole type!");
10890 APInt UsedBits(Inst->getValueSizeInBits(0), 0);
10891 UsedBits.setAllBits();
10892 UsedBits = UsedBits.zext(BitWidth);
10893 UsedBits <<= Shift;
10894 return UsedBits;
10895 }
10896
10897 /// \brief Get the size of the slice to be loaded in bytes.
10898 unsigned getLoadedSize() const {
10899 unsigned SliceSize = getUsedBits().countPopulation();
10900 assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
10901 return SliceSize / 8;
10902 }
10903
10904 /// \brief Get the type that will be loaded for this slice.
10905 /// Note: This may not be the final type for the slice.
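/// For example (illustrative only): a slice whose getUsedBits() contains
/// sixteen set bits is loaded as an i16, even if the truncate that roots
/// the slice produces a wider final type.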
10906 EVT getLoadedType() const { 10907 assert(DAG && "Missing context"); 10908 LLVMContext &Ctxt = *DAG->getContext(); 10909 return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8); 10910 } 10911 10912 /// \brief Get the alignment of the load used for this slice. 10913 unsigned getAlignment() const { 10914 unsigned Alignment = Origin->getAlignment(); 10915 unsigned Offset = getOffsetFromBase(); 10916 if (Offset != 0) 10917 Alignment = MinAlign(Alignment, Alignment + Offset); 10918 return Alignment; 10919 } 10920 10921 /// \brief Check if this slice can be rewritten with legal operations. 10922 bool isLegal() const { 10923 // An invalid slice is not legal. 10924 if (!Origin || !Inst || !DAG) 10925 return false; 10926 10927 // Offsets are for indexed load only, we do not handle that. 10928 if (!Origin->getOffset().isUndef()) 10929 return false; 10930 10931 const TargetLowering &TLI = DAG->getTargetLoweringInfo(); 10932 10933 // Check that the type is legal. 10934 EVT SliceType = getLoadedType(); 10935 if (!TLI.isTypeLegal(SliceType)) 10936 return false; 10937 10938 // Check that the load is legal for this type. 10939 if (!TLI.isOperationLegal(ISD::LOAD, SliceType)) 10940 return false; 10941 10942 // Check that the offset can be computed. 10943 // 1. Check its type. 10944 EVT PtrType = Origin->getBasePtr().getValueType(); 10945 if (PtrType == MVT::Untyped || PtrType.isExtended()) 10946 return false; 10947 10948 // 2. Check that it fits in the immediate. 10949 if (!TLI.isLegalAddImmediate(getOffsetFromBase())) 10950 return false; 10951 10952 // 3. Check that the computation is legal. 10953 if (!TLI.isOperationLegal(ISD::ADD, PtrType)) 10954 return false; 10955 10956 // Check that the zext is legal if it needs one. 10957 EVT TruncateType = Inst->getValueType(0); 10958 if (TruncateType != SliceType && 10959 !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType)) 10960 return false; 10961 10962 return true; 10963 } 10964 10965 /// \brief Get the offset in bytes of this slice in the original chunk of 10966 /// bits. 10967 /// \pre DAG != nullptr. 10968 uint64_t getOffsetFromBase() const { 10969 assert(DAG && "Missing context."); 10970 bool IsBigEndian = DAG->getDataLayout().isBigEndian(); 10971 assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported."); 10972 uint64_t Offset = Shift / 8; 10973 unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8; 10974 assert(!(Origin->getValueSizeInBits(0) & 0x7) && 10975 "The size of the original loaded type is not a multiple of a" 10976 " byte."); 10977 // If Offset is bigger than TySizeInBytes, it means we are loading all 10978 // zeros. This should have been optimized before in the process. 10979 assert(TySizeInBytes > Offset && 10980 "Invalid shift amount for given loaded size"); 10981 if (IsBigEndian) 10982 Offset = TySizeInBytes - Offset - getLoadedSize(); 10983 return Offset; 10984 } 10985 10986 /// \brief Generate the sequence of instructions to load the slice 10987 /// represented by this object and redirect the uses of this slice to 10988 /// this new sequence of instructions. 10989 /// \pre this->Inst && this->Origin are valid Instructions and this 10990 /// object passed the legal check: LoadedSlice::isLegal returned true. 10991 /// \return The last instruction of the sequence used to load the slice. 
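/// Illustrative example (assumed values, not from the original source):
/// for an i32 \p Origin with Shift == 16 on a little-endian target, the
/// emitted sequence is roughly
///   BaseAddr = add Base, 2           ; getOffsetFromBase() == 2
///   Slice    = load i16, BaseAddr    ; getLoadedType() == i16
///   Result   = zext Slice            ; only if the final type differs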
10992 SDValue loadSlice() const { 10993 assert(Inst && Origin && "Unable to replace a non-existing slice."); 10994 const SDValue &OldBaseAddr = Origin->getBasePtr(); 10995 SDValue BaseAddr = OldBaseAddr; 10996 // Get the offset in that chunk of bytes w.r.t. the endianness. 10997 int64_t Offset = static_cast<int64_t>(getOffsetFromBase()); 10998 assert(Offset >= 0 && "Offset too big to fit in int64_t!"); 10999 if (Offset) { 11000 // BaseAddr = BaseAddr + Offset. 11001 EVT ArithType = BaseAddr.getValueType(); 11002 SDLoc DL(Origin); 11003 BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr, 11004 DAG->getConstant(Offset, DL, ArithType)); 11005 } 11006 11007 // Create the type of the loaded slice according to its size. 11008 EVT SliceType = getLoadedType(); 11009 11010 // Create the load for the slice. 11011 SDValue LastInst = 11012 DAG->getLoad(SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr, 11013 Origin->getPointerInfo().getWithOffset(Offset), 11014 getAlignment(), Origin->getMemOperand()->getFlags()); 11015 // If the final type is not the same as the loaded type, this means that 11016 // we have to pad with zero. Create a zero extend for that. 11017 EVT FinalType = Inst->getValueType(0); 11018 if (SliceType != FinalType) 11019 LastInst = 11020 DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst); 11021 return LastInst; 11022 } 11023 11024 /// \brief Check if this slice can be merged with an expensive cross register 11025 /// bank copy. E.g., 11026 /// i = load i32 11027 /// f = bitcast i32 i to float 11028 bool canMergeExpensiveCrossRegisterBankCopy() const { 11029 if (!Inst || !Inst->hasOneUse()) 11030 return false; 11031 SDNode *Use = *Inst->use_begin(); 11032 if (Use->getOpcode() != ISD::BITCAST) 11033 return false; 11034 assert(DAG && "Missing context"); 11035 const TargetLowering &TLI = DAG->getTargetLoweringInfo(); 11036 EVT ResVT = Use->getValueType(0); 11037 const TargetRegisterClass *ResRC = TLI.getRegClassFor(ResVT.getSimpleVT()); 11038 const TargetRegisterClass *ArgRC = 11039 TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT()); 11040 if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT)) 11041 return false; 11042 11043 // At this point, we know that we perform a cross-register-bank copy. 11044 // Check if it is expensive. 11045 const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo(); 11046 // Assume bitcasts are cheap, unless both register classes do not 11047 // explicitly share a common sub class. 11048 if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC)) 11049 return false; 11050 11051 // Check if it will be merged with the load. 11052 // 1. Check the alignment constraint. 11053 unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment( 11054 ResVT.getTypeForEVT(*DAG->getContext())); 11055 11056 if (RequiredAlignment > getAlignment()) 11057 return false; 11058 11059 // 2. Check that the load is a legal operation for that type. 11060 if (!TLI.isOperationLegal(ISD::LOAD, ResVT)) 11061 return false; 11062 11063 // 3. Check that we do not have a zext in the way. 11064 if (Inst->getValueType(0) != getLoadedType()) 11065 return false; 11066 11067 return true; 11068 } 11069 }; 11070 } 11071 11072 /// \brief Check that all bits set in \p UsedBits form a dense region, i.e., 11073 /// \p UsedBits looks like 0..0 1..1 0..0. 11074 static bool areUsedBitsDense(const APInt &UsedBits) { 11075 // If all the bits are one, this is dense! 
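// (Illustrative: 0b00111100 is dense, a single run of ones; 0b00100100
// is not.)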
11076 if (UsedBits.isAllOnesValue())
11077 return true;
11078
11079 // Get rid of the unused bits on the right.
11080 APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
11081 // Get rid of the unused bits on the left.
11082 if (NarrowedUsedBits.countLeadingZeros())
11083 NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
11084 // Check that the chunk of bits is completely used.
11085 return NarrowedUsedBits.isAllOnesValue();
11086 }
11087
11088 /// \brief Check whether or not \p First and \p Second are next to each other
11089 /// in memory. This means that there is no hole between the bits loaded
11090 /// by \p First and the bits loaded by \p Second.
11091 static bool areSlicesNextToEachOther(const LoadedSlice &First,
11092 const LoadedSlice &Second) {
11093 assert(First.Origin == Second.Origin && First.Origin &&
11094 "Unable to match different memory origins.");
11095 APInt UsedBits = First.getUsedBits();
11096 assert((UsedBits & Second.getUsedBits()) == 0 &&
11097 "Slices are not supposed to overlap.");
11098 UsedBits |= Second.getUsedBits();
11099 return areUsedBitsDense(UsedBits);
11100 }
11101
11102 /// \brief Adjust the \p GlobalLSCost according to the target
11103 /// pairing capabilities and the layout of the slices.
11104 /// \pre \p GlobalLSCost should account for at least as many loads as
11105 /// there are slices in \p LoadedSlices.
11106 static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
11107 LoadedSlice::Cost &GlobalLSCost) {
11108 unsigned NumberOfSlices = LoadedSlices.size();
11109 // If there are fewer than 2 elements, no pairing is possible.
11110 if (NumberOfSlices < 2)
11111 return;
11112
11113 // Sort the slices so that elements that are likely to be next to each
11114 // other in memory are next to each other in the list.
11115 std::sort(LoadedSlices.begin(), LoadedSlices.end(),
11116 [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
11117 assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
11118 return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
11119 });
11120 const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
11121 // First (resp. Second) is the first (resp. second) potential candidate
11122 // to be placed in a paired load.
11123 const LoadedSlice *First = nullptr;
11124 const LoadedSlice *Second = nullptr;
11125 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
11126 // Set the beginning of the pair.
11127 First = Second) {
11128
11129 Second = &LoadedSlices[CurrSlice];
11130
11131 // If First is NULL, it means we start a new pair.
11132 // Get to the next slice.
11133 if (!First)
11134 continue;
11135
11136 EVT LoadedType = First->getLoadedType();
11137
11138 // If the types of the slices are different, we cannot pair them.
11139 if (LoadedType != Second->getLoadedType())
11140 continue;
11141
11142 // Check if the target supplies paired loads for this type.
11143 unsigned RequiredAlignment = 0;
11144 if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
11145 // Move to the next pair; this type is hopeless.
11146 Second = nullptr;
11147 continue;
11148 }
11149 // Check if we meet the alignment requirement.
11150 if (RequiredAlignment > First->getAlignment())
11151 continue;
11152
11153 // Check that both loads are next to each other in memory.
11154 if (!areSlicesNextToEachOther(*First, *Second))
11155 continue;
11156
11157 assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
11158 --GlobalLSCost.Loads;
11159 // Move to the next pair.
11160 Second = nullptr;
11161 }
11162 }
11163
11164 /// \brief Check the profitability of all the involved LoadedSlices.
11165 /// Currently, it is considered profitable if there are exactly two
11166 /// involved slices (1) which are (2) next to each other in memory, and
11167 /// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
11168 ///
11169 /// Note: The order of the elements in \p LoadedSlices may be modified, but not
11170 /// the elements themselves.
11171 ///
11172 /// FIXME: When the cost model is mature enough, we can relax
11173 /// constraints (1) and (2).
11174 static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
11175 const APInt &UsedBits, bool ForCodeSize) {
11176 unsigned NumberOfSlices = LoadedSlices.size();
11177 if (StressLoadSlicing)
11178 return NumberOfSlices > 1;
11179
11180 // Check (1).
11181 if (NumberOfSlices != 2)
11182 return false;
11183
11184 // Check (2).
11185 if (!areUsedBitsDense(UsedBits))
11186 return false;
11187
11188 // Check (3).
11189 LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
11190 // The original code has one big load.
11191 OrigCost.Loads = 1;
11192 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
11193 const LoadedSlice &LS = LoadedSlices[CurrSlice];
11194 // Accumulate the cost of all the slices.
11195 LoadedSlice::Cost SliceCost(LS, ForCodeSize);
11196 GlobalSlicingCost += SliceCost;
11197
11198 // Account, in the cost of the original configuration, for the gain
11199 // obtained with the current slices.
11200 OrigCost.addSliceGain(LS);
11201 }
11202
11203 // If the target supports paired loads, adjust the cost accordingly.
11204 adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
11205 return OrigCost > GlobalSlicingCost;
11206 }
11207
11208 /// \brief If the given load, \p LI, is used only by trunc or trunc(lshr)
11209 /// operations, split it into the various pieces being extracted.
11210 ///
11211 /// This sort of thing is introduced by SROA.
11212 /// This slicing takes care not to insert overlapping loads.
11213 /// \pre LI is a simple load (i.e., not an atomic or volatile load).
11214 bool DAGCombiner::SliceUpLoad(SDNode *N) {
11215 if (Level < AfterLegalizeDAG)
11216 return false;
11217
11218 LoadSDNode *LD = cast<LoadSDNode>(N);
11219 if (LD->isVolatile() || !ISD::isNormalLoad(LD) ||
11220 !LD->getValueType(0).isInteger())
11221 return false;
11222
11223 // Keep track of already used bits to detect overlapping values.
11224 // In that case, we will just abort the transformation.
11225 APInt UsedBits(LD->getValueSizeInBits(0), 0);
11226
11227 SmallVector<LoadedSlice, 4> LoadedSlices;
11228
11229 // Check if this load is used as several smaller chunks of bits.
11230 // Basically, look for uses in trunc or trunc(lshr) and record a new chain
11231 // of computation for each trunc.
11232 for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
11233 UI != UIEnd; ++UI) {
11234 // Skip the uses of the chain.
11235 if (UI.getUse().getResNo() != 0)
11236 continue;
11237
11238 SDNode *User = *UI;
11239 unsigned Shift = 0;
11240
11241 // Check if this is a trunc(lshr).
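// Illustrative pattern (not from the original source):
//   Shift = srl i32 Origin, 16
//   User  = trunc Shift to i16
// records Shift == 16 and continues the checks below on the truncate.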
11242 if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
11243 isa<ConstantSDNode>(User->getOperand(1))) {
11244 Shift = cast<ConstantSDNode>(User->getOperand(1))->getZExtValue();
11245 User = *User->use_begin();
11246 }
11247
11248 // At this point, User is a TRUNCATE iff we encountered trunc or
11249 // trunc(lshr).
11250 if (User->getOpcode() != ISD::TRUNCATE)
11251 return false;
11252
11253 // The width of the type must be a power of 2 and at least 8 bits;
11254 // otherwise the load cannot be represented in LLVM IR.
11255 // Moreover, if the shift amount is not a multiple of 8 bits, the slice
11256 // will straddle byte boundaries. We do not support that.
11257 unsigned Width = User->getValueSizeInBits(0);
11258 if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
11259 return false;
11260
11261 // Build the slice for this chain of computations.
11262 LoadedSlice LS(User, LD, Shift, &DAG);
11263 APInt CurrentUsedBits = LS.getUsedBits();
11264
11265 // Check if this slice overlaps with another.
11266 if ((CurrentUsedBits & UsedBits) != 0)
11267 return false;
11268 // Update the bits used globally.
11269 UsedBits |= CurrentUsedBits;
11270
11271 // Check if the new slice would be legal.
11272 if (!LS.isLegal())
11273 return false;
11274
11275 // Record the slice.
11276 LoadedSlices.push_back(LS);
11277 }
11278
11279 // Abort slicing if it does not seem to be profitable.
11280 if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
11281 return false;
11282
11283 ++SlicedLoads;
11284
11285 // Rewrite each chain to use an independent load.
11286 // By construction, each chain can be represented by a unique load.
11287
11288 // Prepare the argument for the new token factor for all the slices.
11289 SmallVector<SDValue, 8> ArgChains;
11290 for (SmallVectorImpl<LoadedSlice>::const_iterator
11291 LSIt = LoadedSlices.begin(),
11292 LSItEnd = LoadedSlices.end();
11293 LSIt != LSItEnd; ++LSIt) {
11294 SDValue SliceInst = LSIt->loadSlice();
11295 CombineTo(LSIt->Inst, SliceInst, true);
11296 if (SliceInst.getOpcode() != ISD::LOAD)
11297 SliceInst = SliceInst.getOperand(0);
11298 assert(SliceInst->getOpcode() == ISD::LOAD &&
11299 "It takes more than a zext to get to the loaded slice!!");
11300 ArgChains.push_back(SliceInst.getValue(1));
11301 }
11302
11303 SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
11304 ArgChains);
11305 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
11306 return true;
11307 }
11308
11309 /// Check to see if V is (and load (ptr), imm), where the load has
11310 /// specific bytes cleared out. If so, return the byte size being masked out
11311 /// and the shift amount.
11312 static std::pair<unsigned, unsigned>
11313 CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
11314 std::pair<unsigned, unsigned> Result(0, 0);
11315
11316 // Check for the structure we're looking for.
11317 if (V->getOpcode() != ISD::AND ||
11318 !isa<ConstantSDNode>(V->getOperand(1)) ||
11319 !ISD::isNormalLoad(V->getOperand(0).getNode()))
11320 return Result;
11321
11322 // Check the chain and pointer.
11323 LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
11324 if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
11325
11326 // The store should be chained directly to the load or be an operand of a
11327 // tokenfactor.
11328 if (LD == Chain.getNode())
11329 ; // ok.
11330 else if (Chain->getOpcode() != ISD::TokenFactor)
11331 return Result; // Fail.
11332 else { 11333 bool isOk = false; 11334 for (const SDValue &ChainOp : Chain->op_values()) 11335 if (ChainOp.getNode() == LD) { 11336 isOk = true; 11337 break; 11338 } 11339 if (!isOk) return Result; 11340 } 11341 11342 // This only handles simple types. 11343 if (V.getValueType() != MVT::i16 && 11344 V.getValueType() != MVT::i32 && 11345 V.getValueType() != MVT::i64) 11346 return Result; 11347 11348 // Check the constant mask. Invert it so that the bits being masked out are 11349 // 0 and the bits being kept are 1. Use getSExtValue so that leading bits 11350 // follow the sign bit for uniformity. 11351 uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue(); 11352 unsigned NotMaskLZ = countLeadingZeros(NotMask); 11353 if (NotMaskLZ & 7) return Result; // Must be multiple of a byte. 11354 unsigned NotMaskTZ = countTrailingZeros(NotMask); 11355 if (NotMaskTZ & 7) return Result; // Must be multiple of a byte. 11356 if (NotMaskLZ == 64) return Result; // All zero mask. 11357 11358 // See if we have a continuous run of bits. If so, we have 0*1+0* 11359 if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64) 11360 return Result; 11361 11362 // Adjust NotMaskLZ down to be from the actual size of the int instead of i64. 11363 if (V.getValueType() != MVT::i64 && NotMaskLZ) 11364 NotMaskLZ -= 64-V.getValueSizeInBits(); 11365 11366 unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8; 11367 switch (MaskedBytes) { 11368 case 1: 11369 case 2: 11370 case 4: break; 11371 default: return Result; // All one mask, or 5-byte mask. 11372 } 11373 11374 // Verify that the first bit starts at a multiple of mask so that the access 11375 // is aligned the same as the access width. 11376 if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result; 11377 11378 Result.first = MaskedBytes; 11379 Result.second = NotMaskTZ/8; 11380 return Result; 11381 } 11382 11383 11384 /// Check to see if IVal is something that provides a value as specified by 11385 /// MaskInfo. If so, replace the specified store with a narrower store of 11386 /// truncated IVal. 11387 static SDNode * 11388 ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo, 11389 SDValue IVal, StoreSDNode *St, 11390 DAGCombiner *DC) { 11391 unsigned NumBytes = MaskInfo.first; 11392 unsigned ByteShift = MaskInfo.second; 11393 SelectionDAG &DAG = DC->getDAG(); 11394 11395 // Check to see if IVal is all zeros in the part being masked in by the 'or' 11396 // that uses this. If not, this is not a replacement. 11397 APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(), 11398 ByteShift*8, (ByteShift+NumBytes)*8); 11399 if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr; 11400 11401 // Check that it is legal on the target to do this. It is legal if the new 11402 // VT we're shrinking to (i8/i16/i32) is legal or we're still before type 11403 // legalization. 11404 MVT VT = MVT::getIntegerVT(NumBytes*8); 11405 if (!DC->isTypeLegal(VT)) 11406 return nullptr; 11407 11408 // Okay, we can do this! Replace the 'St' store with a store of IVal that is 11409 // shifted by ByteShift and truncated down to NumBytes. 11410 if (ByteShift) { 11411 SDLoc DL(IVal); 11412 IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal, 11413 DAG.getConstant(ByteShift*8, DL, 11414 DC->getShiftAmountTy(IVal.getValueType()))); 11415 } 11416 11417 // Figure out the offset for the store and the alignment of the access. 
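// Worked example (assumed values, not from the original source): for an
// i32 IVal with NumBytes == 1 and ByteShift == 2, the narrow i8 store
// lands at byte offset 2 on a little-endian target and at offset
// 4 - 2 - 1 == 1 on a big-endian one.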
11418 unsigned StOffset;
11419 unsigned NewAlign = St->getAlignment();
11420
11421 if (DAG.getDataLayout().isLittleEndian())
11422 StOffset = ByteShift;
11423 else
11424 StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
11425
11426 SDValue Ptr = St->getBasePtr();
11427 if (StOffset) {
11428 SDLoc DL(IVal);
11429 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(),
11430 Ptr, DAG.getConstant(StOffset, DL, Ptr.getValueType()));
11431 NewAlign = MinAlign(NewAlign, StOffset);
11432 }
11433
11434 // Truncate down to the new size.
11435 IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);
11436
11437 ++OpsNarrowed;
11438 return DAG
11439 .getStore(St->getChain(), SDLoc(St), IVal, Ptr,
11440 St->getPointerInfo().getWithOffset(StOffset), NewAlign)
11441 .getNode();
11442 }
11443
11444
11445 /// Look for a sequence of load / op / store where op is one of 'or', 'xor',
11446 /// and 'and' of immediates. If 'op' is only touching some of the loaded bits,
11447 /// try narrowing the load and store if it would end up being a win for
11448 /// performance or code size.
11449 SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
11450 StoreSDNode *ST = cast<StoreSDNode>(N);
11451 if (ST->isVolatile())
11452 return SDValue();
11453
11454 SDValue Chain = ST->getChain();
11455 SDValue Value = ST->getValue();
11456 SDValue Ptr = ST->getBasePtr();
11457 EVT VT = Value.getValueType();
11458
11459 if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
11460 return SDValue();
11461
11462 unsigned Opc = Value.getOpcode();
11463
11464 // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
11465 // is a byte mask indicating a consecutive number of bytes, check to see if
11466 // Y is known to provide just those bytes. If so, we try to replace the
11467 // load / or / store sequence with a single (narrower) store, which makes
11468 // the load dead.
11469 if (Opc == ISD::OR) {
11470 std::pair<unsigned, unsigned> MaskedLoad;
11471 MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
11472 if (MaskedLoad.first)
11473 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
11474 Value.getOperand(1), ST, this))
11475 return SDValue(NewST, 0);
11476
11477 // Or is commutative, so try swapping X and Y.
11478 MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
11479 if (MaskedLoad.first)
11480 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
11481 Value.getOperand(0), ST, this))
11482 return SDValue(NewST, 0);
11483 }
11484
11485 if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
11486 Value.getOperand(1).getOpcode() != ISD::Constant)
11487 return SDValue();
11488
11489 SDValue N0 = Value.getOperand(0);
11490 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
11491 Chain == SDValue(N0.getNode(), 1)) {
11492 LoadSDNode *LD = cast<LoadSDNode>(N0);
11493 if (LD->getBasePtr() != Ptr ||
11494 LD->getPointerInfo().getAddrSpace() !=
11495 ST->getPointerInfo().getAddrSpace())
11496 return SDValue();
11497
11498 // Find the type to which to narrow the load / op / store.
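// Illustrative example (not from the original source): for
//   store (or (load i32, p), 0x0000FF00), p
// only byte 1 of the value changes, so the sequence below narrows it to
// an i8 load / or / store at offset 1 (offset 2 on a big-endian target).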
11499 SDValue N1 = Value.getOperand(1); 11500 unsigned BitWidth = N1.getValueSizeInBits(); 11501 APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue(); 11502 if (Opc == ISD::AND) 11503 Imm ^= APInt::getAllOnesValue(BitWidth); 11504 if (Imm == 0 || Imm.isAllOnesValue()) 11505 return SDValue(); 11506 unsigned ShAmt = Imm.countTrailingZeros(); 11507 unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1; 11508 unsigned NewBW = NextPowerOf2(MSB - ShAmt); 11509 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW); 11510 // The narrowing should be profitable, the load/store operation should be 11511 // legal (or custom) and the store size should be equal to the NewVT width. 11512 while (NewBW < BitWidth && 11513 (NewVT.getStoreSizeInBits() != NewBW || 11514 !TLI.isOperationLegalOrCustom(Opc, NewVT) || 11515 !TLI.isNarrowingProfitable(VT, NewVT))) { 11516 NewBW = NextPowerOf2(NewBW); 11517 NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW); 11518 } 11519 if (NewBW >= BitWidth) 11520 return SDValue(); 11521 11522 // If the lsb changed does not start at the type bitwidth boundary, 11523 // start at the previous one. 11524 if (ShAmt % NewBW) 11525 ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW; 11526 APInt Mask = APInt::getBitsSet(BitWidth, ShAmt, 11527 std::min(BitWidth, ShAmt + NewBW)); 11528 if ((Imm & Mask) == Imm) { 11529 APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW); 11530 if (Opc == ISD::AND) 11531 NewImm ^= APInt::getAllOnesValue(NewBW); 11532 uint64_t PtrOff = ShAmt / 8; 11533 // For big endian targets, we need to adjust the offset to the pointer to 11534 // load the correct bytes. 11535 if (DAG.getDataLayout().isBigEndian()) 11536 PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff; 11537 11538 unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff); 11539 Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext()); 11540 if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy)) 11541 return SDValue(); 11542 11543 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD), 11544 Ptr.getValueType(), Ptr, 11545 DAG.getConstant(PtrOff, SDLoc(LD), 11546 Ptr.getValueType())); 11547 SDValue NewLD = 11548 DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr, 11549 LD->getPointerInfo().getWithOffset(PtrOff), NewAlign, 11550 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 11551 SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD, 11552 DAG.getConstant(NewImm, SDLoc(Value), 11553 NewVT)); 11554 SDValue NewST = 11555 DAG.getStore(Chain, SDLoc(N), NewVal, NewPtr, 11556 ST->getPointerInfo().getWithOffset(PtrOff), NewAlign); 11557 11558 AddToWorklist(NewPtr.getNode()); 11559 AddToWorklist(NewLD.getNode()); 11560 AddToWorklist(NewVal.getNode()); 11561 WorklistRemover DeadNodes(*this); 11562 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1)); 11563 ++OpsNarrowed; 11564 return NewST; 11565 } 11566 } 11567 11568 return SDValue(); 11569 } 11570 11571 /// For a given floating point load / store pair, if the load value isn't used 11572 /// by any other operations, then consider transforming the pair to integer 11573 /// load / store operations if the target deems the transformation profitable. 
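/// Illustrative example (not from the original source): on a target where
/// the integer operations are cheaper, the pair
///   f = load f64, p ; store f64 f, q
/// becomes
///   i = load i64, p ; store i64 i, q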
11574 SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { 11575 StoreSDNode *ST = cast<StoreSDNode>(N); 11576 SDValue Chain = ST->getChain(); 11577 SDValue Value = ST->getValue(); 11578 if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) && 11579 Value.hasOneUse() && 11580 Chain == SDValue(Value.getNode(), 1)) { 11581 LoadSDNode *LD = cast<LoadSDNode>(Value); 11582 EVT VT = LD->getMemoryVT(); 11583 if (!VT.isFloatingPoint() || 11584 VT != ST->getMemoryVT() || 11585 LD->isNonTemporal() || 11586 ST->isNonTemporal() || 11587 LD->getPointerInfo().getAddrSpace() != 0 || 11588 ST->getPointerInfo().getAddrSpace() != 0) 11589 return SDValue(); 11590 11591 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 11592 if (!TLI.isOperationLegal(ISD::LOAD, IntVT) || 11593 !TLI.isOperationLegal(ISD::STORE, IntVT) || 11594 !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) || 11595 !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT)) 11596 return SDValue(); 11597 11598 unsigned LDAlign = LD->getAlignment(); 11599 unsigned STAlign = ST->getAlignment(); 11600 Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext()); 11601 unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy); 11602 if (LDAlign < ABIAlign || STAlign < ABIAlign) 11603 return SDValue(); 11604 11605 SDValue NewLD = 11606 DAG.getLoad(IntVT, SDLoc(Value), LD->getChain(), LD->getBasePtr(), 11607 LD->getPointerInfo(), LDAlign); 11608 11609 SDValue NewST = 11610 DAG.getStore(NewLD.getValue(1), SDLoc(N), NewLD, ST->getBasePtr(), 11611 ST->getPointerInfo(), STAlign); 11612 11613 AddToWorklist(NewLD.getNode()); 11614 AddToWorklist(NewST.getNode()); 11615 WorklistRemover DeadNodes(*this); 11616 DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1)); 11617 ++LdStFP2Int; 11618 return NewST; 11619 } 11620 11621 return SDValue(); 11622 } 11623 11624 // This is a helper function for visitMUL to check the profitability 11625 // of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2). 11626 // MulNode is the original multiply, AddNode is (add x, c1), 11627 // and ConstNode is c2. 11628 // 11629 // If the (add x, c1) has multiple uses, we could increase 11630 // the number of adds if we make this transformation. 11631 // It would only be worth doing this if we can remove a 11632 // multiply in the process. Check for that here. 11633 // To illustrate: 11634 // (A + c1) * c3 11635 // (A + c2) * c3 11636 // We're checking for cases where we have common "c3 * A" expressions. 11637 bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode, 11638 SDValue &AddNode, 11639 SDValue &ConstNode) { 11640 APInt Val; 11641 11642 // If the add only has one use, this would be OK to do. 11643 if (AddNode.getNode()->hasOneUse()) 11644 return true; 11645 11646 // Walk all the users of the constant with which we're multiplying. 11647 for (SDNode *Use : ConstNode->uses()) { 11648 11649 if (Use == MulNode) // This use is the one we're on right now. Skip it. 11650 continue; 11651 11652 if (Use->getOpcode() == ISD::MUL) { // We have another multiply use. 11653 SDNode *OtherOp; 11654 SDNode *MulVar = AddNode.getOperand(0).getNode(); 11655 11656 // OtherOp is what we're multiplying against the constant. 11657 if (Use->getOperand(0) == ConstNode) 11658 OtherOp = Use->getOperand(1).getNode(); 11659 else 11660 OtherOp = Use->getOperand(0).getNode(); 11661 11662 // Check to see if multiply is with the same operand of our "add". 11663 // 11664 // ConstNode = CONST 11665 // Use = ConstNode * A <-- visiting Use. 
// OtherOp is A.
11666 // ...
11667 // AddNode = (A + c1) <-- MulVar is A.
11668 // = AddNode * ConstNode <-- current visiting instruction.
11669 //
11670 // If we make this transformation, we will have a common
11671 // multiply (ConstNode * A) that we can save.
11672 if (OtherOp == MulVar)
11673 return true;
11674
11675 // Now check to see if a future expansion will give us a common
11676 // multiply.
11677 //
11678 // ConstNode = CONST
11679 // AddNode = (A + c1)
11680 // ... = AddNode * ConstNode <-- current visiting instruction.
11681 // ...
11682 // OtherOp = (A + c2)
11683 // Use = OtherOp * ConstNode <-- visiting Use.
11684 //
11685 // If we make this transformation, we will have a common
11686 // multiply (CONST * A) after we also do the same transformation
11687 // to the "Use" instruction.
11688 if (OtherOp->getOpcode() == ISD::ADD &&
11689 DAG.isConstantIntBuildVectorOrConstantInt(OtherOp->getOperand(1)) &&
11690 OtherOp->getOperand(0).getNode() == MulVar)
11691 return true;
11692 }
11693 }
11694
11695 // Didn't find a case where this would be profitable.
11696 return false;
11697 }
11698
11699 SDValue DAGCombiner::getMergedConstantVectorStore(
11700 SelectionDAG &DAG, const SDLoc &SL, ArrayRef<MemOpLink> Stores,
11701 SmallVectorImpl<SDValue> &Chains, EVT Ty) const {
11702 SmallVector<SDValue, 8> BuildVector;
11703
11704 for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) {
11705 StoreSDNode *St = cast<StoreSDNode>(Stores[I].MemNode);
11706 Chains.push_back(St->getChain());
11707 BuildVector.push_back(St->getValue());
11708 }
11709
11710 return DAG.getBuildVector(Ty, SL, BuildVector);
11711 }
11712
11713 bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
11714 SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT,
11715 unsigned NumStores, bool IsConstantSrc, bool UseVector) {
11716 // Make sure we have something to merge.
11717 if (NumStores < 2)
11718 return false;
11719
11720 int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
11721 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
11722 unsigned LatestNodeUsed = 0;
11723
11724 for (unsigned i=0; i < NumStores; ++i) {
11725 // Find a chain for the new wide-store operand. Notice that some
11726 // of the store nodes that we found may not be selected for inclusion
11727 // in the wide store. The chain we use needs to be the chain of the
11728 // latest store node which is *used* and replaced by the wide store.
11729 if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
11730 LatestNodeUsed = i;
11731 }
11732
11733 SmallVector<SDValue, 8> Chains;
11734
11735 // The latest Node in the DAG.
11736 LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
11737 SDLoc DL(StoreNodes[0].MemNode);
11738
11739 SDValue StoredVal;
11740 if (UseVector) {
11741 bool IsVec = MemVT.isVector();
11742 unsigned Elts = NumStores;
11743 if (IsVec) {
11744 // When merging vector stores, get the total number of elements.
11745 Elts *= MemVT.getVectorNumElements();
11746 }
11747 // Get the type for the merged vector store.
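// Illustrative example (assumed values, not from the original source):
// merging four scalar i32 stores gives Elts == 4 and a v4i32 store, while
// merging two v2i32 stores gives Elts == 2 * 2 == 4 and v4i32 as well.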
11748 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
11749 assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
11750
11751 if (IsConstantSrc) {
11752 StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Chains, Ty);
11753 } else {
11754 SmallVector<SDValue, 8> Ops;
11755 for (unsigned i = 0; i < NumStores; ++i) {
11756 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
11757 SDValue Val = St->getValue();
11758 // All operands of BUILD_VECTOR / CONCAT_VECTORS must have the same type.
11759 if (Val.getValueType() != MemVT)
11760 return false;
11761 Ops.push_back(Val);
11762 Chains.push_back(St->getChain());
11763 }
11764
11765 // Build the extracted vector elements back into a vector.
11766 StoredVal = DAG.getNode(IsVec ? ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR,
11767 DL, Ty, Ops);
}
11768 } else {
11769 // We should always use a vector store when merging extracted vector
11770 // elements, so this path implies a store of constants.
11771 assert(IsConstantSrc && "Merged vector elements should use vector store");
11772
11773 unsigned SizeInBits = NumStores * ElementSizeBytes * 8;
11774 APInt StoreInt(SizeInBits, 0);
11775
11776 // Construct a single integer constant which is made of the smaller
11777 // constant inputs.
11778 bool IsLE = DAG.getDataLayout().isLittleEndian();
11779 for (unsigned i = 0; i < NumStores; ++i) {
11780 unsigned Idx = IsLE ? (NumStores - 1 - i) : i;
11781 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
11782 Chains.push_back(St->getChain());
11783
11784 SDValue Val = St->getValue();
11785 StoreInt <<= ElementSizeBytes * 8;
11786 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
11787 StoreInt |= C->getAPIntValue().zext(SizeInBits);
11788 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
11789 StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits);
11790 } else {
11791 llvm_unreachable("Invalid constant element type");
11792 }
11793 }
11794
11795 // Create the new Load and Store operations.
11796 EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
11797 StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
11798 }
11799
11800 assert(!Chains.empty());
11801
11802 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
11803 SDValue NewStore = DAG.getStore(NewChain, DL, StoredVal,
11804 FirstInChain->getBasePtr(),
11805 FirstInChain->getPointerInfo(),
11806 FirstInChain->getAlignment());
11807
11808 bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
11809 : DAG.getSubtarget().useAA();
11810 if (UseAA) {
11811 // Replace all merged stores with the new store.
11812 for (unsigned i = 0; i < NumStores; ++i)
11813 CombineTo(StoreNodes[i].MemNode, NewStore);
11814 } else {
11815 // Replace the last store with the new store.
11816 CombineTo(LatestOp, NewStore);
11817 // Erase all other stores.
11818 for (unsigned i = 0; i < NumStores; ++i) {
11819 if (StoreNodes[i].MemNode == LatestOp)
11820 continue;
11821 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
11822 // ReplaceAllUsesWith will replace all uses that existed when it was
11823 // called, but graph optimizations may cause new ones to appear. For
11824 // example, the case in pr14333 looks like
11825 //
11826 // St's chain -> St -> another store -> X
11827 //
11828 // And the only difference from St to the other store is the chain.
11829 // When we change its chain to be St's chain they become identical,
11830 // get CSEed and the net result is that X is now a use of St.
11831 // Since we know that St is redundant, just iterate. 11832 while (!St->use_empty()) 11833 DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain()); 11834 deleteAndRecombine(St); 11835 } 11836 } 11837 11838 StoreNodes.erase(StoreNodes.begin() + NumStores, StoreNodes.end()); 11839 return true; 11840 } 11841 11842 void DAGCombiner::getStoreMergeAndAliasCandidates( 11843 StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes, 11844 SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) { 11845 // This holds the base pointer, index, and the offset in bytes from the base 11846 // pointer. 11847 BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG); 11848 11849 // We must have a base and an offset. 11850 if (!BasePtr.Base.getNode()) 11851 return; 11852 11853 // Do not handle stores to undef base pointers. 11854 if (BasePtr.Base.isUndef()) 11855 return; 11856 11857 // Walk up the chain and look for nodes with offsets from the same 11858 // base pointer. Stop when reaching an instruction with a different kind 11859 // or instruction which has a different base pointer. 11860 EVT MemVT = St->getMemoryVT(); 11861 unsigned Seq = 0; 11862 StoreSDNode *Index = St; 11863 11864 11865 bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA 11866 : DAG.getSubtarget().useAA(); 11867 11868 if (UseAA) { 11869 // Look at other users of the same chain. Stores on the same chain do not 11870 // alias. If combiner-aa is enabled, non-aliasing stores are canonicalized 11871 // to be on the same chain, so don't bother looking at adjacent chains. 11872 11873 SDValue Chain = St->getChain(); 11874 for (auto I = Chain->use_begin(), E = Chain->use_end(); I != E; ++I) { 11875 if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) { 11876 if (I.getOperandNo() != 0) 11877 continue; 11878 11879 if (OtherST->isVolatile() || OtherST->isIndexed()) 11880 continue; 11881 11882 if (OtherST->getMemoryVT() != MemVT) 11883 continue; 11884 11885 BaseIndexOffset Ptr = BaseIndexOffset::match(OtherST->getBasePtr(), DAG); 11886 11887 if (Ptr.equalBaseIndex(BasePtr)) 11888 StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset, Seq++)); 11889 } 11890 } 11891 11892 return; 11893 } 11894 11895 while (Index) { 11896 // If the chain has more than one use, then we can't reorder the mem ops. 11897 if (Index != St && !SDValue(Index, 0)->hasOneUse()) 11898 break; 11899 11900 // Find the base pointer and offset for this memory node. 11901 BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG); 11902 11903 // Check that the base pointer is the same as the original one. 11904 if (!Ptr.equalBaseIndex(BasePtr)) 11905 break; 11906 11907 // The memory operands must not be volatile. 11908 if (Index->isVolatile() || Index->isIndexed()) 11909 break; 11910 11911 // No truncation. 11912 if (Index->isTruncatingStore()) 11913 break; 11914 11915 // The stored memory type must be the same. 11916 if (Index->getMemoryVT() != MemVT) 11917 break; 11918 11919 // We do not allow under-aligned stores in order to prevent 11920 // overriding stores. NOTE: this is a bad hack. Alignment SHOULD 11921 // be irrelevant here; what MATTERS is that we not move memory 11922 // operations that potentially overlap past each-other. 11923 if (Index->getAlignment() < MemVT.getStoreSize()) 11924 break; 11925 11926 // We found a potential memory operand to merge. 11927 StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++)); 11928 11929 // Find the next memory operand in the chain. 
// If the next operand in the
11930 // chain is a store, then move up and continue the scan with the next
11931 // memory operand. If the next operand is a load, save it and use alias
11932 // information to check whether it interferes with anything.
11933 SDNode *NextInChain = Index->getChain().getNode();
11934 while (1) {
11935 if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
11936 // We found a store node. Use it for the next iteration.
11937 Index = STn;
11938 break;
11939 } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
11940 if (Ldn->isVolatile()) {
11941 Index = nullptr;
11942 break;
11943 }
11944
11945 // Save the load node for later. Continue the scan.
11946 AliasLoadNodes.push_back(Ldn);
11947 NextInChain = Ldn->getChain().getNode();
11948 continue;
11949 } else {
11950 Index = nullptr;
11951 break;
11952 }
11953 }
11954 }
11955 }
11956
11957 // We need to check that merging these stores does not cause a loop
11958 // in the DAG. Any store candidate may depend on another candidate
11959 // indirectly through its operand (we already consider dependencies
11960 // through the chain). Check in parallel by searching up from
11961 // non-chain operands of candidates.
11962 bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
11963 SmallVectorImpl<MemOpLink> &StoreNodes) {
11964 SmallPtrSet<const SDNode *, 16> Visited;
11965 SmallVector<const SDNode *, 8> Worklist;
11966 // Search the operands of the store candidates.
11967 for (unsigned i = 0; i < StoreNodes.size(); ++i) {
11968 SDNode *n = StoreNodes[i].MemNode;
11969 // Potential loops may happen only through non-chain operands.
11970 for (unsigned j = 1; j < n->getNumOperands(); ++j)
11971 Worklist.push_back(n->getOperand(j).getNode());
11972 }
11973 // Search through the DAG. We can stop early if we find a store node.
11974 for (unsigned i = 0; i < StoreNodes.size(); ++i) {
11975 if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist))
11976 return false;
11977 }
11978 return true;
11979 }
11980
11981 bool DAGCombiner::MergeConsecutiveStores(
11982 StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes) {
11983 if (OptLevel == CodeGenOpt::None)
11984 return false;
11985
11986 EVT MemVT = St->getMemoryVT();
11987 int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
11988 bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
11989 Attribute::NoImplicitFloat);
11990
11991 // This function cannot currently deal with non-byte-sized memory sizes.
11992 if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
11993 return false;
11994
11995 if (!MemVT.isSimple())
11996 return false;
11997
11998 // Perform an early exit check. Do not bother looking at stored values that
11999 // are not constants, loads, or extracted vector elements.
12000 SDValue StoredVal = St->getValue();
12001 bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
12002 bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
12003 isa<ConstantFPSDNode>(StoredVal);
12004 bool IsExtractVecSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
12005 StoredVal.getOpcode() == ISD::EXTRACT_SUBVECTOR);
12006
12007 if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecSrc)
12008 return false;
12009
12010 // Don't merge vectors into wider vectors if the source data comes from loads.
12011 // TODO: This restriction can be lifted by using logic similar to the
12012 // ExtractVecSrc case.
12013 if (MemVT.isVector() && IsLoadSrc)
12014 return false;
12015
12016 // Only look at ends of store sequences.
12017 SDValue Chain = SDValue(St, 0);
12018 if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
12019 return false;
12020
12021 // Save the LoadSDNodes that we find in the chain.
12022 // We need to make sure that these nodes do not interfere with
12023 // any of the store nodes.
12024 SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
12025
12026 getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);
12027
12028 // Check if there is anything to merge.
12029 if (StoreNodes.size() < 2)
12030 return false;
12031
12032 // Only do the dependence check in the AA case.
12033 bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
12034 : DAG.getSubtarget().useAA();
12035 if (UseAA && !checkMergeStoreCandidatesForDependencies(StoreNodes))
12036 return false;
12037
12038 // Sort the memory operands according to their distance from the
12039 // base pointer. As a secondary criterion, make sure stores coming
12040 // later in the code come first in the list. This is important for
12041 // the non-UseAA case, because we're merging stores into the FINAL
12042 // store along a chain which potentially contains aliasing stores.
12043 // Thus, if there are multiple stores to the same address, the last
12044 // one can be considered for merging but not the others.
12045 std::sort(StoreNodes.begin(), StoreNodes.end(),
12046 [](MemOpLink LHS, MemOpLink RHS) {
12047 return LHS.OffsetFromBase < RHS.OffsetFromBase ||
12048 (LHS.OffsetFromBase == RHS.OffsetFromBase &&
12049 LHS.SequenceNum < RHS.SequenceNum);
12050 });
12051
12052 // Scan the memory operations on the chain and find the first non-consecutive
12053 // store memory address.
12054 unsigned LastConsecutiveStore = 0;
12055 int64_t StartAddress = StoreNodes[0].OffsetFromBase;
12056 for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) {
12057
12058 // Check that the addresses are consecutive starting from the second
12059 // element in the list of stores.
12060 if (i > 0) {
12061 int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
12062 if (CurrAddress - StartAddress != (ElementSizeBytes * i))
12063 break;
12064 }
12065
12066 // Check if this store interferes with any of the loads that we found.
12067 // If we find a load that aliases with this store, stop the sequence.
12068 if (any_of(AliasLoadNodes, [&](LSBaseSDNode *Ldn) {
12069 return isAlias(Ldn, StoreNodes[i].MemNode);
12070 }))
12071 break;
12072
12073 // Mark this node as useful.
12074 LastConsecutiveStore = i;
12075 }
12076
12077 // The node with the lowest store address.
12078 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
12079 unsigned FirstStoreAS = FirstInChain->getAddressSpace();
12080 unsigned FirstStoreAlign = FirstInChain->getAlignment();
12081 LLVMContext &Context = *DAG.getContext();
12082 const DataLayout &DL = DAG.getDataLayout();
12083
12084 // Store the constants into memory as one consecutive store.
12085 if (IsConstantSrc) {
12086 unsigned LastLegalType = 0;
12087 unsigned LastLegalVectorType = 0;
12088 bool NonZero = false;
12089 for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
12090 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
12091 SDValue StoredVal = St->getValue();
12092
12093 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) {
12094 NonZero |= !C->isNullValue();
12095 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) {
12096 NonZero |= !C->getConstantFPValue()->isNullValue();
12097 } else {
12098 // Non-constant.
12099 break;
12100 }
12101
12102 // Find a legal type for the constant store.
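// Illustrative example (assumed values, not from the original source):
// for four consecutive i8 constant stores, iteration i == 3 checks
// whether a single i32 store (SizeInBits == 32) covering all four bytes
// is legal and fast on the target.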
12103 unsigned SizeInBits = (i+1) * ElementSizeBytes * 8; 12104 EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits); 12105 bool IsFast; 12106 if (TLI.isTypeLegal(StoreTy) && 12107 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS, 12108 FirstStoreAlign, &IsFast) && IsFast) { 12109 LastLegalType = i+1; 12110 // Or check whether a truncstore is legal. 12111 } else if (TLI.getTypeAction(Context, StoreTy) == 12112 TargetLowering::TypePromoteInteger) { 12113 EVT LegalizedStoredValueTy = 12114 TLI.getTypeToTransformTo(Context, StoredVal.getValueType()); 12115 if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) && 12116 TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy, 12117 FirstStoreAS, FirstStoreAlign, &IsFast) && 12118 IsFast) { 12119 LastLegalType = i + 1; 12120 } 12121 } 12122 12123 // We only use vectors if the constant is known to be zero or the target 12124 // allows it and the function is not marked with the noimplicitfloat 12125 // attribute. 12126 if ((!NonZero || TLI.storeOfVectorConstantIsCheap(MemVT, i+1, 12127 FirstStoreAS)) && 12128 !NoVectors) { 12129 // Find a legal type for the vector store. 12130 EVT Ty = EVT::getVectorVT(Context, MemVT, i+1); 12131 if (TLI.isTypeLegal(Ty) && 12132 TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS, 12133 FirstStoreAlign, &IsFast) && IsFast) 12134 LastLegalVectorType = i + 1; 12135 } 12136 } 12137 12138 // Check if we found a legal integer type to store. 12139 if (LastLegalType == 0 && LastLegalVectorType == 0) 12140 return false; 12141 12142 bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors; 12143 unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType; 12144 12145 return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem, 12146 true, UseVector); 12147 } 12148 12149 // When extracting multiple vector elements, try to store them 12150 // in one vector store rather than a sequence of scalar stores. 12151 if (IsExtractVecSrc) { 12152 unsigned NumStoresToMerge = 0; 12153 bool IsVec = MemVT.isVector(); 12154 for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) { 12155 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); 12156 unsigned StoreValOpcode = St->getValue().getOpcode(); 12157 // This restriction could be loosened. 12158 // Bail out if any stored values are not elements extracted from a vector. 12159 // It should be possible to handle mixed sources, but load sources need 12160 // more careful handling (see the block of code below that handles 12161 // consecutive loads). 12162 if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT && 12163 StoreValOpcode != ISD::EXTRACT_SUBVECTOR) 12164 return false; 12165 12166 // Find a legal type for the vector store. 12167 unsigned Elts = i + 1; 12168 if (IsVec) { 12169 // When merging vector stores, get the total number of elements. 12170 Elts *= MemVT.getVectorNumElements(); 12171 } 12172 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts); 12173 bool IsFast; 12174 if (TLI.isTypeLegal(Ty) && 12175 TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS, 12176 FirstStoreAlign, &IsFast) && IsFast) 12177 NumStoresToMerge = i + 1; 12178 } 12179 12180 return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumStoresToMerge, 12181 false, true); 12182 } 12183 12184 // Below we handle the case of multiple consecutive stores that 12185 // come from multiple consecutive loads. We merge them into a single 12186 // wide load and a single wide store. 12187 12188 // Look for load nodes which are used by the stored values. 
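// Illustrative shape of the pattern (not from the original source):
//   a = load p+0 ; store a, q+0
//   b = load p+4 ; store b, q+4
// can become one wide load from p and one wide store to q.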
12189 SmallVector<MemOpLink, 8> LoadNodes; 12190 12191 // Find acceptable loads. Loads need to have the same chain (token factor), 12192 // must not be zext, volatile, indexed, and they must be consecutive. 12193 BaseIndexOffset LdBasePtr; 12194 for (unsigned i=0; i<LastConsecutiveStore+1; ++i) { 12195 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); 12196 LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue()); 12197 if (!Ld) break; 12198 12199 // Loads must only have one use. 12200 if (!Ld->hasNUsesOfValue(1, 0)) 12201 break; 12202 12203 // The memory operands must not be volatile. 12204 if (Ld->isVolatile() || Ld->isIndexed()) 12205 break; 12206 12207 // We do not accept ext loads. 12208 if (Ld->getExtensionType() != ISD::NON_EXTLOAD) 12209 break; 12210 12211 // The stored memory type must be the same. 12212 if (Ld->getMemoryVT() != MemVT) 12213 break; 12214 12215 BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG); 12216 // If this is not the first ptr that we check. 12217 if (LdBasePtr.Base.getNode()) { 12218 // The base ptr must be the same. 12219 if (!LdPtr.equalBaseIndex(LdBasePtr)) 12220 break; 12221 } else { 12222 // Check that all other base pointers are the same as this one. 12223 LdBasePtr = LdPtr; 12224 } 12225 12226 // We found a potential memory operand to merge. 12227 LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0)); 12228 } 12229 12230 if (LoadNodes.size() < 2) 12231 return false; 12232 12233 // If we have load/store pair instructions and we only have two values, 12234 // don't bother. 12235 unsigned RequiredAlignment; 12236 if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) && 12237 St->getAlignment() >= RequiredAlignment) 12238 return false; 12239 12240 LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode); 12241 unsigned FirstLoadAS = FirstLoad->getAddressSpace(); 12242 unsigned FirstLoadAlign = FirstLoad->getAlignment(); 12243 12244 // Scan the memory operations on the chain and find the first non-consecutive 12245 // load memory address. These variables hold the index in the store node 12246 // array. 12247 unsigned LastConsecutiveLoad = 0; 12248 // This variable refers to the size and not index in the array. 12249 unsigned LastLegalVectorType = 0; 12250 unsigned LastLegalIntegerType = 0; 12251 StartAddress = LoadNodes[0].OffsetFromBase; 12252 SDValue FirstChain = FirstLoad->getChain(); 12253 for (unsigned i = 1; i < LoadNodes.size(); ++i) { 12254 // All loads must share the same chain. 12255 if (LoadNodes[i].MemNode->getChain() != FirstChain) 12256 break; 12257 12258 int64_t CurrAddress = LoadNodes[i].OffsetFromBase; 12259 if (CurrAddress - StartAddress != (ElementSizeBytes * i)) 12260 break; 12261 LastConsecutiveLoad = i; 12262 // Find a legal type for the vector store. 12263 EVT StoreTy = EVT::getVectorVT(Context, MemVT, i+1); 12264 bool IsFastSt, IsFastLd; 12265 if (TLI.isTypeLegal(StoreTy) && 12266 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS, 12267 FirstStoreAlign, &IsFastSt) && IsFastSt && 12268 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS, 12269 FirstLoadAlign, &IsFastLd) && IsFastLd) { 12270 LastLegalVectorType = i + 1; 12271 } 12272 12273 // Find a legal type for the integer store. 
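    // As in the constant-store case above, the candidate integer type for
    // i+1 merged operations is (i+1) * ElementSizeBytes * 8 bits wide, e.g.
    // i64 when two i32 load/store pairs are merged.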
12274 unsigned SizeInBits = (i+1) * ElementSizeBytes * 8; 12275 StoreTy = EVT::getIntegerVT(Context, SizeInBits); 12276 if (TLI.isTypeLegal(StoreTy) && 12277 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS, 12278 FirstStoreAlign, &IsFastSt) && IsFastSt && 12279 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS, 12280 FirstLoadAlign, &IsFastLd) && IsFastLd) 12281 LastLegalIntegerType = i + 1; 12282 // Or check whether a truncstore and extload is legal. 12283 else if (TLI.getTypeAction(Context, StoreTy) == 12284 TargetLowering::TypePromoteInteger) { 12285 EVT LegalizedStoredValueTy = 12286 TLI.getTypeToTransformTo(Context, StoreTy); 12287 if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) && 12288 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy, StoreTy) && 12289 TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy, StoreTy) && 12290 TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) && 12291 TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy, 12292 FirstStoreAS, FirstStoreAlign, &IsFastSt) && 12293 IsFastSt && 12294 TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy, 12295 FirstLoadAS, FirstLoadAlign, &IsFastLd) && 12296 IsFastLd) 12297 LastLegalIntegerType = i+1; 12298 } 12299 } 12300 12301 // Only use vector types if the vector type is larger than the integer type. 12302 // If they are the same, use integers. 12303 bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors; 12304 unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType); 12305 12306 // We add +1 here because the LastXXX variables refer to location while 12307 // the NumElem refers to array/index size. 12308 unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1; 12309 NumElem = std::min(LastLegalType, NumElem); 12310 12311 if (NumElem < 2) 12312 return false; 12313 12314 // Collect the chains from all merged stores. 12315 SmallVector<SDValue, 8> MergeStoreChains; 12316 MergeStoreChains.push_back(StoreNodes[0].MemNode->getChain()); 12317 12318 // The latest Node in the DAG. 12319 unsigned LatestNodeUsed = 0; 12320 for (unsigned i=1; i<NumElem; ++i) { 12321 // Find a chain for the new wide-store operand. Notice that some 12322 // of the store nodes that we found may not be selected for inclusion 12323 // in the wide store. The chain we use needs to be the chain of the 12324 // latest store node which is *used* and replaced by the wide store. 12325 if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum) 12326 LatestNodeUsed = i; 12327 12328 MergeStoreChains.push_back(StoreNodes[i].MemNode->getChain()); 12329 } 12330 12331 LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode; 12332 12333 // Find if it is better to use vectors or integers to load and store 12334 // to memory. 12335 EVT JointMemOpVT; 12336 if (UseVectorTy) { 12337 JointMemOpVT = EVT::getVectorVT(Context, MemVT, NumElem); 12338 } else { 12339 unsigned SizeInBits = NumElem * ElementSizeBytes * 8; 12340 JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits); 12341 } 12342 12343 SDLoc LoadDL(LoadNodes[0].MemNode); 12344 SDLoc StoreDL(StoreNodes[0].MemNode); 12345 12346 // The merged loads are required to have the same incoming chain, so 12347 // using the first's chain is acceptable. 
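  // E.g. if the merged loads all carry chain C, the wide load built below
  // also takes C, and the TokenFactor built from MergeStoreChains stands in
  // for the chains of all the merged stores.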
12348   SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(),
12349                                 FirstLoad->getBasePtr(),
12350                                 FirstLoad->getPointerInfo(), FirstLoadAlign);
12351
12352   SDValue NewStoreChain =
12353       DAG.getNode(ISD::TokenFactor, StoreDL, MVT::Other, MergeStoreChains);
12354
12355   SDValue NewStore =
12356       DAG.getStore(NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
12357                    FirstInChain->getPointerInfo(), FirstStoreAlign);
12358
12359   // Transfer chain users from old loads to the new load.
12360   for (unsigned i = 0; i < NumElem; ++i) {
12361     LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
12362     DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
12363                                   SDValue(NewLoad.getNode(), 1));
12364   }
12365
12366   if (UseAA) {
12367     // Replace all of the stores with the new store.
12368     for (unsigned i = 0; i < NumElem; ++i)
12369       CombineTo(StoreNodes[i].MemNode, NewStore);
12370   } else {
12371     // Replace the last store with the new store.
12372     CombineTo(LatestOp, NewStore);
12373     // Erase all other stores.
12374     for (unsigned i = 0; i < NumElem; ++i) {
12375       // Remove all Store nodes.
12376       if (StoreNodes[i].MemNode == LatestOp)
12377         continue;
12378       StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
12379       DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
12380       deleteAndRecombine(St);
12381     }
12382   }
12383
12384   StoreNodes.erase(StoreNodes.begin() + NumElem, StoreNodes.end());
12385   return true;
12386 }
12387
12388 SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {
12389   SDLoc SL(ST);
12390   SDValue ReplStore;
12391
12392   // Replace the chain to avoid dependency.
12393   if (ST->isTruncatingStore()) {
12394     ReplStore = DAG.getTruncStore(BetterChain, SL, ST->getValue(),
12395                                   ST->getBasePtr(), ST->getMemoryVT(),
12396                                   ST->getMemOperand());
12397   } else {
12398     ReplStore = DAG.getStore(BetterChain, SL, ST->getValue(), ST->getBasePtr(),
12399                              ST->getMemOperand());
12400   }
12401
12402   // Create token to keep both nodes around.
12403   SDValue Token = DAG.getNode(ISD::TokenFactor, SL,
12404                               MVT::Other, ST->getChain(), ReplStore);
12405
12406   // Make sure the new and old chains are cleaned up.
12407   AddToWorklist(Token.getNode());
12408
12409   // Don't add users to the worklist.
12410   return CombineTo(ST, Token, false);
12411 }
12412
12413 SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
12414   SDValue Value = ST->getValue();
12415   if (Value.getOpcode() == ISD::TargetConstantFP)
12416     return SDValue();
12417
12418   SDLoc DL(ST);
12419
12420   SDValue Chain = ST->getChain();
12421   SDValue Ptr = ST->getBasePtr();
12422
12423   const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);
12424
12425   // NOTE: If the original store is volatile, this transform must not increase
12426   // the number of stores. For example, on x86-32 an f64 can be stored in one
12427   // processor operation but an i64 (which is not legal) requires two. So the
12428   // transform should not be done in this case.
12429
12430   SDValue Tmp;
12431   switch (CFP->getSimpleValueType(0).SimpleTy) {
12432   default:
12433     llvm_unreachable("Unknown FP type");
12434   case MVT::f16:    // We don't do this for these yet.
12435   case MVT::f80:
12436   case MVT::f128:
12437   case MVT::ppcf128:
12438     return SDValue();
12439   case MVT::f32:
12440     if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
12441         TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
12443       Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
12444                             bitcastToAPInt().getZExtValue(), SDLoc(CFP),
12445                             MVT::i32);
12446       return DAG.getStore(Chain, DL, Tmp, Ptr, ST->getMemOperand());
12447     }
12448
12449     return SDValue();
12450   case MVT::f64:
12451     if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
12452          !ST->isVolatile()) ||
12453         TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
12455       Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
12456                             getZExtValue(), SDLoc(CFP), MVT::i64);
12457       return DAG.getStore(Chain, DL, Tmp,
12458                           Ptr, ST->getMemOperand());
12459     }
12460
12461     if (!ST->isVolatile() &&
12462         TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
12463       // Many FP stores are not made apparent until after legalize, e.g. for
12464       // argument passing. Since this is so common, custom legalize the
12465       // 64-bit integer store into two 32-bit stores.
12466       uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
12467       SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
12468       SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
12469       if (DAG.getDataLayout().isBigEndian())
12470         std::swap(Lo, Hi);
12471
12472       unsigned Alignment = ST->getAlignment();
12473       MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
12474       AAMDNodes AAInfo = ST->getAAInfo();
12475
12476       SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
12477                                  ST->getAlignment(), MMOFlags, AAInfo);
12478       Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
12479                         DAG.getConstant(4, DL, Ptr.getValueType()));
12480       Alignment = MinAlign(Alignment, 4U);
12481       SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
12482                                  ST->getPointerInfo().getWithOffset(4),
12483                                  Alignment, MMOFlags, AAInfo);
12484       return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
12485                          St0, St1);
12486     }
12487
12488     return SDValue();
12489   }
12490 }
12491
12492 SDValue DAGCombiner::visitSTORE(SDNode *N) {
12493   StoreSDNode *ST = cast<StoreSDNode>(N);
12494   SDValue Chain = ST->getChain();
12495   SDValue Value = ST->getValue();
12496   SDValue Ptr = ST->getBasePtr();
12497
12498   // If this is a store of a bit convert, store the input value if the
12499   // resultant store does not need a higher alignment than the original.
12500   if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
12501       ST->isUnindexed()) {
12502     EVT SVT = Value.getOperand(0).getValueType();
12503     if (((!LegalOperations && !ST->isVolatile()) ||
12504          TLI.isOperationLegalOrCustom(ISD::STORE, SVT)) &&
12505         TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT)) {
12506       unsigned OrigAlign = ST->getAlignment();
12507       bool Fast = false;
12508       if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT,
12509                                  ST->getAddressSpace(), OrigAlign, &Fast) &&
12510           Fast) {
12511         return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
12512                             ST->getPointerInfo(), OrigAlign,
12513                             ST->getMemOperand()->getFlags(), ST->getAAInfo());
12514       }
12515     }
12516   }
12517
12518   // Turn 'store undef, Ptr' -> nothing.
12519   if (Value.isUndef() && ST->isUnindexed())
12520     return Chain;
12521
12522   // Try to infer better alignment information than the store already has.
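  // For example, a store through a pointer into a 16-byte-aligned alloca may
  // conservatively carry align 4; raising the alignment here can enable wider
  // or cheaper store instructions later.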
12523   if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
12524     if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
12525       if (Align > ST->getAlignment()) {
12526         SDValue NewStore =
12527             DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr, ST->getPointerInfo(),
12528                               ST->getMemoryVT(), Align,
12529                               ST->getMemOperand()->getFlags(), ST->getAAInfo());
12530         if (NewStore.getNode() != N)
12531           return CombineTo(ST, NewStore, true);
12532       }
12533     }
12534   }
12535
12536   // Try transforming a pair of floating-point load / store ops to integer
12537   // load / store ops.
12538   if (SDValue NewST = TransformFPLoadStorePair(N))
12539     return NewST;
12540
12541   bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
12542                                                   : DAG.getSubtarget().useAA();
12543 #ifndef NDEBUG
12544   if (CombinerAAOnlyFunc.getNumOccurrences() &&
12545       CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
12546     UseAA = false;
12547 #endif
12548   if (UseAA && ST->isUnindexed()) {
12549     // FIXME: We should do this even without AA enabled. AA will just allow
12550     // FindBetterChain to work in more situations. The problem with this is that
12551     // any combine that expects memory operations to be on consecutive chains
12552     // first needs to be updated to look for users of the same chain.
12553
12554     // Walk up chain skipping non-aliasing memory nodes, on this store and any
12555     // adjacent stores.
12556     if (findBetterNeighborChains(ST)) {
12557       // replaceStoreChain uses CombineTo, which handles all of the worklist
12558       // manipulation. Return the original node so that nothing else is done.
12559       return SDValue(ST, 0);
12560     }
12561     Chain = ST->getChain();
12562   }
12563
12564   // Try transforming N to an indexed store.
12565   if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
12566     return SDValue(N, 0);
12567
12568   // FIXME: is there such a thing as a truncating indexed store?
12569   if (ST->isTruncatingStore() && ST->isUnindexed() &&
12570       Value.getValueType().isInteger()) {
12571     // See if we can simplify the input to this truncstore with knowledge that
12572     // only the low bits are being used. For example:
12573     // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
12574     SDValue Shorter = GetDemandedBits(
12575         Value, APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
12576                                     ST->getMemoryVT().getScalarSizeInBits()));
12577     AddToWorklist(Value.getNode());
12578     if (Shorter.getNode())
12579       return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
12580                                Ptr, ST->getMemoryVT(), ST->getMemOperand());
12581
12582     // Otherwise, see if we can simplify the operation with
12583     // SimplifyDemandedBits, which only works if the value has a single use.
12584     if (SimplifyDemandedBits(
12585             Value,
12586             APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
12587                                  ST->getMemoryVT().getScalarSizeInBits())))
12588       return SDValue(N, 0);
12589   }
12590
12591   // If this is a load followed by a store to the same location, then the store
12592   // is dead/noop.
12593   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
12594     if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
12595         ST->isUnindexed() && !ST->isVolatile() &&
12596         // There can't be any side effects between the load and store, such as
12597         // a call or store.
12598         Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
12599       // The store is dead, remove it.
12600       return Chain;
12601     }
12602   }
12603
12604   // If this is a store followed by a store with the same value to the same
12605   // location, then the store is dead/noop.
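  // For example:
  //   store x, p
  //   store x, p   <- this second store is a no-op and can be removed
  // provided neither store is volatile or indexed.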
12606   if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
12607     if (ST1->getBasePtr() == Ptr && ST->getMemoryVT() == ST1->getMemoryVT() &&
12608         ST1->getValue() == Value && ST->isUnindexed() && !ST->isVolatile() &&
12609         ST1->isUnindexed() && !ST1->isVolatile()) {
12610       // The store is dead, remove it.
12611       return Chain;
12612     }
12613   }
12614
12615   // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
12616   // truncating store. We can do this even if this is already a truncstore.
12617   if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
12618       && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
12619       TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
12620                             ST->getMemoryVT())) {
12621     return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
12622                              Ptr, ST->getMemoryVT(), ST->getMemOperand());
12623   }
12624
12625   // Only perform this optimization before the types are legal, because we
12626   // don't want to perform this optimization on every DAGCombine invocation.
12627   if (!LegalTypes) {
12628     for (;;) {
12629       // There can be multiple store sequences on the same chain.
12630       // Keep trying to merge store sequences until we are unable to do so
12631       // or until we merge the last store on the chain.
12632       SmallVector<MemOpLink, 8> StoreNodes;
12633       bool Changed = MergeConsecutiveStores(ST, StoreNodes);
12634       if (!Changed) break;
12635
12636       if (any_of(StoreNodes,
12637                  [ST](const MemOpLink &Link) { return Link.MemNode == ST; })) {
12638         // ST has been merged and no longer exists.
12639         return SDValue(N, 0);
12640       }
12641     }
12642   }
12643
12644   // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
12645   //
12646   // Make sure to do this only after attempting to merge stores in order to
12647   // avoid changing the types of some subset of stores due to visit order,
12648   // preventing their merging.
12649   if (isa<ConstantFPSDNode>(Value)) {
12650     if (SDValue NewSt = replaceStoreOfFPConstant(ST))
12651       return NewSt;
12652   }
12653
12654   if (SDValue NewSt = splitMergedValStore(ST))
12655     return NewSt;
12656
12657   return ReduceLoadOpStoreWidth(N);
12658 }
12659
12660 /// For the store of the instruction sequence below, the F and I values
12661 /// are bundled together as an i64 value before being stored into memory.
12662 /// Sometimes it is more efficient to generate separate stores for F and I,
12663 /// which can remove the bitwise instructions or sink them to colder places.
12664 ///
12665 /// (store (or (zext (bitcast F to i32) to i64),
12666 ///            (shl (zext I to i64), 32)), addr) -->
12667 /// (store F, addr) and (store I, addr+4)
12668 ///
12669 /// Similarly, splitting for other merged stores can also be beneficial, like:
12670 /// For pair of {i32, i32}, i64 store --> two i32 stores.
12671 /// For pair of {i32, i16}, i64 store --> two i32 stores.
12672 /// For pair of {i16, i16}, i32 store --> two i16 stores.
12673 /// For pair of {i16, i8},  i32 store --> two i16 stores.
12674 /// For pair of {i8, i8},   i16 store --> two i8 stores.
12675 ///
12676 /// We allow each target to determine specifically which kind of splitting is
12677 /// supported.
12678 ///
12679 /// The store patterns are commonly seen from the simple code snippet below
12680 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo:
12681 /// void goo(const std::pair<int, float> &);
12682 /// hoo() {
12683 ///   ...
12684 ///   goo(std::make_pair(tmp, ftmp));
12685 ///   ...
12686 /// }
12687 ///
12688 SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
12689   if (OptLevel == CodeGenOpt::None)
12690     return SDValue();
12691
12692   SDValue Val = ST->getValue();
12693   SDLoc DL(ST);
12694
12695   // Match OR operand.
12696   if (!Val.getValueType().isScalarInteger() || Val.getOpcode() != ISD::OR)
12697     return SDValue();
12698
12699   // Match SHL operand and get Lower and Higher parts of Val.
12700   SDValue Op1 = Val.getOperand(0);
12701   SDValue Op2 = Val.getOperand(1);
12702   SDValue Lo, Hi;
12703   if (Op1.getOpcode() != ISD::SHL) {
12704     std::swap(Op1, Op2);
12705     if (Op1.getOpcode() != ISD::SHL)
12706       return SDValue();
12707   }
12708   Lo = Op2;
12709   Hi = Op1.getOperand(0);
12710   if (!Op1.hasOneUse())
12711     return SDValue();
12712
12713   // Match shift amount to HalfValBitSize.
12714   unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
12715   ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
12716   if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
12717     return SDValue();
12718
12719   // Lo and Hi must be zero-extended from integers no wider than half of
12720   // Val's width (e.g. from i32 or narrower when Val is i64).
12721   if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||
12722       !Lo.getOperand(0).getValueType().isScalarInteger() ||
12723       Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||
12724       Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||
12725       !Hi.getOperand(0).getValueType().isScalarInteger() ||
12726       Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)
12727     return SDValue();
12728
12729   // Use the EVT of the low and high parts before any bitcast as the input
12730   // to the target query.
12731   EVT LowTy = (Lo.getOperand(0).getOpcode() == ISD::BITCAST)
12732                   ? Lo.getOperand(0).getValueType()
12733                   : Lo.getValueType();
12734   EVT HighTy = (Hi.getOperand(0).getOpcode() == ISD::BITCAST)
12735                    ? Hi.getOperand(0).getValueType()
12736                    : Hi.getValueType();
12737   if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
12738     return SDValue();
12739
12740   // Start to split the store.
12741   unsigned Alignment = ST->getAlignment();
12742   MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
12743   AAMDNodes AAInfo = ST->getAAInfo();
12744
12745   // Change the sizes of Lo and Hi's value types to HalfValBitSize.
12746   EVT VT = EVT::getIntegerVT(*DAG.getContext(), HalfValBitSize);
12747   Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Lo.getOperand(0));
12748   Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Hi.getOperand(0));
12749
12750   SDValue Chain = ST->getChain();
12751   SDValue Ptr = ST->getBasePtr();
12752   // Lower value store.
12753   SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
12754                              ST->getAlignment(), MMOFlags, AAInfo);
12755   Ptr =
12756       DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
12757                   DAG.getConstant(HalfValBitSize / 8, DL, Ptr.getValueType()));
12758   // Higher value store.
12759   SDValue St1 =
12760       DAG.getStore(St0, DL, Hi, Ptr,
12761                    ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
12762                    Alignment / 2, MMOFlags, AAInfo);
12763   return St1;
12764 }
12765
12766 SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
12767   SDValue InVec = N->getOperand(0);
12768   SDValue InVal = N->getOperand(1);
12769   SDValue EltNo = N->getOperand(2);
12770   SDLoc DL(N);
12771
12772   // If the inserted element is an UNDEF, just use the input vector.
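  // For example: (insert_vector_elt v4i32:V, undef, 2) -> V.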
12773 if (InVal.isUndef()) 12774 return InVec; 12775 12776 EVT VT = InVec.getValueType(); 12777 12778 // If we can't generate a legal BUILD_VECTOR, exit 12779 if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) 12780 return SDValue(); 12781 12782 // Check that we know which element is being inserted 12783 if (!isa<ConstantSDNode>(EltNo)) 12784 return SDValue(); 12785 unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 12786 12787 // Canonicalize insert_vector_elt dag nodes. 12788 // Example: 12789 // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1) 12790 // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0) 12791 // 12792 // Do this only if the child insert_vector node has one use; also 12793 // do this only if indices are both constants and Idx1 < Idx0. 12794 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse() 12795 && isa<ConstantSDNode>(InVec.getOperand(2))) { 12796 unsigned OtherElt = 12797 cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue(); 12798 if (Elt < OtherElt) { 12799 // Swap nodes. 12800 SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, 12801 InVec.getOperand(0), InVal, EltNo); 12802 AddToWorklist(NewOp.getNode()); 12803 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()), 12804 VT, NewOp, InVec.getOperand(1), InVec.getOperand(2)); 12805 } 12806 } 12807 12808 // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially 12809 // be converted to a BUILD_VECTOR). Fill in the Ops vector with the 12810 // vector elements. 12811 SmallVector<SDValue, 8> Ops; 12812 // Do not combine these two vectors if the output vector will not replace 12813 // the input vector. 12814 if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) { 12815 Ops.append(InVec.getNode()->op_begin(), 12816 InVec.getNode()->op_end()); 12817 } else if (InVec.isUndef()) { 12818 unsigned NElts = VT.getVectorNumElements(); 12819 Ops.append(NElts, DAG.getUNDEF(InVal.getValueType())); 12820 } else { 12821 return SDValue(); 12822 } 12823 12824 // Insert the element 12825 if (Elt < Ops.size()) { 12826 // All the operands of BUILD_VECTOR must have the same type; 12827 // we enforce that here. 12828 EVT OpVT = Ops[0].getValueType(); 12829 if (InVal.getValueType() != OpVT) 12830 InVal = OpVT.bitsGT(InVal.getValueType()) ? 12831 DAG.getNode(ISD::ANY_EXTEND, DL, OpVT, InVal) : 12832 DAG.getNode(ISD::TRUNCATE, DL, OpVT, InVal); 12833 Ops[Elt] = InVal; 12834 } 12835 12836 // Return the new vector 12837 return DAG.getBuildVector(VT, DL, Ops); 12838 } 12839 12840 SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad( 12841 SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad) { 12842 assert(!OriginalLoad->isVolatile()); 12843 12844 EVT ResultVT = EVE->getValueType(0); 12845 EVT VecEltVT = InVecVT.getVectorElementType(); 12846 unsigned Align = OriginalLoad->getAlignment(); 12847 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( 12848 VecEltVT.getTypeForEVT(*DAG.getContext())); 12849 12850 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT)) 12851 return SDValue(); 12852 12853 ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ? 
12854 ISD::NON_EXTLOAD : ISD::EXTLOAD; 12855 if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT)) 12856 return SDValue(); 12857 12858 Align = NewAlign; 12859 12860 SDValue NewPtr = OriginalLoad->getBasePtr(); 12861 SDValue Offset; 12862 EVT PtrType = NewPtr.getValueType(); 12863 MachinePointerInfo MPI; 12864 SDLoc DL(EVE); 12865 if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) { 12866 int Elt = ConstEltNo->getZExtValue(); 12867 unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8; 12868 Offset = DAG.getConstant(PtrOff, DL, PtrType); 12869 MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff); 12870 } else { 12871 Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType); 12872 Offset = DAG.getNode( 12873 ISD::MUL, DL, PtrType, Offset, 12874 DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType)); 12875 MPI = OriginalLoad->getPointerInfo(); 12876 } 12877 NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, NewPtr, Offset); 12878 12879 // The replacement we need to do here is a little tricky: we need to 12880 // replace an extractelement of a load with a load. 12881 // Use ReplaceAllUsesOfValuesWith to do the replacement. 12882 // Note that this replacement assumes that the extractvalue is the only 12883 // use of the load; that's okay because we don't want to perform this 12884 // transformation in other cases anyway. 12885 SDValue Load; 12886 SDValue Chain; 12887 if (ResultVT.bitsGT(VecEltVT)) { 12888 // If the result type of vextract is wider than the load, then issue an 12889 // extending load instead. 12890 ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT, 12891 VecEltVT) 12892 ? ISD::ZEXTLOAD 12893 : ISD::EXTLOAD; 12894 Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT, 12895 OriginalLoad->getChain(), NewPtr, MPI, VecEltVT, 12896 Align, OriginalLoad->getMemOperand()->getFlags(), 12897 OriginalLoad->getAAInfo()); 12898 Chain = Load.getValue(1); 12899 } else { 12900 Load = DAG.getLoad(VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr, 12901 MPI, Align, OriginalLoad->getMemOperand()->getFlags(), 12902 OriginalLoad->getAAInfo()); 12903 Chain = Load.getValue(1); 12904 if (ResultVT.bitsLT(VecEltVT)) 12905 Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load); 12906 else 12907 Load = DAG.getBitcast(ResultVT, Load); 12908 } 12909 WorklistRemover DeadNodes(*this); 12910 SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) }; 12911 SDValue To[] = { Load, Chain }; 12912 DAG.ReplaceAllUsesOfValuesWith(From, To, 2); 12913 // Since we're explicitly calling ReplaceAllUses, add the new node to the 12914 // worklist explicitly as well. 12915 AddToWorklist(Load.getNode()); 12916 AddUsersToWorklist(Load.getNode()); // Add users too 12917 // Make sure to revisit this node to clean it up; it will usually be dead. 12918 AddToWorklist(EVE); 12919 ++OpsNarrowed; 12920 return SDValue(EVE, 0); 12921 } 12922 12923 SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { 12924 // (vextract (scalar_to_vector val, 0) -> val 12925 SDValue InVec = N->getOperand(0); 12926 EVT VT = InVec.getValueType(); 12927 EVT NVT = N->getValueType(0); 12928 12929 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) { 12930 // Check if the result type doesn't match the inserted element type. A 12931 // SCALAR_TO_VECTOR may truncate the inserted element and the 12932 // EXTRACT_VECTOR_ELT may widen the extracted vector. 
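    // For example, (i16 (extract_vector_elt (v8i16 (scalar_to_vector i32:x)),
    // 0)) becomes (i16 (trunc x)) via the getSExtOrTrunc below.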
12933 SDValue InOp = InVec.getOperand(0); 12934 if (InOp.getValueType() != NVT) { 12935 assert(InOp.getValueType().isInteger() && NVT.isInteger()); 12936 return DAG.getSExtOrTrunc(InOp, SDLoc(InVec), NVT); 12937 } 12938 return InOp; 12939 } 12940 12941 SDValue EltNo = N->getOperand(1); 12942 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 12943 12944 // extract_vector_elt (build_vector x, y), 1 -> y 12945 if (ConstEltNo && 12946 InVec.getOpcode() == ISD::BUILD_VECTOR && 12947 TLI.isTypeLegal(VT) && 12948 (InVec.hasOneUse() || 12949 TLI.aggressivelyPreferBuildVectorSources(VT))) { 12950 SDValue Elt = InVec.getOperand(ConstEltNo->getZExtValue()); 12951 EVT InEltVT = Elt.getValueType(); 12952 12953 // Sometimes build_vector's scalar input types do not match result type. 12954 if (NVT == InEltVT) 12955 return Elt; 12956 12957 // TODO: It may be useful to truncate if free if the build_vector implicitly 12958 // converts. 12959 } 12960 12961 // extract_vector_elt (v2i32 (bitcast i64:x)), 0 -> i32 (trunc i64:x) 12962 if (ConstEltNo && InVec.getOpcode() == ISD::BITCAST && InVec.hasOneUse() && 12963 ConstEltNo->isNullValue() && VT.isInteger()) { 12964 SDValue BCSrc = InVec.getOperand(0); 12965 if (BCSrc.getValueType().isScalarInteger()) 12966 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), NVT, BCSrc); 12967 } 12968 12969 // extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val 12970 // 12971 // This only really matters if the index is non-constant since other combines 12972 // on the constant elements already work. 12973 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && 12974 EltNo == InVec.getOperand(2)) { 12975 SDValue Elt = InVec.getOperand(1); 12976 return VT.isInteger() ? DAG.getAnyExtOrTrunc(Elt, SDLoc(N), NVT) : Elt; 12977 } 12978 12979 // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT. 12980 // We only perform this optimization before the op legalization phase because 12981 // we may introduce new vector instructions which are not backed by TD 12982 // patterns. For example on AVX, extracting elements from a wide vector 12983 // without using extract_subvector. However, if we can find an underlying 12984 // scalar value, then we can always use that. 12985 if (ConstEltNo && InVec.getOpcode() == ISD::VECTOR_SHUFFLE) { 12986 int NumElem = VT.getVectorNumElements(); 12987 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec); 12988 // Find the new index to extract from. 12989 int OrigElt = SVOp->getMaskElt(ConstEltNo->getZExtValue()); 12990 12991 // Extracting an undef index is undef. 12992 if (OrigElt == -1) 12993 return DAG.getUNDEF(NVT); 12994 12995 // Select the right vector half to extract from. 12996 SDValue SVInVec; 12997 if (OrigElt < NumElem) { 12998 SVInVec = InVec->getOperand(0); 12999 } else { 13000 SVInVec = InVec->getOperand(1); 13001 OrigElt -= NumElem; 13002 } 13003 13004 if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) { 13005 SDValue InOp = SVInVec.getOperand(OrigElt); 13006 if (InOp.getValueType() != NVT) { 13007 assert(InOp.getValueType().isInteger() && NVT.isInteger()); 13008 InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT); 13009 } 13010 13011 return InOp; 13012 } 13013 13014 // FIXME: We should handle recursing on other vector shuffles and 13015 // scalar_to_vector here as well. 
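    // For example:
    //   (extract_vector_elt (vector_shuffle<2,u,u,u> v4i32:A, v4i32:B), 0)
    // selects lane 2 of A, so it can be rewritten as
    //   (extract_vector_elt A, 2).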
13016 13017 if (!LegalOperations) { 13018 EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 13019 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT, SVInVec, 13020 DAG.getConstant(OrigElt, SDLoc(SVOp), IndexTy)); 13021 } 13022 } 13023 13024 bool BCNumEltsChanged = false; 13025 EVT ExtVT = VT.getVectorElementType(); 13026 EVT LVT = ExtVT; 13027 13028 // If the result of load has to be truncated, then it's not necessarily 13029 // profitable. 13030 if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT)) 13031 return SDValue(); 13032 13033 if (InVec.getOpcode() == ISD::BITCAST) { 13034 // Don't duplicate a load with other uses. 13035 if (!InVec.hasOneUse()) 13036 return SDValue(); 13037 13038 EVT BCVT = InVec.getOperand(0).getValueType(); 13039 if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType())) 13040 return SDValue(); 13041 if (VT.getVectorNumElements() != BCVT.getVectorNumElements()) 13042 BCNumEltsChanged = true; 13043 InVec = InVec.getOperand(0); 13044 ExtVT = BCVT.getVectorElementType(); 13045 } 13046 13047 // (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size) 13048 if (!LegalOperations && !ConstEltNo && InVec.hasOneUse() && 13049 ISD::isNormalLoad(InVec.getNode()) && 13050 !N->getOperand(1)->hasPredecessor(InVec.getNode())) { 13051 SDValue Index = N->getOperand(1); 13052 if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec)) { 13053 if (!OrigLoad->isVolatile()) { 13054 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index, 13055 OrigLoad); 13056 } 13057 } 13058 } 13059 13060 // Perform only after legalization to ensure build_vector / vector_shuffle 13061 // optimizations have already been done. 13062 if (!LegalOperations) return SDValue(); 13063 13064 // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size) 13065 // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size) 13066 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr) 13067 13068 if (ConstEltNo) { 13069 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 13070 13071 LoadSDNode *LN0 = nullptr; 13072 const ShuffleVectorSDNode *SVN = nullptr; 13073 if (ISD::isNormalLoad(InVec.getNode())) { 13074 LN0 = cast<LoadSDNode>(InVec); 13075 } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR && 13076 InVec.getOperand(0).getValueType() == ExtVT && 13077 ISD::isNormalLoad(InVec.getOperand(0).getNode())) { 13078 // Don't duplicate a load with other uses. 13079 if (!InVec.hasOneUse()) 13080 return SDValue(); 13081 13082 LN0 = cast<LoadSDNode>(InVec.getOperand(0)); 13083 } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) { 13084 // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1) 13085 // => 13086 // (load $addr+1*size) 13087 13088 // Don't duplicate a load with other uses. 13089 if (!InVec.hasOneUse()) 13090 return SDValue(); 13091 13092 // If the bit convert changed the number of elements, it is unsafe 13093 // to examine the mask. 13094 if (BCNumEltsChanged) 13095 return SDValue(); 13096 13097 // Select the input vector, guarding against out of range extract vector. 13098 unsigned NumElems = VT.getVectorNumElements(); 13099 int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt); 13100 InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1); 13101 13102 if (InVec.getOpcode() == ISD::BITCAST) { 13103 // Don't duplicate a load with other uses. 
13104         if (!InVec.hasOneUse())
13105           return SDValue();
13106
13107         InVec = InVec.getOperand(0);
13108       }
13109       if (ISD::isNormalLoad(InVec.getNode())) {
13110         LN0 = cast<LoadSDNode>(InVec);
13111         Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
13112         EltNo = DAG.getConstant(Elt, SDLoc(EltNo), EltNo.getValueType());
13113       }
13114     }
13115
13116     // Make sure we found a non-volatile load and the extractelement is
13117     // the only use.
13118     if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
13119       return SDValue();
13120
13121     // If Idx was -1 above, Elt is going to be -1, so just return undef.
13122     if (Elt == -1)
13123       return DAG.getUNDEF(LVT);
13124
13125     return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, EltNo, LN0);
13126   }
13127
13128   return SDValue();
13129 }
13130
13131 // Simplify (build_vec (ext )) to (bitcast (build_vec ))
13132 SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
13133   // We perform this optimization post type-legalization because
13134   // the type-legalizer often scalarizes integer-promoted vectors.
13135   // Performing this optimization before may create bit-casts which
13136   // will be type-legalized to complex code sequences.
13137   // We perform this optimization only before the operation legalizer because we
13138   // may introduce illegal operations.
13139   if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
13140     return SDValue();
13141
13142   unsigned NumInScalars = N->getNumOperands();
13143   SDLoc DL(N);
13144   EVT VT = N->getValueType(0);
13145
13146   // Check to see if this is a BUILD_VECTOR of a bunch of values
13147   // which come from any_extend or zero_extend nodes. If so, we can create
13148   // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
13149   // optimizations. We do not handle sign-extend because we can't fill the sign
13150   // using shuffles.
13151   EVT SourceType = MVT::Other;
13152   bool AllAnyExt = true;
13153
13154   for (unsigned i = 0; i != NumInScalars; ++i) {
13155     SDValue In = N->getOperand(i);
13156     // Ignore undef inputs.
13157     if (In.isUndef()) continue;
13158
13159     bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
13160     bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
13161
13162     // Abort if the element is not an extension.
13163     if (!ZeroExt && !AnyExt) {
13164       SourceType = MVT::Other;
13165       break;
13166     }
13167
13168     // The input is a ZeroExt or AnyExt. Check the original type.
13169     EVT InTy = In.getOperand(0).getValueType();
13170
13171     // Check that all of the widened source types are the same.
13172     if (SourceType == MVT::Other)
13173       // First time.
13174       SourceType = InTy;
13175     else if (InTy != SourceType) {
13176       // Multiple input types. Abort.
13177       SourceType = MVT::Other;
13178       break;
13179     }
13180
13181     // Check if all of the extends are ANY_EXTENDs.
13182     AllAnyExt &= AnyExt;
13183   }
13184
13185   // In order to have valid types, all of the inputs must be extended from the
13186   // same source type and all of the inputs must be any or zero extend.
13187   // Scalar sizes must be a power of two.
13188   EVT OutScalarTy = VT.getScalarType();
13189   bool ValidTypes = SourceType != MVT::Other &&
13190                     isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
13191                     isPowerOf2_32(SourceType.getSizeInBits());
13192
13193   // Create a new simpler BUILD_VECTOR sequence which other optimizations can
13194   // turn into a single shuffle instruction.
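  // For example (little-endian):
  //   (v4i32 build_vector (zext i16:a), (zext i16:b),
  //                       (zext i16:c), (zext i16:d))
  // can be rebuilt as
  //   (v4i32 bitcast (v8i16 build_vector a, 0, b, 0, c, 0, d, 0))
  // where the zero fillers come from the zero-extension semantics.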
13195   if (!ValidTypes)
13196     return SDValue();
13197
13198   bool isLE = DAG.getDataLayout().isLittleEndian();
13199   unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
13200   assert(ElemRatio > 1 && "Invalid element size ratio");
13201   SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
13202                                DAG.getConstant(0, DL, SourceType);
13203
13204   unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
13205   SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
13206
13207   // Populate the new build_vector
13208   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
13209     SDValue Cast = N->getOperand(i);
13210     assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
13211             Cast.getOpcode() == ISD::ZERO_EXTEND ||
13212             Cast.isUndef()) && "Invalid cast opcode");
13213     SDValue In;
13214     if (Cast.isUndef())
13215       In = DAG.getUNDEF(SourceType);
13216     else
13217       In = Cast->getOperand(0);
13218     unsigned Index = isLE ? (i * ElemRatio) :
13219                             (i * ElemRatio + (ElemRatio - 1));
13220
13221     assert(Index < Ops.size() && "Invalid index");
13222     Ops[Index] = In;
13223   }
13224
13225   // The type of the new BUILD_VECTOR node.
13226   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
13227   assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
13228          "Invalid vector size");
13229   // Check if the new vector type is legal.
13230   if (!isTypeLegal(VecVT)) return SDValue();
13231
13232   // Make the new BUILD_VECTOR.
13233   SDValue BV = DAG.getBuildVector(VecVT, DL, Ops);
13234
13235   // The new BUILD_VECTOR node has the potential to be further optimized.
13236   AddToWorklist(BV.getNode());
13237   // Bitcast to the desired type.
13238   return DAG.getBitcast(VT, BV);
13239 }
13240
13241 SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
13242   EVT VT = N->getValueType(0);
13243
13244   unsigned NumInScalars = N->getNumOperands();
13245   SDLoc DL(N);
13246
13247   EVT SrcVT = MVT::Other;
13248   unsigned Opcode = ISD::DELETED_NODE;
13249   unsigned NumDefs = 0;
13250
13251   for (unsigned i = 0; i != NumInScalars; ++i) {
13252     SDValue In = N->getOperand(i);
13253     unsigned Opc = In.getOpcode();
13254
13255     if (Opc == ISD::UNDEF)
13256       continue;
13257
13258     // All scalar values must be floats converted from integers.
13259     if (Opcode == ISD::DELETED_NODE &&
13260         (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) {
13261       Opcode = Opc;
13262     }
13263
13264     if (Opc != Opcode)
13265       return SDValue();
13266
13267     EVT InVT = In.getOperand(0).getValueType();
13268
13269     // If the scalar values are not all of the same type, bail out. This
13270     // restriction is chosen to simplify BUILD_VECTOR of integer types.
13271     if (SrcVT == MVT::Other)
13272       SrcVT = InVT;
13273     if (SrcVT != InVT)
13274       return SDValue();
13275     NumDefs++;
13276   }
13277
13278   // If the vector has just one element defined, it's not worth folding it
13279   // into a vectorized one.
13280   if (NumDefs < 2)
13281     return SDValue();
13282
13283   assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP)
13284          && "Should only handle conversion from integer to float.");
13285   assert(SrcVT != MVT::Other && "Cannot determine source type!");
13286
13287   EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars);
13288
13289   if (!TLI.isOperationLegalOrCustom(Opcode, NVT))
13290     return SDValue();
13291
13292   // Just because the floating-point vector type is legal does not necessarily
13293   // mean that the corresponding integer vector type is.
13294 if (!isTypeLegal(NVT)) 13295 return SDValue(); 13296 13297 SmallVector<SDValue, 8> Opnds; 13298 for (unsigned i = 0; i != NumInScalars; ++i) { 13299 SDValue In = N->getOperand(i); 13300 13301 if (In.isUndef()) 13302 Opnds.push_back(DAG.getUNDEF(SrcVT)); 13303 else 13304 Opnds.push_back(In.getOperand(0)); 13305 } 13306 SDValue BV = DAG.getBuildVector(NVT, DL, Opnds); 13307 AddToWorklist(BV.getNode()); 13308 13309 return DAG.getNode(Opcode, DL, VT, BV); 13310 } 13311 13312 SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N, 13313 ArrayRef<int> VectorMask, 13314 SDValue VecIn1, SDValue VecIn2, 13315 unsigned LeftIdx) { 13316 MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 13317 SDValue ZeroIdx = DAG.getConstant(0, DL, IdxTy); 13318 13319 EVT VT = N->getValueType(0); 13320 EVT InVT1 = VecIn1.getValueType(); 13321 EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1; 13322 13323 unsigned Vec2Offset = InVT1.getVectorNumElements(); 13324 unsigned NumElems = VT.getVectorNumElements(); 13325 unsigned ShuffleNumElems = NumElems; 13326 13327 // We can't generate a shuffle node with mismatched input and output types. 13328 // Try to make the types match the type of the output. 13329 if (InVT1 != VT || InVT2 != VT) { 13330 if ((VT.getSizeInBits() % InVT1.getSizeInBits() == 0) && InVT1 == InVT2) { 13331 // If the output vector length is a multiple of both input lengths, 13332 // we can concatenate them and pad the rest with undefs. 13333 unsigned NumConcats = VT.getSizeInBits() / InVT1.getSizeInBits(); 13334 assert(NumConcats >= 2 && "Concat needs at least two inputs!"); 13335 SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1)); 13336 ConcatOps[0] = VecIn1; 13337 ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1); 13338 VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps); 13339 VecIn2 = SDValue(); 13340 } else if (InVT1.getSizeInBits() == VT.getSizeInBits() * 2) { 13341 if (!TLI.isExtractSubvectorCheap(VT, NumElems)) 13342 return SDValue(); 13343 13344 if (!VecIn2.getNode()) { 13345 // If we only have one input vector, and it's twice the size of the 13346 // output, split it in two. 13347 VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, 13348 DAG.getConstant(NumElems, DL, IdxTy)); 13349 VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, ZeroIdx); 13350 // Since we now have shorter input vectors, adjust the offset of the 13351 // second vector's start. 13352 Vec2Offset = NumElems; 13353 } else if (InVT2.getSizeInBits() <= InVT1.getSizeInBits()) { 13354 // VecIn1 is wider than the output, and we have another, possibly 13355 // smaller input. Pad the smaller input with undefs, shuffle at the 13356 // input vector width, and extract the output. 13357 // The shuffle type is different than VT, so check legality again. 13358 if (LegalOperations && 13359 !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1)) 13360 return SDValue(); 13361 13362 if (InVT1 != InVT2) 13363 VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1, 13364 DAG.getUNDEF(InVT1), VecIn2, ZeroIdx); 13365 ShuffleNumElems = NumElems * 2; 13366 } else { 13367 // Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is wider 13368 // than VecIn1. We can't handle this for now - this case will disappear 13369 // when we start sorting the vectors by type. 13370 return SDValue(); 13371 } 13372 } else { 13373 // TODO: Support cases where the length mismatch isn't exactly by a 13374 // factor of 2. 
13375       // TODO: Move this check upwards, so that if we have bad type
13376       // mismatches, we don't create any DAG nodes.
13377       return SDValue();
13378     }
13379   }
13380
13381   // Initialize mask to undef.
13382   SmallVector<int, 8> Mask(ShuffleNumElems, -1);
13383
13384   // Only need to run up to the number of elements actually used, not the
13385   // total number of elements in the shuffle - if we are shuffling a wider
13386   // vector, the high lanes should be set to undef.
13387   for (unsigned i = 0; i != NumElems; ++i) {
13388     if (VectorMask[i] <= 0)
13389       continue;
13390
13391     unsigned ExtIndex = N->getOperand(i).getConstantOperandVal(1);
13392     if (VectorMask[i] == (int)LeftIdx) {
13393       Mask[i] = ExtIndex;
13394     } else if (VectorMask[i] == (int)LeftIdx + 1) {
13395       Mask[i] = Vec2Offset + ExtIndex;
13396     }
13397   }
13398
13399   // The type of the input vectors may have changed above.
13400   InVT1 = VecIn1.getValueType();
13401
13402   // If we already have a VecIn2, it should have the same type as VecIn1.
13403   // If we don't, get an undef/zero vector of the appropriate type.
13404   VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(InVT1);
13405   assert(InVT1 == VecIn2.getValueType() && "Unexpected second input type.");
13406
13407   SDValue Shuffle = DAG.getVectorShuffle(InVT1, DL, VecIn1, VecIn2, Mask);
13408   if (ShuffleNumElems > NumElems)
13409     Shuffle = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuffle, ZeroIdx);
13410
13411   return Shuffle;
13412 }
13413
13414 // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
13415 // operations. If the types of the vectors we're extracting from allow it,
13416 // turn this into a vector_shuffle node.
13417 SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
13418   SDLoc DL(N);
13419   EVT VT = N->getValueType(0);
13420
13421   // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
13422   if (!isTypeLegal(VT))
13423     return SDValue();
13424
13425   // May only combine to shuffle after legalize if shuffle is legal.
13426   if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
13427     return SDValue();
13428
13429   bool UsesZeroVector = false;
13430   unsigned NumElems = N->getNumOperands();
13431
13432   // Record, for each element of the newly built vector, which input vector
13433   // that element comes from. -1 stands for undef, 0 for the zero vector,
13434   // and positive values for the input vectors.
13435   // VectorMask maps each element to its vector number, and VecIn maps vector
13436   // numbers to their initial SDValues.
13437
13438   SmallVector<int, 8> VectorMask(NumElems, -1);
13439   SmallVector<SDValue, 8> VecIn;
13440   VecIn.push_back(SDValue());
13441
13442   for (unsigned i = 0; i != NumElems; ++i) {
13443     SDValue Op = N->getOperand(i);
13444
13445     if (Op.isUndef())
13446       continue;
13447
13448     // See if we can use a blend with a zero vector.
13449     // TODO: Should we generalize this to a blend with an arbitrary constant
13450     // vector?
13451     if (isNullConstant(Op) || isNullFPConstant(Op)) {
13452       UsesZeroVector = true;
13453       VectorMask[i] = 0;
13454       continue;
13455     }
13456
13457     // Not an undef or zero. If the input is something other than an
13458     // EXTRACT_VECTOR_ELT with a constant index, bail out.
13459     if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13460         !isa<ConstantSDNode>(Op.getOperand(1)))
13461       return SDValue();
13462
13463     SDValue ExtractedFromVec = Op.getOperand(0);
13464
13465     // All inputs must have the same element type as the output.
13466     if (VT.getVectorElementType() !=
13467         ExtractedFromVec.getValueType().getVectorElementType())
13468       return SDValue();
13469
13470     // Have we seen this input vector before?
13471     // The vectors are expected to be tiny (usually 1 or 2 elements), so using
13472     // a map back from SDValues to numbers isn't worth it.
13473     unsigned Idx = std::distance(
13474         VecIn.begin(), std::find(VecIn.begin(), VecIn.end(), ExtractedFromVec));
13475     if (Idx == VecIn.size())
13476       VecIn.push_back(ExtractedFromVec);
13477
13478     VectorMask[i] = Idx;
13479   }
13480
13481   // If we didn't find at least one input vector, bail out.
13482   if (VecIn.size() < 2)
13483     return SDValue();
13484
13485   // TODO: We want to sort the vectors by descending length, so that adjacent
13486   // pairs have similar length, and the longer vector is always first in the
13487   // pair.
13488
13489   // TODO: Should this fire if some of the input vectors have an illegal type
13490   // (like it does now), or should we let legalization run its course first?
13491
13492   // Shuffle phase:
13493   // Take pairs of vectors, and shuffle them so that the result has elements
13494   // from these vectors in the correct places.
13495   // For example, given:
13496   // t10: i32 = extract_vector_elt t1, Constant:i64<0>
13497   // t11: i32 = extract_vector_elt t2, Constant:i64<0>
13498   // t12: i32 = extract_vector_elt t3, Constant:i64<0>
13499   // t13: i32 = extract_vector_elt t1, Constant:i64<1>
13500   // t14: v4i32 = BUILD_VECTOR t10, t11, t12, t13
13501   // We will generate:
13502   // t20: v4i32 = vector_shuffle<0,4,u,1> t1, t2
13503   // t21: v4i32 = vector_shuffle<u,u,0,u> t3, undef
13504   SmallVector<SDValue, 4> Shuffles;
13505   for (unsigned In = 0, Len = (VecIn.size() / 2); In < Len; ++In) {
13506     unsigned LeftIdx = 2 * In + 1;
13507     SDValue VecLeft = VecIn[LeftIdx];
13508     SDValue VecRight =
13509         (LeftIdx + 1) < VecIn.size() ? VecIn[LeftIdx + 1] : SDValue();
13510
13511     if (SDValue Shuffle = createBuildVecShuffle(DL, N, VectorMask, VecLeft,
13512                                                 VecRight, LeftIdx))
13513       Shuffles.push_back(Shuffle);
13514     else
13515       return SDValue();
13516   }
13517
13518   // If we need the zero vector as an "ingredient" in the blend tree, add it
13519   // to the list of shuffles.
13520   if (UsesZeroVector)
13521     Shuffles.push_back(VT.isInteger() ? DAG.getConstant(0, DL, VT)
13522                                       : DAG.getConstantFP(0.0, DL, VT));
13523
13524   // If we only have one shuffle, we're done.
13525   if (Shuffles.size() == 1)
13526     return Shuffles[0];
13527
13528   // Update the vector mask to point to the post-shuffle vectors.
13529   for (int &Vec : VectorMask)
13530     if (Vec == 0)
13531       Vec = Shuffles.size() - 1;
13532     else
13533       Vec = (Vec - 1) / 2;
13534
13535   // More than one shuffle. Generate a binary tree of blends, e.g. if from
13536   // the previous step we got the set of shuffles t10, t11, t12, t13, we will
13537   // generate:
13538   // t10: v8i32 = vector_shuffle<0,8,u,u,u,u,u,u> t1, t2
13539   // t11: v8i32 = vector_shuffle<u,u,0,8,u,u,u,u> t3, t4
13540   // t12: v8i32 = vector_shuffle<u,u,u,u,0,8,u,u> t5, t6
13541   // t13: v8i32 = vector_shuffle<u,u,u,u,u,u,0,8> t7, t8
13542   // t20: v8i32 = vector_shuffle<0,1,10,11,u,u,u,u> t10, t11
13543   // t21: v8i32 = vector_shuffle<u,u,u,u,4,5,14,15> t12, t13
13544   // t30: v8i32 = vector_shuffle<0,1,2,3,12,13,14,15> t20, t21
13545
13546   // Make sure the initial size of the shuffle list is even.
  if (Shuffles.size() % 2)
    Shuffles.push_back(DAG.getUNDEF(VT));

  for (unsigned CurSize = Shuffles.size(); CurSize > 1; CurSize /= 2) {
    if (CurSize % 2) {
      Shuffles[CurSize] = DAG.getUNDEF(VT);
      CurSize++;
    }
    for (unsigned In = 0, Len = CurSize / 2; In < Len; ++In) {
      int Left = 2 * In;
      int Right = 2 * In + 1;
      SmallVector<int, 8> Mask(NumElems, -1);
      for (unsigned i = 0; i != NumElems; ++i) {
        if (VectorMask[i] == Left) {
          Mask[i] = i;
          VectorMask[i] = In;
        } else if (VectorMask[i] == Right) {
          Mask[i] = i + NumElems;
          VectorMask[i] = In;
        }
      }

      Shuffles[In] =
          DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
    }
  }

  return Shuffles[0];
}

SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);

  // A vector built entirely of undefs is undef.
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  if (SDValue V = reduceBuildVecExtToExtBuildVec(N))
    return V;

  if (SDValue V = reduceBuildVecConvertToConvertBuildVec(N))
    return V;

  if (SDValue V = reduceBuildVecToShuffle(N))
    return V;

  return SDValue();
}

static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT OpVT = N->getOperand(0).getValueType();

  // If the operands are legal vectors, leave them alone.
  if (TLI.isTypeLegal(OpVT))
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SmallVector<SDValue, 8> Ops;

  EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
  SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);

  // Keep track of what we encounter.
  bool AnyInteger = false;
  bool AnyFP = false;
  for (const SDValue &Op : N->ops()) {
    if (ISD::BITCAST == Op.getOpcode() &&
        !Op.getOperand(0).getValueType().isVector())
      Ops.push_back(Op.getOperand(0));
    else if (ISD::UNDEF == Op.getOpcode())
      Ops.push_back(ScalarUndef);
    else
      return SDValue();

    // Note whether we encounter an integer or floating point scalar.
    // If it's neither, bail out, it could be something weird like x86mmx.
    EVT LastOpVT = Ops.back().getValueType();
    if (LastOpVT.isFloatingPoint())
      AnyFP = true;
    else if (LastOpVT.isInteger())
      AnyInteger = true;
    else
      return SDValue();
  }

  // If any of the operands is a floating point scalar bitcast to a vector,
  // use floating point types throughout, and bitcast everything.
  // Replace UNDEFs by another scalar UNDEF node, of the final desired type.
  if (AnyFP) {
    SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
    ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
    if (AnyInteger) {
      for (SDValue &Op : Ops) {
        if (Op.getValueType() == SVT)
          continue;
        if (Op.isUndef())
          Op = ScalarUndef;
        else
          Op = DAG.getBitcast(SVT, Op);
      }
    }
  }

  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
                               VT.getSizeInBits() / SVT.getSizeInBits());
  return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
}

// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
// operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at
// most two distinct vectors the same size as the result, attempt to turn this
// into a legal shuffle.
static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  EVT OpVT = N->getOperand(0).getValueType();
  int NumElts = VT.getVectorNumElements();
  int NumOpElts = OpVT.getVectorNumElements();

  SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT);
  SmallVector<int, 8> Mask;

  for (SDValue Op : N->ops()) {
    // Peek through any bitcast.
    while (Op.getOpcode() == ISD::BITCAST)
      Op = Op.getOperand(0);

    // UNDEF nodes convert to UNDEF shuffle mask values.
    if (Op.isUndef()) {
      Mask.append((unsigned)NumOpElts, -1);
      continue;
    }

    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // What vector are we extracting the subvector from and at what index?
    SDValue ExtVec = Op.getOperand(0);

    // We want the EVT of the original extraction to correctly scale the
    // extraction index.
    EVT ExtVT = ExtVec.getValueType();

    // Peek through any bitcast.
    while (ExtVec.getOpcode() == ISD::BITCAST)
      ExtVec = ExtVec.getOperand(0);

    // UNDEF nodes convert to UNDEF shuffle mask values.
    if (ExtVec.isUndef()) {
      Mask.append((unsigned)NumOpElts, -1);
      continue;
    }

    if (!isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    int ExtIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

    // Ensure that we are extracting a subvector from a vector the same
    // size as the result.
    if (ExtVT.getSizeInBits() != VT.getSizeInBits())
      return SDValue();

    // Scale the subvector index to account for any bitcast.
    int NumExtElts = ExtVT.getVectorNumElements();
    if (0 == (NumExtElts % NumElts))
      ExtIdx /= (NumExtElts / NumElts);
    else if (0 == (NumElts % NumExtElts))
      ExtIdx *= (NumElts / NumExtElts);
    else
      return SDValue();

    // At most we can reference 2 inputs in the final shuffle.
    if (SV0.isUndef() || SV0 == ExtVec) {
      SV0 = ExtVec;
      for (int i = 0; i != NumOpElts; ++i)
        Mask.push_back(i + ExtIdx);
    } else if (SV1.isUndef() || SV1 == ExtVec) {
      SV1 = ExtVec;
      for (int i = 0; i != NumOpElts; ++i)
        Mask.push_back(i + ExtIdx + NumElts);
    } else {
      return SDValue();
    }
  }

  if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(Mask, VT))
    return SDValue();

  return DAG.getVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0),
                              DAG.getBitcast(VT, SV1), Mask);
}

SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
  // If we only have one input vector, we don't need to do any concatenation.
  if (N->getNumOperands() == 1)
    return N->getOperand(0);

  // Check if all of the operands are undefs.
  EVT VT = N->getValueType(0);
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  // Optimize concat_vectors where all but the first of the vectors are undef.
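  // e.g. concat_vectors(X, undef, undef) only copies X's lanes, so when X is
  // itself a bitcast scalar it can sometimes be rebuilt as the cheaper
  // scalar_to_vector handled below.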
  if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) {
        return Op.isUndef();
      })) {
    SDValue In = N->getOperand(0);
    assert(In.getValueType().isVector() && "Must concat vectors");

    // Transform: concat_vectors(scalar, undef) -> scalar_to_vector(scalar).
    if (In->getOpcode() == ISD::BITCAST &&
        !In->getOperand(0)->getValueType(0).isVector()) {
      SDValue Scalar = In->getOperand(0);

      // If the bitcast type isn't legal, it might be a trunc of a legal type;
      // look through the trunc so we can still do the transform:
      // concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
      if (Scalar->getOpcode() == ISD::TRUNCATE &&
          !TLI.isTypeLegal(Scalar.getValueType()) &&
          TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
        Scalar = Scalar->getOperand(0);

      EVT SclTy = Scalar->getValueType(0);

      if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
        return SDValue();

      EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy,
                                 VT.getSizeInBits() / SclTy.getSizeInBits());
      if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
        return SDValue();

      SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar);
      return DAG.getBitcast(VT, Res);
    }
  }

  // Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR.
  // We have already tested above for an UNDEF only concatenation.
  // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
  // -> (BUILD_VECTOR A, B, ..., C, D, ...)
  auto IsBuildVectorOrUndef = [](const SDValue &Op) {
    return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
  };
  if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
    SmallVector<SDValue, 8> Opnds;
    EVT SVT = VT.getScalarType();

    EVT MinVT = SVT;
    if (!SVT.isFloatingPoint()) {
      // If the BUILD_VECTOR nodes are built from integers, they may have
      // different operand types. Get the smallest type and truncate all
      // operands to it.
      bool FoundMinVT = false;
      for (const SDValue &Op : N->ops())
        if (ISD::BUILD_VECTOR == Op.getOpcode()) {
          EVT OpSVT = Op.getOperand(0)->getValueType(0);
          MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
          FoundMinVT = true;
        }
      assert(FoundMinVT && "Concat vector type mismatch");
    }

    for (const SDValue &Op : N->ops()) {
      EVT OpVT = Op.getValueType();
      unsigned NumElts = OpVT.getVectorNumElements();

      if (ISD::UNDEF == Op.getOpcode())
        Opnds.append(NumElts, DAG.getUNDEF(MinVT));

      if (ISD::BUILD_VECTOR == Op.getOpcode()) {
        if (SVT.isFloatingPoint()) {
          assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
          Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
        } else {
          for (unsigned i = 0; i != NumElts; ++i)
            Opnds.push_back(
                DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
        }
      }
    }

    assert(VT.getVectorNumElements() == Opnds.size() &&
           "Concat vector type mismatch");
    return DAG.getBuildVector(VT, SDLoc(N), Opnds);
  }

  // Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
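  // e.g. concat_vectors(v1f64 (bitcast f64 X), v1f64 (bitcast f64 Y))
  //        -> v2f64 (build_vector f64 X, f64 Y)
  // (combineConcatVectorOfScalars only fires when the operand type is not
  // legal; legal vector operands are better left untouched.)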
  if (SDValue V = combineConcatVectorOfScalars(N, DAG))
    return V;

  // Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
  if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
    if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
      return V;

  // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
  // nodes often generate nop CONCAT_VECTOR nodes.
  // Scan the CONCAT_VECTOR operands and look for CONCAT operations that
  // place the incoming vectors at the exact same location.
  SDValue SingleSource = SDValue();
  unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.isUndef())
      continue;

    // Check if this is the identity extract:
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // Find the single incoming vector for the extract_subvector.
    if (SingleSource.getNode()) {
      if (Op.getOperand(0) != SingleSource)
        return SDValue();
    } else {
      SingleSource = Op.getOperand(0);

      // Check the source type is the same as the type of the result.
      // If not, this concat may extend the vector, so we cannot
      // optimize it away.
      if (SingleSource.getValueType() != N->getValueType(0))
        return SDValue();
    }

    unsigned IdentityIndex = i * PartNumElem;
    ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    // The extract index must be constant.
    if (!CS)
      return SDValue();

    // Check that we are reading from the identity index.
    if (CS->getZExtValue() != IdentityIndex)
      return SDValue();
  }

  if (SingleSource.getNode())
    return SingleSource;

  return SDValue();
}

SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
  EVT NVT = N->getValueType(0);
  SDValue V = N->getOperand(0);

  // Extract from UNDEF is UNDEF.
  if (V.isUndef())
    return DAG.getUNDEF(NVT);

  // Combine:
  //    (extract_subvec (concat V1, V2, ...), i)
  // Into:
  //    Vi if possible
  // Only operand 0 is checked as 'concat' assumes all inputs are of the same
  // type.
  if (V->getOpcode() == ISD::CONCAT_VECTORS &&
      isa<ConstantSDNode>(N->getOperand(1)) &&
      V->getOperand(0).getValueType() == NVT) {
    unsigned Idx = N->getConstantOperandVal(1);
    unsigned NumElems = NVT.getVectorNumElements();
    assert((Idx % NumElems) == 0 &&
           "IDX in concat is not a multiple of the result vector length.");
    return V->getOperand(Idx / NumElems);
  }

  // Skip bitcasting
  if (V->getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
    // Handle only the simple case where the vector being inserted and the
    // vector being extracted are of the same type, and are half the size of
    // the larger vector.
    EVT BigVT = V->getOperand(0).getValueType();
    EVT SmallVT = V->getOperand(1).getValueType();
    if (!NVT.bitsEq(SmallVT) || NVT.getSizeInBits()*2 != BigVT.getSizeInBits())
      return SDValue();

    // Only handle cases where both indexes are constants with the same type.
    ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
    ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));

    if (InsIdx && ExtIdx) {
      // Combine:
      //    (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
      // Into:
      //    indices are equal or bit offsets are equal => V1
      //    otherwise => (extract_subvec V1, ExtIdx)
      if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() ==
          ExtIdx->getZExtValue() * NVT.getScalarSizeInBits())
        return DAG.getBitcast(NVT, V->getOperand(1));
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT,
          DAG.getBitcast(N->getOperand(0).getValueType(), V->getOperand(0)),
          N->getOperand(1));
    }
  }

  return SDValue();
}

static SDValue simplifyShuffleOperandRecursively(SmallBitVector &UsedElements,
                                                 SDValue V, SelectionDAG &DAG) {
  SDLoc DL(V);
  EVT VT = V.getValueType();

  switch (V.getOpcode()) {
  default:
    return V;

  case ISD::CONCAT_VECTORS: {
    EVT OpVT = V->getOperand(0).getValueType();
    int OpSize = OpVT.getVectorNumElements();
    SmallBitVector OpUsedElements(OpSize, false);
    bool FoundSimplification = false;
    SmallVector<SDValue, 4> NewOps;
    NewOps.reserve(V->getNumOperands());
    for (int i = 0, NumOps = V->getNumOperands(); i < NumOps; ++i) {
      SDValue Op = V->getOperand(i);
      bool OpUsed = false;
      for (int j = 0; j < OpSize; ++j)
        if (UsedElements[i * OpSize + j]) {
          OpUsedElements[j] = true;
          OpUsed = true;
        }
      NewOps.push_back(
          OpUsed ? simplifyShuffleOperandRecursively(OpUsedElements, Op, DAG)
                 : DAG.getUNDEF(OpVT));
      // Record whether this operand actually changed, so we only rebuild the
      // node when a simplification happened.
      FoundSimplification |= Op != NewOps.back();
      OpUsedElements.reset();
    }
    if (FoundSimplification)
      V = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, NewOps);
    return V;
  }

  case ISD::INSERT_SUBVECTOR: {
    SDValue BaseV = V->getOperand(0);
    SDValue SubV = V->getOperand(1);
    auto *IdxN = dyn_cast<ConstantSDNode>(V->getOperand(2));
    if (!IdxN)
      return V;

    int SubSize = SubV.getValueType().getVectorNumElements();
    int Idx = IdxN->getZExtValue();
    bool SubVectorUsed = false;
    SmallBitVector SubUsedElements(SubSize, false);
    for (int i = 0; i < SubSize; ++i)
      if (UsedElements[i + Idx]) {
        SubVectorUsed = true;
        SubUsedElements[i] = true;
        UsedElements[i + Idx] = false;
      }

    // Now recurse on both the base and sub vectors.
    SDValue SimplifiedSubV =
        SubVectorUsed
            ? simplifyShuffleOperandRecursively(SubUsedElements, SubV, DAG)
            : DAG.getUNDEF(SubV.getValueType());
    SDValue SimplifiedBaseV =
        simplifyShuffleOperandRecursively(UsedElements, BaseV, DAG);
    if (SimplifiedSubV != SubV || SimplifiedBaseV != BaseV)
      V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                      SimplifiedBaseV, SimplifiedSubV, V->getOperand(2));
    return V;
  }
  }
}

static SDValue simplifyShuffleOperands(ShuffleVectorSDNode *SVN, SDValue N0,
                                       SDValue N1, SelectionDAG &DAG) {
  EVT VT = SVN->getValueType(0);
  int NumElts = VT.getVectorNumElements();
  SmallBitVector N0UsedElements(NumElts, false), N1UsedElements(NumElts, false);
  for (int M : SVN->getMask())
    if (M >= 0 && M < NumElts)
      N0UsedElements[M] = true;
    else if (M >= NumElts)
      N1UsedElements[M - NumElts] = true;

  SDValue S0 = simplifyShuffleOperandRecursively(N0UsedElements, N0, DAG);
  SDValue S1 = simplifyShuffleOperandRecursively(N1UsedElements, N1, DAG);
  if (S0 == N0 && S1 == N1)
    return SDValue();

  return DAG.getVectorShuffle(VT, SDLoc(SVN), S0, S1, SVN->getMask());
}

// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat, or turn
// a shuffle of a single concat into a simpler shuffle followed by a concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  SmallVector<SDValue, 4> Ops;
  EVT ConcatVT = N0.getOperand(0).getValueType();
  unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
  unsigned NumConcats = NumElts / NumElemsPerConcat;

  // Special case: shuffle(concat(A,B)) can be more efficiently represented
  // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
  // half vector elements.
  if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
      std::all_of(SVN->getMask().begin() + NumElemsPerConcat,
                  SVN->getMask().end(), [](int i) { return i == -1; })) {
    N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0),
                              N0.getOperand(1),
                              makeArrayRef(SVN->getMask().begin(),
                                           NumElemsPerConcat));
    N1 = DAG.getUNDEF(ConcatVT);
    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1);
  }

  // Look at every vector that's inserted. We're looking for exact
  // subvector-sized copies from a concatenated vector.
  for (unsigned I = 0; I != NumConcats; ++I) {
    // Make sure we're dealing with a copy.
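    // A "copy" here means the mask for this output chunk is either all-undef
    // or selects NumElemsPerConcat consecutive elements starting at a
    // multiple of NumElemsPerConcat, i.e. exactly one operand of the
    // concatenated inputs.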
    unsigned Begin = I * NumElemsPerConcat;
    bool AllUndef = true, NoUndef = true;
    for (unsigned J = Begin; J != Begin + NumElemsPerConcat; ++J) {
      if (SVN->getMaskElt(J) >= 0)
        AllUndef = false;
      else
        NoUndef = false;
    }

    if (NoUndef) {
      if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0)
        return SDValue();

      for (unsigned J = 1; J != NumElemsPerConcat; ++J)
        if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J))
          return SDValue();

      unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat;
      if (FirstElt < N0.getNumOperands())
        Ops.push_back(N0.getOperand(FirstElt));
      else
        Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands()));

    } else if (AllUndef) {
      Ops.push_back(DAG.getUNDEF(N0.getOperand(0).getValueType()));
    } else { // Mixed with general masks and undefs, can't do optimization.
      return SDValue();
    }
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}

// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
//
// SHUFFLE(BUILD_VECTOR(), BUILD_VECTOR()) -> BUILD_VECTOR() is always
// a simplification in some sense, but it isn't appropriate in general: some
// BUILD_VECTORs are substantially cheaper than others. The general case
// of a BUILD_VECTOR requires inserting each element individually (or
// performing the equivalent in a temporary stack variable). A BUILD_VECTOR of
// all constants is a single constant pool load. A BUILD_VECTOR where each
// element is identical is a splat. A BUILD_VECTOR where most of the operands
// are undef lowers to a small number of element insertions.
//
// To deal with this, we currently use a bunch of mostly arbitrary heuristics.
// We don't fold shuffles where one side is a non-zero constant, and we don't
// fold shuffles if the resulting BUILD_VECTOR would have duplicate
// non-constant operands. This seems to work out reasonably well in practice.
static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
                                       SelectionDAG &DAG,
                                       const TargetLowering &TLI) {
  EVT VT = SVN->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  SDValue N0 = SVN->getOperand(0);
  SDValue N1 = SVN->getOperand(1);

  if (!N0->hasOneUse() || !N1->hasOneUse())
    return SDValue();
  // If only one of N0,N1 is constant, bail out if it is not ALL_ZEROS as
  // discussed above.
  if (!N1.isUndef()) {
    bool N0AnyConst = isAnyConstantBuildVector(N0.getNode());
    bool N1AnyConst = isAnyConstantBuildVector(N1.getNode());
    if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode()))
      return SDValue();
    if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode()))
      return SDValue();
  }

  SmallVector<SDValue, 8> Ops;
  SmallSet<SDValue, 16> DuplicateOps;
  for (int M : SVN->getMask()) {
    SDValue Op = DAG.getUNDEF(VT.getScalarType());
    if (M >= 0) {
      int Idx = M < (int)NumElts ? M : M - NumElts;
      SDValue &S = (M < (int)NumElts ? N0 : N1);
      if (S.getOpcode() == ISD::BUILD_VECTOR) {
        Op = S.getOperand(Idx);
      } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        if (Idx == 0)
          Op = S.getOperand(0);
      } else {
        // Operand can't be combined - bail out.
        return SDValue();
      }
    }

    // Don't duplicate a non-constant BUILD_VECTOR operand; semantically, this
    // is fine, but it's likely to generate low-quality code if the target
    // can't reconstruct an appropriate shuffle.
    if (!Op.isUndef() && !isa<ConstantSDNode>(Op) && !isa<ConstantFPSDNode>(Op))
      if (!DuplicateOps.insert(Op).second)
        return SDValue();

    Ops.push_back(Op);
  }
  // BUILD_VECTOR requires all inputs to be of the same type; find the
  // maximum type and extend them all.
  EVT SVT = VT.getScalarType();
  if (SVT.isInteger())
    for (SDValue &Op : Ops)
      SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
  if (SVT != VT.getScalarType())
    for (SDValue &Op : Ops)
      Op = TLI.isZExtFree(Op.getValueType(), SVT)
               ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
               : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT);
  return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
}

SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");

  // Canonicalize shuffle undef, undef -> undef
  if (N0.isUndef() && N1.isUndef())
    return DAG.getUNDEF(VT);

  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  // Canonicalize shuffle v, v -> v, undef
  if (N0 == N1) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) Idx -= NumElts;
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT), NewMask);
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N0.isUndef())
    return DAG.getCommutedVectorShuffle(*SVN);

  // Remove references to rhs if it is undef
  if (N1.isUndef()) {
    bool Changed = false;
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) {
        Idx = -1;
        Changed = true;
      }
      NewMask.push_back(Idx);
    }
    if (Changed)
      return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, NewMask);
  }

  // If it is a splat, check if the argument vector is another splat or a
  // build_vector.
  if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
    SDNode *V = N0.getNode();

    // If this is a bit convert that changes the element type of the vector but
    // not the number of vector elements, look through it. Be careful not to
    // look through conversions that change things like v4f32 to v2f64.
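    // (A lane-preserving bitcast such as v4i32 <-> v4f32 keeps the splat
    // index meaningful; a v4f32 -> v2f64 conversion merges lanes, so the
    // index would no longer name the same data.)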
    if (V->getOpcode() == ISD::BITCAST) {
      SDValue ConvInput = V->getOperand(0);
      if (ConvInput.getValueType().isVector() &&
          ConvInput.getValueType().getVectorNumElements() == NumElts)
        V = ConvInput.getNode();
    }

    if (V->getOpcode() == ISD::BUILD_VECTOR) {
      assert(V->getNumOperands() == NumElts &&
             "BUILD_VECTOR has wrong number of operands");
      SDValue Base;
      bool AllSame = true;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (!V->getOperand(i).isUndef()) {
          Base = V->getOperand(i);
          break;
        }
      }
      // Splat of <u, u, u, u>, return <u, u, u, u>
      if (!Base.getNode())
        return N0;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i) != Base) {
          AllSame = false;
          break;
        }
      }
      // Splat of <x, x, x, x>, return <x, x, x, x>
      if (AllSame)
        return N0;

      // Canonicalize any other splat as a build_vector.
      const SDValue &Splatted = V->getOperand(SVN->getSplatIndex());
      SmallVector<SDValue, 8> Ops(NumElts, Splatted);
      SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops);

      // We may have jumped through bitcasts, so the type of the
      // BUILD_VECTOR may not match the type of the shuffle.
      if (V->getValueType(0) != VT)
        NewBV = DAG.getBitcast(VT, NewBV);
      return NewBV;
    }
  }

  // There are various patterns used to build up a vector from smaller vectors,
  // subvectors, or elements. Scan chains of these and replace unused
  // insertions or components with undef.
  if (SDValue S = simplifyShuffleOperands(SVN, N0, N1, DAG))
    return S;

  if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
      Level < AfterLegalizeVectorOps &&
      (N1.isUndef() ||
       (N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
    if (SDValue V = partitionShuffleOfConcats(N, DAG))
      return V;
  }

  // Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
  // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
  if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
    if (SDValue Res = combineShuffleOfScalars(SVN, DAG, TLI))
      return Res;

  // If this shuffle only has a single input that is a bitcasted shuffle,
  // attempt to merge the 2 shuffles and suitably bitcast the inputs/output
  // back to their original types.
  if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
      N1.isUndef() && Level < AfterLegalizeVectorOps &&
      TLI.isTypeLegal(VT)) {

    // Peek through the bitcast only if there is one user.
    SDValue BC0 = N0;
    while (BC0.getOpcode() == ISD::BITCAST) {
      if (!BC0.hasOneUse())
        break;
      BC0 = BC0.getOperand(0);
    }

    auto ScaleShuffleMask = [](ArrayRef<int> Mask, int Scale) {
      if (Scale == 1)
        return SmallVector<int, 8>(Mask.begin(), Mask.end());

      SmallVector<int, 8> NewMask;
      for (int M : Mask)
        for (int s = 0; s != Scale; ++s)
          NewMask.push_back(M < 0 ? -1 : Scale * M + s);
      return NewMask;
    };

    if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) {
      EVT SVT = VT.getScalarType();
      EVT InnerVT = BC0->getValueType(0);
      EVT InnerSVT = InnerVT.getScalarType();

      // Determine which shuffle works with the smaller scalar type.
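      // (Both masks are then rescaled to that narrower scalar granularity,
      // the finest unit at which the two masks can be composed without
      // splitting an element.)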
      EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT;
      EVT ScaleSVT = ScaleVT.getScalarType();

      if (TLI.isTypeLegal(ScaleVT) &&
          0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) &&
          0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) {

        int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits();
        int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits();

        // Scale the shuffle masks to the smaller scalar type.
        ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
        SmallVector<int, 8> InnerMask =
            ScaleShuffleMask(InnerSVN->getMask(), InnerScale);
        SmallVector<int, 8> OuterMask =
            ScaleShuffleMask(SVN->getMask(), OuterScale);

        // Merge the shuffle masks.
        SmallVector<int, 8> NewMask;
        for (int M : OuterMask)
          NewMask.push_back(M < 0 ? -1 : InnerMask[M]);

        // Test for shuffle mask legality over both commutations.
        SDValue SV0 = BC0->getOperand(0);
        SDValue SV1 = BC0->getOperand(1);
        bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
        if (!LegalMask) {
          std::swap(SV0, SV1);
          ShuffleVectorSDNode::commuteMask(NewMask);
          LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
        }

        if (LegalMask) {
          SV0 = DAG.getBitcast(ScaleVT, SV0);
          SV1 = DAG.getBitcast(ScaleVT, SV1);
          return DAG.getBitcast(
              VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
        }
      }
    }
  }

  // Canonicalize shuffles according to rules:
  //  shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
  //  shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
  //  shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
  if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
      N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
      TLI.isTypeLegal(VT)) {
    // The incoming shuffle must be of the same type as the result of the
    // current shuffle.
    assert(N1->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    SDValue SV0 = N1->getOperand(0);
    SDValue SV1 = N1->getOperand(1);
    bool HasSameOp0 = N0 == SV0;
    bool IsSV1Undef = SV1.isUndef();
    if (HasSameOp0 || IsSV1Undef || N0 == SV1)
      // Commute the operands of this shuffle so that the next rule
      // will trigger.
      return DAG.getCommutedVectorShuffle(*SVN);
  }

  // Try to fold according to rules:
  //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
  //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
  //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
  // Don't try to fold shuffles with illegal type.
  // Only fold if this shuffle is the only user of the other shuffle.
  if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) &&
      Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
    ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);

    // Don't try to fold splats; they're likely to simplify somehow, or they
    // might be free.
    if (OtherSV->isSplat())
      return SDValue();

    // The incoming shuffle must be of the same type as the result of the
    // current shuffle.
    assert(OtherSV->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    SDValue SV0, SV1;
    SmallVector<int, 4> Mask;
    // Compute the combined shuffle mask for a shuffle with SV0 as the first
    // operand, and SV1 as the second operand.
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx < 0) {
        // Propagate Undef.
        Mask.push_back(Idx);
        continue;
      }

      SDValue CurrentVec;
      if (Idx < (int)NumElts) {
        // This shuffle index refers to the inner shuffle N0. Look up the
        // inner shuffle mask to identify which vector is actually referenced.
        Idx = OtherSV->getMaskElt(Idx);
        if (Idx < 0) {
          // Propagate Undef.
          Mask.push_back(Idx);
          continue;
        }

        CurrentVec = (Idx < (int) NumElts) ? OtherSV->getOperand(0)
                                           : OtherSV->getOperand(1);
      } else {
        // This shuffle index references an element within N1.
        CurrentVec = N1;
      }

      // Simple case where 'CurrentVec' is UNDEF.
      if (CurrentVec.isUndef()) {
        Mask.push_back(-1);
        continue;
      }

      // Canonicalize the shuffle index. We don't know yet if CurrentVec
      // will be the first or second operand of the combined shuffle.
      Idx = Idx % NumElts;
      if (!SV0.getNode() || SV0 == CurrentVec) {
        // Ok. CurrentVec is the left hand side.
        // Update the mask accordingly.
        SV0 = CurrentVec;
        Mask.push_back(Idx);
        continue;
      }

      // Bail out if we cannot convert the shuffle pair into a single shuffle.
      if (SV1.getNode() && SV1 != CurrentVec)
        return SDValue();

      // Ok. CurrentVec is the right hand side.
      // Update the mask accordingly.
      SV1 = CurrentVec;
      Mask.push_back(Idx + NumElts);
    }

    // Check if all indices in Mask are Undef. If so, propagate Undef.
    bool isUndefMask = true;
    for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
      isUndefMask &= Mask[i] < 0;

    if (isUndefMask)
      return DAG.getUNDEF(VT);

    if (!SV0.getNode())
      SV0 = DAG.getUNDEF(VT);
    if (!SV1.getNode())
      SV1 = DAG.getUNDEF(VT);

    // Avoid introducing shuffles with illegal mask.
    if (!TLI.isShuffleMaskLegal(Mask, VT)) {
      ShuffleVectorSDNode::commuteMask(Mask);

      if (!TLI.isShuffleMaskLegal(Mask, VT))
        return SDValue();

      //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
      //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
      //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
      std::swap(SV0, SV1);
    }

    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
    return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
  SDValue InVal = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
  // with a VECTOR_SHUFFLE.
  if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue InVec = InVal->getOperand(0);
    SDValue EltNo = InVal->getOperand(1);

    // FIXME: We could support implicit truncation if the shuffle can be
    // scaled to a smaller vector scalar type.
    ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo);
    if (C0 && VT == InVec.getValueType() &&
        VT.getScalarType() == InVal.getValueType()) {
      SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1);
      int Elt = C0->getZExtValue();
      NewMask[0] = Elt;

      if (TLI.isShuffleMaskLegal(NewMask, VT))
        return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT),
                                    NewMask);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);

  // If inserting an UNDEF, just return the original vector.
  if (N1.isUndef())
    return N0;

  // Combine INSERT_SUBVECTORs where we are inserting to the same index.
  // INSERT_SUBVECTOR( INSERT_SUBVECTOR( Vec, SubOld, Idx ), SubNew, Idx )
  // --> INSERT_SUBVECTOR( Vec, SubNew, Idx )
  if (N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
      N0.getOperand(1).getValueType() == N1.getValueType() &&
      N0.getOperand(2) == N2)
    return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
                       N1, N2);

  if (N0.getValueType() != N1.getValueType())
    return SDValue();

  // If the input vector is a concatenation, and the insert replaces
  // one of the halves, we can optimize into a single concat_vectors.
  if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0->getNumOperands() == 2 &&
      isa<ConstantSDNode>(N2)) {
    unsigned InsIdx = cast<ConstantSDNode>(N2)->getZExtValue();

    // Lower half: fold (insert_subvector (concat_vectors X, Y), Z) ->
    // (concat_vectors Z, Y)
    if (InsIdx == 0)
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N1,
                         N0.getOperand(1));

    // Upper half: fold (insert_subvector (concat_vectors X, Y), Z) ->
    // (concat_vectors X, Z)
    if (InsIdx == VT.getVectorNumElements() / 2)
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0.getOperand(0),
                         N1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
  SDValue N0 = N->getOperand(0);

  // fold (fp_to_fp16 (fp16_to_fp op)) -> op
  if (N0->getOpcode() == ISD::FP16_TO_FP)
    return N0->getOperand(0);

  return SDValue();
}

SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);

  // fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op)
  if (N0->getOpcode() == ISD::AND) {
    ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1));
    if (AndConst && AndConst->getAPIntValue() == 0xffff) {
      return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0),
                         N0.getOperand(0));
    }
  }

  return SDValue();
}

/// Returns a vector_shuffle if it is able to transform an AND to a
/// vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
///      vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);

  // Make sure we're not running after operation legalization where it
  // may have custom lowered the vector shuffles.
  if (LegalOperations)
    return SDValue();

  if (N->getOpcode() != ISD::AND)
    return SDValue();

  if (RHS.getOpcode() == ISD::BITCAST)
    RHS = RHS.getOperand(0);

  if (RHS.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  EVT RVT = RHS.getValueType();
  unsigned NumElts = RHS.getNumOperands();

  // Attempt to create a valid clear mask, splitting the mask into
  // sub elements and checking to see if each is
  // all zeros or all ones - suitable for shuffle masking.
  auto BuildClearMask = [&](int Split) {
    int NumSubElts = NumElts * Split;
    int NumSubBits = RVT.getScalarSizeInBits() / Split;

    SmallVector<int, 8> Indices;
    for (int i = 0; i != NumSubElts; ++i) {
      int EltIdx = i / Split;
      int SubIdx = i % Split;
      SDValue Elt = RHS.getOperand(EltIdx);
      if (Elt.isUndef()) {
        Indices.push_back(-1);
        continue;
      }

      APInt Bits;
      if (isa<ConstantSDNode>(Elt))
        Bits = cast<ConstantSDNode>(Elt)->getAPIntValue();
      else if (isa<ConstantFPSDNode>(Elt))
        Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt();
      else
        return SDValue();

      // Extract the sub element from the constant bit mask.
      if (DAG.getDataLayout().isBigEndian()) {
        Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits);
      } else {
        Bits = Bits.lshr(SubIdx * NumSubBits);
      }

      if (Split > 1)
        Bits = Bits.trunc(NumSubBits);

      if (Bits.isAllOnesValue())
        Indices.push_back(i);
      else if (Bits == 0)
        Indices.push_back(i + NumSubElts);
      else
        return SDValue();
    }

    // Let's see if the target supports this vector_shuffle.
    EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits);
    EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts);
    if (!TLI.isVectorClearMaskLegal(Indices, ClearVT))
      return SDValue();

    SDValue Zero = DAG.getConstant(0, DL, ClearVT);
    return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, DL,
                                                   DAG.getBitcast(ClearVT, LHS),
                                                   Zero, Indices));
  };

  // Determine maximum split level (byte level masking).
  int MaxSplit = 1;
  if (RVT.getScalarSizeInBits() % 8 == 0)
    MaxSplit = RVT.getScalarSizeInBits() / 8;

  for (int Split = 1; Split <= MaxSplit; ++Split)
    if (RVT.getScalarSizeInBits() % Split == 0)
      if (SDValue S = BuildClearMask(Split))
        return S;

  return SDValue();
}

/// Visit a binary vector operation, like ADD.
SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
  assert(N->getValueType(0).isVector() &&
         "SimplifyVBinOp only works on vectors!");

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Ops[] = {LHS, RHS};

  // See if we can constant fold the vector operation.
  if (SDValue Fold = DAG.FoldConstantVectorArithmetic(
          N->getOpcode(), SDLoc(LHS), LHS.getValueType(), Ops, N->getFlags()))
    return Fold;

  // Try to convert a constant mask AND into a shuffle clear mask.
  if (SDValue Shuffle = XformToShuffleWithZero(N))
    return Shuffle;

  // Type legalization might introduce new shuffles in the DAG.
  // Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask)))
  //   -> (shuffle (VBinOp (A, B)), Undef, Mask).
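  // This is safe because both operands are shuffled by the same mask and the
  // binary operation works lane-wise, so applying it before or after the
  // shuffle touches the same pairs of elements.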
  if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) &&
      isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
      LHS.getOperand(1).isUndef() &&
      RHS.getOperand(1).isUndef()) {
    ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
    ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);

    if (SVN0->getMask().equals(SVN1->getMask())) {
      EVT VT = N->getValueType(0);
      SDValue UndefVector = LHS.getOperand(1);
      SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                     LHS.getOperand(0), RHS.getOperand(0),
                                     N->getFlags());
      AddUsersToWorklist(N);
      return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector,
                                  SVN0->getMask());
    }
  }

  return SDValue();
}

SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
                                    SDValue N2) {
  assert(N0.getOpcode() == ISD::SETCC &&
         "First argument must be a SetCC node!");

  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());

  // If we got a simplified select_cc node back from SimplifySelectCC, then
  // break it down into a new SETCC node, and a new SELECT node, and then return
  // the SELECT node, since we were called with a SELECT node.
  if (SCC.getNode()) {
    // Check to see if we got a select_cc back (to turn into setcc/select).
    // Otherwise, just return whatever node we got back, like fabs.
    if (SCC.getOpcode() == ISD::SELECT_CC) {
      SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
                                  N0.getValueType(),
                                  SCC.getOperand(0), SCC.getOperand(1),
                                  SCC.getOperand(4));
      AddToWorklist(SETCC.getNode());
      return DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
                           SCC.getOperand(2), SCC.getOperand(3));
    }

    return SCC;
  }
  return SDValue();
}

/// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
/// being selected between, see if we can simplify the select. Callers of this
/// should assume that TheSelect is deleted if this returns true. As such, they
/// should return the appropriate thing (e.g. the node) back to the top-level of
/// the DAG combiner loop to avoid it being looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
                                    SDValue RHS) {

  // fold (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
  // The select + setcc is redundant, because fsqrt returns NaN for X < 0.
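  // (IEEE-754 sqrt of a negative operand is NaN, so selecting NaN when
  // x < 0 just re-materializes what fsqrt already produces.)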
  if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) {
    if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) {
      // We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?))
      SDValue Sqrt = RHS;
      ISD::CondCode CC;
      SDValue CmpLHS;
      const ConstantFPSDNode *Zero = nullptr;

      if (TheSelect->getOpcode() == ISD::SELECT_CC) {
        // Operand 4 of a SELECT_CC is always the condition code.
        CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
        CmpLHS = TheSelect->getOperand(0);
        Zero = isConstOrConstSplatFP(TheSelect->getOperand(1));
      } else {
        // SELECT or VSELECT
        SDValue Cmp = TheSelect->getOperand(0);
        if (Cmp.getOpcode() == ISD::SETCC) {
          CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
          CmpLHS = Cmp.getOperand(0);
          Zero = isConstOrConstSplatFP(Cmp.getOperand(1));
        }
      }
      if (Zero && Zero->isZero() &&
          Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT ||
          CC == ISD::SETULT || CC == ISD::SETLT)) {
        // We have: (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
        CombineTo(TheSelect, Sqrt);
        return true;
      }
    }
  }
  // Cannot simplify select with vector condition
  if (TheSelect->getOperand(0).getValueType().isVector()) return false;

  // If this is a select from two identical things, try to pull the operation
  // through the select.
  if (LHS.getOpcode() != RHS.getOpcode() ||
      !LHS.hasOneUse() || !RHS.hasOneUse())
    return false;

  // If this is a load and the token chain is identical, replace the select
  // of two loads with a load through a select of the address to load from.
  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
  // constants have been dropped into the constant pool.
  if (LHS.getOpcode() == ISD::LOAD) {
    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
    LoadSDNode *RLD = cast<LoadSDNode>(RHS);

    // Token chains must be identical.
    if (LHS.getOperand(0) != RHS.getOperand(0) ||
        // Do not let this transformation reduce the number of volatile loads.
        LLD->isVolatile() || RLD->isVolatile() ||
        // FIXME: If either is a pre/post inc/dec load,
        // we'd need to split out the address adjustment.
        LLD->isIndexed() || RLD->isIndexed() ||
        // If this is an EXTLOAD, the VT's must match.
        LLD->getMemoryVT() != RLD->getMemoryVT() ||
        // If this is an EXTLOAD, the kind of extension must match.
        (LLD->getExtensionType() != RLD->getExtensionType() &&
         // The only exception is if one of the extensions is anyext.
         LLD->getExtensionType() != ISD::EXTLOAD &&
         RLD->getExtensionType() != ISD::EXTLOAD) ||
        // FIXME: this discards src value information. This is
        // over-conservative. It would be beneficial to be able to remember
        // both potential memory locations. Since we are discarding
        // src value info, don't do the transformation if the memory
        // locations are not in the default address space.
        LLD->getPointerInfo().getAddrSpace() != 0 ||
        RLD->getPointerInfo().getAddrSpace() != 0 ||
        !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
                                      LLD->getBasePtr().getValueType()))
      return false;

    // Check that the select condition doesn't reach either load. If so,
    // folding this will induce a cycle into the DAG. If not, this is safe to
    // xform, so create a select of the addresses.
    SDValue Addr;
    if (TheSelect->getOpcode() == ISD::SELECT) {
      SDNode *CondNode = TheSelect->getOperand(0).getNode();
      if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
          (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
        return false;
      // The loads must not depend on one another.
      if (LLD->isPredecessorOf(RLD) ||
          RLD->isPredecessorOf(LLD))
        return false;
      Addr = DAG.getSelect(SDLoc(TheSelect),
                           LLD->getBasePtr().getValueType(),
                           TheSelect->getOperand(0), LLD->getBasePtr(),
                           RLD->getBasePtr());
    } else {  // Otherwise SELECT_CC
      SDNode *CondLHS = TheSelect->getOperand(0).getNode();
      SDNode *CondRHS = TheSelect->getOperand(1).getNode();

      if ((LLD->hasAnyUseOfValue(1) &&
           (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
          (RLD->hasAnyUseOfValue(1) &&
           (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
        return false;

      Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
                         LLD->getBasePtr().getValueType(),
                         TheSelect->getOperand(0),
                         TheSelect->getOperand(1),
                         LLD->getBasePtr(), RLD->getBasePtr(),
                         TheSelect->getOperand(4));
    }

    SDValue Load;
    // It is safe to replace the two loads if they have different alignments,
    // but the new load must be the minimum (most restrictive) alignment of the
    // inputs.
    unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment());
    MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags();
    if (!RLD->isInvariant())
      MMOFlags &= ~MachineMemOperand::MOInvariant;
    if (!RLD->isDereferenceable())
      MMOFlags &= ~MachineMemOperand::MODereferenceable;
    if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
      // FIXME: Discards pointer and AA info.
      Load = DAG.getLoad(TheSelect->getValueType(0), SDLoc(TheSelect),
                         LLD->getChain(), Addr, MachinePointerInfo(), Alignment,
                         MMOFlags);
    } else {
      // FIXME: Discards pointer and AA info.
      Load = DAG.getExtLoad(
          LLD->getExtensionType() == ISD::EXTLOAD ? RLD->getExtensionType()
                                                  : LLD->getExtensionType(),
          SDLoc(TheSelect), TheSelect->getValueType(0), LLD->getChain(), Addr,
          MachinePointerInfo(), LLD->getMemoryVT(), Alignment, MMOFlags);
    }

    // Users of the select now use the result of the load.
    CombineTo(TheSelect, Load);

    // Users of the old loads now use the new load's chain. We know the
    // old-load value is dead now.
    CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
    CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
    return true;
  }

  return false;
}

/// Try to fold an expression of the form (N0 cond N1) ? N2 : N3 to a shift and
/// bitwise 'and'.
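/// e.g. select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A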
SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0,
                                            SDValue N1, SDValue N2, SDValue N3,
                                            ISD::CondCode CC) {
  // If this is a select where the false operand is zero and the compare is a
  // check of the sign bit, see if we can perform the "gzip trick":
  // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A
  // select_cc setgt X, 0, A, 0 -> and (not (sra X, size(X)-1)), A
  EVT XType = N0.getValueType();
  EVT AType = N2.getValueType();
  if (!isNullConstant(N3) || !XType.bitsGE(AType))
    return SDValue();

  // If the comparison is testing for a positive value, we have to invert
  // the sign bit mask, so only do that transform if the target has a bitwise
  // 'and not' instruction (the invert is free).
  if (CC == ISD::SETGT && TLI.hasAndNot(N2)) {
    // (X > -1) ? A : 0
    // (X > 0) ? X : 0 <-- This is canonical signed max.
    if (!(isAllOnesConstant(N1) || (isNullConstant(N1) && N0 == N2)))
      return SDValue();
  } else if (CC == ISD::SETLT) {
    // (X < 0) ? A : 0
    // (X < 1) ? X : 0 <-- This is un-canonicalized signed min.
    if (!(isNullConstant(N1) || (isOneConstant(N1) && N0 == N2)))
      return SDValue();
  } else {
    return SDValue();
  }

  // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a single-bit
  // constant.
  EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
  auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) {
    unsigned ShCt = XType.getSizeInBits() - N2C->getAPIntValue().logBase2() - 1;
    SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, XType, N0, ShiftAmt);
    AddToWorklist(Shift.getNode());

    if (XType.bitsGT(AType)) {
      Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
      AddToWorklist(Shift.getNode());
    }

    if (CC == ISD::SETGT)
      Shift = DAG.getNOT(DL, Shift, AType);

    return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
  }

  SDValue ShiftAmt = DAG.getConstant(XType.getSizeInBits() - 1, DL, ShiftAmtTy);
  SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, N0, ShiftAmt);
  AddToWorklist(Shift.getNode());

  if (XType.bitsGT(AType)) {
    Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
    AddToWorklist(Shift.getNode());
  }

  if (CC == ISD::SETGT)
    Shift = DAG.getNOT(DL, Shift, AType);

  return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
}

/// Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
                                      SDValue N2, SDValue N3, ISD::CondCode CC,
                                      bool NotExtCompare) {
  // (x ? y : y) -> y.
  if (N2 == N3) return N2;

  EVT VT = N2.getValueType();
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());

  // Determine if the condition we're dealing with is constant
  SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
                              N0, N1, CC, DL, false);
  if (SCC.getNode()) AddToWorklist(SCC.getNode());

  if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
    // fold select_cc true, x, y -> x
    // fold select_cc false, x, y -> y
    return !SCCC->isNullValue() ? N2 : N3;
  }

  // Check to see if we can simplify the select into an fabs node
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
    // Allow either -0.0 or 0.0
    if (CFP->isZero()) {
      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
          N2 == N3.getOperand(0))
        return DAG.getNode(ISD::FABS, DL, VT, N0);

      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
          N2.getOperand(0) == N3)
        return DAG.getNode(ISD::FABS, DL, VT, N3);
    }
  }

  // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
  // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
  // in it. This is a win when the constant is not otherwise available because
  // it replaces two constant pool loads with one. We only do this if the FP
  // type is known to be legal, because if it isn't, then we are before
  // legalize types and we want the other legalization to happen first (e.g.
  // to avoid messing with soft float), and if the ConstantFP is not legal,
  // because if it is legal, we may not need to store the FP constant in a
  // constant pool.
  if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
    if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
      if (TLI.isTypeLegal(N2.getValueType()) &&
          (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
           TargetLowering::Legal &&
           !TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) &&
           !TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) &&
          // If both constants have multiple uses, then we won't need to do an
          // extra load, they are likely around in registers for other users.
          (TV->hasOneUse() || FV->hasOneUse())) {
        Constant *Elts[] = {
          const_cast<ConstantFP*>(FV->getConstantFPValue()),
          const_cast<ConstantFP*>(TV->getConstantFPValue())
        };
        Type *FPTy = Elts[0]->getType();
        const DataLayout &TD = DAG.getDataLayout();

        // Create a ConstantArray of the two constants.
        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
        SDValue CPIdx =
            DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
                                TD.getPrefTypeAlignment(FPTy));
        unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();

        // Get the offsets to the 0 and 1 element of the array so that we can
        // select between them.
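        // (The chosen offset is added to the constant-pool base address, so
        // the whole select collapses into a single indexed load.)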
        SDValue Zero = DAG.getIntPtrConstant(0, DL);
        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
        SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV));

        SDValue Cond = DAG.getSetCC(DL,
                                    getSetCCResultType(N0.getValueType()),
                                    N0, N1, CC);
        AddToWorklist(Cond.getNode());
        SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
                                          Cond, One, Zero);
        AddToWorklist(CstOffset.getNode());
        CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
                            CstOffset);
        AddToWorklist(CPIdx.getNode());
        return DAG.getLoad(
            TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
            MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
            Alignment);
      }
    }

  if (SDValue V = foldSelectCCToShiftAnd(DL, N0, N1, N2, N3, CC))
    return V;

  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
  // where y has a single bit set.
  // In plain terms: we can turn the SELECT_CC into an AND when the condition
  // can be materialized as an all-ones register. Any single bit-test can be
  // materialized as an all-ones register with shift-left and
  // shift-right-arith.
  if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
      N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) {
    SDValue AndLHS = N0->getOperand(0);
    ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
      // Shift the tested bit over the sign bit.
      const APInt &AndMask = ConstAndRHS->getAPIntValue();
      SDValue ShlAmt =
          DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS),
                          getShiftAmountTy(AndLHS.getValueType()));
      SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);

      // Now arithmetic-right-shift it all the way over, so the result is
      // either all-ones or zero.
      SDValue ShrAmt =
          DAG.getConstant(AndMask.getBitWidth() - 1, SDLoc(Shl),
                          getShiftAmountTy(Shl.getValueType()));
      SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);

      return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
    }
  }

  // fold select C, 16, 0 -> shl C, 4
  if (N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2() &&
      TLI.getBooleanContents(N0.getValueType()) ==
          TargetLowering::ZeroOrOneBooleanContent) {

    // If the caller doesn't want us to simplify this into a zext of a compare,
    // don't do it.
    if (NotExtCompare && N2C->isOne())
      return SDValue();

    // Get a SetCC of the condition
    // NOTE: Don't create a SETCC if it's not legal on this target.
    if (!LegalOperations ||
        TLI.isOperationLegal(ISD::SETCC, N0.getValueType())) {
      SDValue Temp, SCC;
      // Cast from the setcc result type to the select result type.
      if (LegalTypes) {
        SCC = DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()),
                           N0, N1, CC);
        if (N2.getValueType().bitsLT(SCC.getValueType()))
          Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2),
                                        N2.getValueType());
        else
          Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
                             N2.getValueType(), SCC);
      } else {
        SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
        Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
                           N2.getValueType(), SCC);
      }

      AddToWorklist(SCC.getNode());
      AddToWorklist(Temp.getNode());

      if (N2C->isOne())
        return Temp;

      // Shift the setcc result left by log2 of N2C.
      return DAG.getNode(
          ISD::SHL, DL, N2.getValueType(), Temp,
          DAG.getConstant(N2C->getAPIntValue().logBase2(), SDLoc(Temp),
                          getShiftAmountTy(Temp.getValueType())));
    }
  }

  // Check to see if this is an integer abs.
  // select_cc setg[te] X,  0,  X, -X ->
  // select_cc setgt    X, -1,  X, -X ->
  // select_cc setl[te] X,  0, -X,  X ->
  // select_cc setlt    X,  1, -X,  X ->
  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
  if (N1C) {
    ConstantSDNode *SubC = nullptr;
    if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
         (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
        N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
    else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
              (N1C->isOne() && CC == ISD::SETLT)) &&
             N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));

    EVT XType = N0.getValueType();
    if (SubC && SubC->isNullValue() && XType.isInteger()) {
      SDLoc DL(N0);
      SDValue Shift = DAG.getNode(ISD::SRA, DL, XType,
                                  N0,
                                  DAG.getConstant(XType.getSizeInBits() - 1,
                                                  DL,
                                                  getShiftAmountTy(N0.getValueType())));
      SDValue Add = DAG.getNode(ISD::ADD, DL,
                                XType, N0, Shift);
      AddToWorklist(Shift.getNode());
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
    }
  }

  // select_cc seteq X, 0, sizeof(X), ctlz(X) -> ctlz(X)
  // select_cc seteq X, 0, sizeof(X), ctlz_zero_undef(X) -> ctlz(X)
  // select_cc seteq X, 0, sizeof(X), cttz(X) -> cttz(X)
  // select_cc seteq X, 0, sizeof(X), cttz_zero_undef(X) -> cttz(X)
  // select_cc setne X, 0, ctlz(X), sizeof(X) -> ctlz(X)
  // select_cc setne X, 0, ctlz_zero_undef(X), sizeof(X) -> ctlz(X)
  // select_cc setne X, 0, cttz(X), sizeof(X) -> cttz(X)
  // select_cc setne X, 0, cttz_zero_undef(X), sizeof(X) -> cttz(X)
  if (N1C && N1C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue ValueOnZero = N2;
    SDValue Count = N3;
    // If the condition is NE instead of EQ, swap the operands.
    if (CC == ISD::SETNE)
      std::swap(ValueOnZero, Count);
    // Check if the value on zero is a constant equal to the bit width of the
    // type.
    if (auto *ValueOnZeroC = dyn_cast<ConstantSDNode>(ValueOnZero)) {
      if (ValueOnZeroC->getAPIntValue() == VT.getSizeInBits()) {
        // If the other operand is cttz/cttz_zero_undef of N0, and cttz is
        // legal, combine to just cttz.
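        //
        // Illustrative example (not from the original source): on i32,
        //   select_cc seteq X, 0, 32, (cttz X) --> (cttz X)
        // is sound because ISD::CTTZ is defined to return the bit width
        // (32 here) for a zero input; only the *_ZERO_UNDEF variants leave
        // that case undefined, which is why the result uses plain CTTZ.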
        if ((Count.getOpcode() == ISD::CTTZ ||
             Count.getOpcode() == ISD::CTTZ_ZERO_UNDEF) &&
            N0 == Count.getOperand(0) &&
            (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ, VT)))
          return DAG.getNode(ISD::CTTZ, DL, VT, N0);
        // If the other operand is ctlz/ctlz_zero_undef of N0, and ctlz is
        // legal, combine to just ctlz.
        if ((Count.getOpcode() == ISD::CTLZ ||
             Count.getOpcode() == ISD::CTLZ_ZERO_UNDEF) &&
            N0 == Count.getOperand(0) &&
            (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ, VT)))
          return DAG.getNode(ISD::CTLZ, DL, VT, N0);
      }
    }
  }

  return SDValue();
}

/// This is a stub for TargetLowering::SimplifySetCC.
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                   ISD::CondCode Cond, const SDLoc &DL,
                                   bool foldBooleans) {
  TargetLowering::DAGCombinerInfo
    DagCombineInfo(DAG, Level, false, this);
  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
}

/// Given an ISD::SDIV node expressing a divide by constant, return a DAG
/// expression that will generate the same value by multiplying by a magic
/// number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
  // When optimizing for minimum size, we don't want to expand a div to a mul
  // and a shift.
  if (DAG.getMachineFunction().getFunction()->optForMinSize())
    return SDValue();

  ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
  if (!C)
    return SDValue();

  // Avoid division by zero.
  if (C->isNullValue())
    return SDValue();

  std::vector<SDNode*> Built;
  SDValue S =
      TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);

  for (SDNode *N : Built)
    AddToWorklist(N);
  return S;
}
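
// Worked example of the magic-number transform (illustrative, not from the
// original source): for a 32-bit signed division by 3, the magic constant is
// 0x55555556, and the generated DAG computes, roughly in scalar C terms:
//   q = (int32_t)(((int64_t)0x55555556 * n) >> 32);  // MULHS of magic and n
//   q = q + ((uint32_t)q >> 31);                     // add 1 when q < 0
// For n = 9 this gives 3 + 0 = 3; for n = -9 it gives -4 + 1 = -3, matching
// truncating signed division.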

/// Given an ISD::SDIV node expressing a divide by constant power of 2, return
/// a DAG expression that will generate the same value by right shifting.
SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
  ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
  if (!C)
    return SDValue();

  // Avoid division by zero.
  if (C->isNullValue())
    return SDValue();

  std::vector<SDNode *> Built;
  SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, &Built);

  for (SDNode *N : Built)
    AddToWorklist(N);
  return S;
}

/// Given an ISD::UDIV node expressing a divide by constant, return a DAG
/// expression that will generate the same value by multiplying by a magic
/// number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
  // When optimizing for minimum size, we don't want to expand a div to a mul
  // and a shift.
  if (DAG.getMachineFunction().getFunction()->optForMinSize())
    return SDValue();

  ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
  if (!C)
    return SDValue();

  // Avoid division by zero.
  if (C->isNullValue())
    return SDValue();

  std::vector<SDNode*> Built;
  SDValue S =
      TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);

  for (SDNode *N : Built)
    AddToWorklist(N);
  return S;
}

/// Determines the LogBase2 value for a non-null input value using the
/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
  EVT VT = V.getValueType();
  unsigned EltBits = VT.getScalarSizeInBits();
  SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V);
  SDValue Base = DAG.getConstant(EltBits - 1, DL, VT);
  SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz);
  return LogBase2;
}

/// Newton's method for a function F(X): X_{i+1} = X_i - F(X_i)/F'(X_i).
/// For the reciprocal, we need to find the zero of the function:
///   F(X) = A X - 1 [which has a zero at X = 1/A]
///     =>
///   X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
///     does not require additional intermediate precision]
SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags) {
  if (Level >= AfterLegalizeDAG)
    return SDValue();

  // TODO: Handle half and/or extended types?
  EVT VT = Op.getValueType();
  if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
    return SDValue();

  // If estimates are explicitly disabled for this function, we're done.
  MachineFunction &MF = DAG.getMachineFunction();
  int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF);
  if (Enabled == TLI.ReciprocalEstimate::Disabled)
    return SDValue();

  // Estimates may be explicitly enabled for this type with a custom number of
  // refinement steps.
  int Iterations = TLI.getDivRefinementSteps(VT, MF);
  if (SDValue Est = TLI.getRecipEstimate(Op, DAG, Enabled, Iterations)) {
    AddToWorklist(Est.getNode());

    if (Iterations) {
      EVT VT = Op.getValueType();
      SDLoc DL(Op);
      SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);

      // Newton iterations: Est = Est + Est (1 - Arg * Est)
      for (int i = 0; i < Iterations; ++i) {
        SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, Est, Flags);
        AddToWorklist(NewEst.getNode());

        NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPOne, NewEst, Flags);
        AddToWorklist(NewEst.getNode());

        NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
        AddToWorklist(NewEst.getNode());

        Est = DAG.getNode(ISD::FADD, DL, VT, Est, NewEst, Flags);
        AddToWorklist(Est.getNode());
      }
    }
    return Est;
  }

  return SDValue();
}

/// Newton's method for a function F(X): X_{i+1} = X_i - F(X_i)/F'(X_i).
/// For the reciprocal sqrt, we need to find the zero of the function:
///   F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
///     =>
///   X_{i+1} = X_i (1.5 - A X_i^2 / 2)
/// As a result, we precompute A/2 prior to the iteration loop.
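///
/// Worked example (illustrative, not from the original source): for A = 2
/// and an initial estimate X_0 = 0.7, one iteration gives
///   X_1 = 0.7 * (1.5 - (2/2) * 0.7^2) = 0.7 * 1.01 = 0.707,
/// already close to 1/sqrt(2) ~= 0.7071.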
SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags *Flags, bool Reciprocal) {
  EVT VT = Arg.getValueType();
  SDLoc DL(Arg);
  SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);

  // We now need 0.5 * Arg, which we can write as (1.5 * Arg - Arg) so that
  // this entire sequence requires only one FP constant.
  SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg, Flags);
  AddToWorklist(HalfArg.getNode());

  HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg, Flags);
  AddToWorklist(HalfArg.getNode());

  // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
  for (unsigned i = 0; i < Iterations; ++i) {
    SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est, Flags);
    AddToWorklist(NewEst.getNode());

    NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst, Flags);
    AddToWorklist(NewEst.getNode());

    NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst, Flags);
    AddToWorklist(NewEst.getNode());

    Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
    AddToWorklist(Est.getNode());
  }

  // If a non-reciprocal square root is requested, multiply the result by Arg.
  if (!Reciprocal) {
    Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg, Flags);
    AddToWorklist(Est.getNode());
  }

  return Est;
}

/// Newton's method for a function F(X): X_{i+1} = X_i - F(X_i)/F'(X_i).
/// For the reciprocal sqrt, we need to find the zero of the function:
///   F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
///     =>
///   X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags *Flags, bool Reciprocal) {
  EVT VT = Arg.getValueType();
  SDLoc DL(Arg);
  SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
  SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT);

  // This routine must enter the loop below to work correctly
  // when (Reciprocal == false).
  assert(Iterations > 0);

  // Newton iterations for reciprocal square root:
  //   E = (E * -0.5) * ((A * E) * E + -3.0)
  for (unsigned i = 0; i < Iterations; ++i) {
    SDValue AE = DAG.getNode(ISD::FMUL, DL, VT, Arg, Est, Flags);
    AddToWorklist(AE.getNode());

    SDValue AEE = DAG.getNode(ISD::FMUL, DL, VT, AE, Est, Flags);
    AddToWorklist(AEE.getNode());

    SDValue RHS = DAG.getNode(ISD::FADD, DL, VT, AEE, MinusThree, Flags);
    AddToWorklist(RHS.getNode());

    // When calculating a square root at the last iteration build:
    //   S = ((A * E) * -0.5) * ((A * E) * E + -3.0)
    // (notice a common subexpression)
    SDValue LHS;
    if (Reciprocal || (i + 1) < Iterations) {
      // RSQRT: LHS = (E * -0.5)
      LHS = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf, Flags);
    } else {
      // SQRT: LHS = (A * E) * -0.5
      LHS = DAG.getNode(ISD::FMUL, DL, VT, AE, MinusHalf, Flags);
    }
    AddToWorklist(LHS.getNode());

    Est = DAG.getNode(ISD::FMUL, DL, VT, LHS, RHS, Flags);
    AddToWorklist(Est.getNode());
  }

  return Est;
}
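
// Worked example for the two-constant form above (illustrative, not from the
// original source): for A = 2 and estimate E = 0.7, one iteration computes
//   AE = 1.4, AEE = 0.98, RHS = 0.98 + -3.0 = -2.02, LHS = 0.7 * -0.5 = -0.35
// giving a refined estimate of -0.35 * -2.02 = 0.707 ~= 1/sqrt(2). On the
// final non-reciprocal iteration, LHS = 1.4 * -0.5 = -0.7 instead, giving
// -0.7 * -2.02 = 1.414 ~= sqrt(2).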

/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case,
/// Op * rsqrt(Op) is actually computed, so additional postprocessing is
/// needed if Op can be zero.
SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags,
                                           bool Reciprocal) {
  if (Level >= AfterLegalizeDAG)
    return SDValue();

  // TODO: Handle half and/or extended types?
  EVT VT = Op.getValueType();
  if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
    return SDValue();

  // If estimates are explicitly disabled for this function, we're done.
  MachineFunction &MF = DAG.getMachineFunction();
  int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF);
  if (Enabled == TLI.ReciprocalEstimate::Disabled)
    return SDValue();

  // Estimates may be explicitly enabled for this type with a custom number of
  // refinement steps.
  int Iterations = TLI.getSqrtRefinementSteps(VT, MF);

  bool UseOneConstNR = false;
  if (SDValue Est =
          TLI.getSqrtEstimate(Op, DAG, Enabled, Iterations, UseOneConstNR,
                              Reciprocal)) {
    AddToWorklist(Est.getNode());

    if (Iterations) {
      Est = UseOneConstNR
                ? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal)
                : buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal);

      if (!Reciprocal) {
        // Unfortunately, Est is now NaN if the input was exactly 0.0.
        // Select out this case and force the answer to 0.0.
        EVT VT = Op.getValueType();
        SDLoc DL(Op);

        SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
        EVT CCVT = getSetCCResultType(VT);
        SDValue ZeroCmp = DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
        AddToWorklist(ZeroCmp.getNode());

        Est = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
                          ZeroCmp, FPZero, Est);
        AddToWorklist(Est.getNode());
      }
    }
    return Est;
  }

  return SDValue();
}

SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags) {
  return buildSqrtEstimateImpl(Op, Flags, true);
}

SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags) {
  return buildSqrtEstimateImpl(Op, Flags, false);
}

/// Return true if base is a frame index, which is known not to alias with
/// anything but itself. Provides base object and offset as results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
                           const GlobalValue *&GV, const void *&CV) {
  // Assume it is a primitive operation.
  Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;

  // If it's adding a simple constant then integrate the offset.
  if (Base.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
      Base = Base.getOperand(0);
      Offset += C->getZExtValue();
    }
  }

  // Return the underlying GlobalValue, and update the Offset. Return false
  // for GlobalAddressSDNode since the same GlobalAddress may be represented
  // by multiple nodes with different offsets.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
    GV = G->getGlobal();
    Offset += G->getOffset();
    return false;
  }

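  // Illustrative example (not from the original source): for
  // Ptr = (add (FrameIndex 3), 8) the code above leaves Base = FrameIndex 3
  // and Offset = 8, and the function returns true below because a frame
  // index is known not to alias with anything but itself.
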
  // Return the underlying Constant value, and update the Offset. Return false
  // for ConstantSDNodes since the same constant pool entry may be represented
  // by multiple nodes with different offsets.
  if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
    CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal()
                                         : (const void *)C->getConstVal();
    Offset += C->getOffset();
    return false;
  }

  // Only a frame index is known not to alias with anything but itself.
  return isa<FrameIndexSDNode>(Base);
}

/// Return true if there is any possibility that the two addresses overlap.
bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
  // If they are the same then they must be aliases.
  if (Op0->getBasePtr() == Op1->getBasePtr()) return true;

  // If they are both volatile then they cannot be reordered.
  if (Op0->isVolatile() && Op1->isVolatile()) return true;

  // If one operation reads from invariant memory, and the other may store,
  // they cannot alias. These should really be checking the equivalent of
  // mayWrite, but it only matters for memory nodes other than load/store.
  if (Op0->isInvariant() && Op1->writeMem())
    return false;

  if (Op1->isInvariant() && Op0->writeMem())
    return false;

  // Gather base node and offset information.
  SDValue Base1, Base2;
  int64_t Offset1, Offset2;
  const GlobalValue *GV1, *GV2;
  const void *CV1, *CV2;
  bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
                                      Base1, Offset1, GV1, CV1);
  bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
                                      Base2, Offset2, GV2, CV2);

  // If they have the same base address then check to see if they overlap.
  if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
    return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
             (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);

  // It is possible for different frame indices to alias each other, mostly
  // when tail call optimization reuses return address slots for arguments.
  // To catch this case, look up the actual index of frame indices to compute
  // the real alias relationship.
  if (isFrameIndex1 && isFrameIndex2) {
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    Offset1 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
    Offset2 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
    return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
             (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
  }

  // Otherwise, if we know what the bases are, and they aren't identical, then
  // we know they cannot alias.
  if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
    return false;

  // If we know that SrcValue1 and SrcValue2 have relatively large alignment
  // compared to the size and offset of the access, we may be able to prove
  // they do not alias. This check is conservative for now to catch cases
  // created by splitting vector types.
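  //
  // Worked example (illustrative, not from the original source): two 4-byte
  // accesses, each 8-byte aligned within its object, at object offsets 16 and
  // 4 have OffAlign values 16 % 8 = 0 and 4 % 8 = 4; since 0 + 4 <= 4, the
  // accesses cannot overlap and no alias is reported.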
  if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) &&
      (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) &&
      (Op0->getMemoryVT().getSizeInBits() >> 3 ==
       Op1->getMemoryVT().getSizeInBits() >> 3) &&
      (Op0->getOriginalAlignment() >
       (Op0->getMemoryVT().getSizeInBits() >> 3))) {
    int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment();
    int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment();

    // If there is no overlap between these relatively aligned accesses of
    // similar size, return no alias.
    if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 ||
        (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1)
      return false;
  }

  bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0
                   ? CombinerGlobalAA
                   : DAG.getSubtarget().useAA();
#ifndef NDEBUG
  if (CombinerAAOnlyFunc.getNumOccurrences() &&
      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
    UseAA = false;
#endif
  if (UseAA &&
      Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
    // Use alias analysis information.
    int64_t MinOffset = std::min(Op0->getSrcValueOffset(),
                                 Op1->getSrcValueOffset());
    int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) +
                       Op0->getSrcValueOffset() - MinOffset;
    int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
                       Op1->getSrcValueOffset() - MinOffset;
    AliasResult AAResult =
        AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1,
                                UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
                 MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2,
                                UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
    if (AAResult == NoAlias)
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}

/// Walk up the chain skipping non-aliasing memory nodes, looking for
/// aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
                                   SmallVectorImpl<SDValue> &Aliases) {
  SmallVector<SDValue, 8> Chains;     // List of chains to visit.
  SmallPtrSet<SDNode *, 16> Visited;  // Visited node set.

  // Get alias information for node.
  bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile();

  // Starting off.
  Chains.push_back(OriginalChain);
  unsigned Depth = 0;

  // Look at each chain and determine if it is an alias. If so, add it to the
  // aliases list. If not, then continue up the chain looking for the next
  // candidate.
  while (!Chains.empty()) {
    SDValue Chain = Chains.pop_back_val();

    // For TokenFactor nodes, look at each operand and only continue up the
    // chain until we reach the depth limit.
    //
    // FIXME: The depth check could be made to return the last non-aliasing
    // chain we found before we hit a tokenfactor rather than the original
    // chain.
    if (Depth > TLI.getGatherAllAliasesMaxDepth()) {
      Aliases.clear();
      Aliases.push_back(OriginalChain);
      return;
    }

    // Don't bother if we've been here before.
    if (!Visited.insert(Chain.getNode()).second)
      continue;

    switch (Chain.getOpcode()) {
    case ISD::EntryToken:
      // Entry token is ideal chain operand, but handled in FindBetterChain.
      break;

    case ISD::LOAD:
    case ISD::STORE: {
      // Get alias information for Chain.
      bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) &&
                      !cast<LSBaseSDNode>(Chain.getNode())->isVolatile();

      // If the chain is an alias then stop here.
      if (!(IsLoad && IsOpLoad) &&
          isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) {
        Aliases.push_back(Chain);
      } else {
        // Look further up the chain.
        Chains.push_back(Chain.getOperand(0));
        ++Depth;
      }
      break;
    }

    case ISD::TokenFactor:
      // We have to check each of the operands of the token factor for "small"
      // token factors, so we queue them up. Adding the operands to the queue
      // (stack) in reverse order maintains the original order and increases
      // the likelihood that getNode will find a matching token factor (CSE).
      if (Chain.getNumOperands() > 16) {
        Aliases.push_back(Chain);
        break;
      }
      for (unsigned n = Chain.getNumOperands(); n;)
        Chains.push_back(Chain.getOperand(--n));
      ++Depth;
      break;

    default:
      // For all other instructions we will just have to take what we can get.
      Aliases.push_back(Chain);
      break;
    }
  }
}

/// Walk up the chain skipping non-aliasing memory nodes, looking for a better
/// chain (aliasing node).
SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
  SmallVector<SDValue, 8> Aliases;  // Ops for replacing token factor.

  // Accumulate all the aliases to this node.
  GatherAllAliases(N, OldChain, Aliases);

  // If there are no operands then chain to the entry token.
  if (Aliases.size() == 0)
    return DAG.getEntryNode();

  // If there is a single operand then chain to it. We don't need to revisit
  // it.
  if (Aliases.size() == 1)
    return Aliases[0];

  // Construct a custom tailored token factor.
  return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
}

bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
  // This holds the base pointer, index, and the offset in bytes from the base
  // pointer.
  BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);

  // We must have a base and an offset.
  if (!BasePtr.Base.getNode())
    return false;

  // Do not handle stores to undef base pointers.
  if (BasePtr.Base.isUndef())
    return false;

  SmallVector<StoreSDNode *, 8> ChainedStores;
  ChainedStores.push_back(St);

  // Walk up the chain and look for nodes with offsets from the same
  // base pointer. Stop when reaching an instruction of a different kind or
  // one which has a different base pointer.
  StoreSDNode *Index = St;
  while (Index) {
    // If the chain has more than one use, then we can't reorder the mem ops.
    if (Index != St && !SDValue(Index, 0)->hasOneUse())
      break;

    if (Index->isVolatile() || Index->isIndexed())
      break;

    // Find the base pointer and offset for this memory node.
    BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG);

    // Check that the base pointer is the same as the original one.
    if (!Ptr.equalBaseIndex(BasePtr))
      break;

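    // Illustrative note (not from the original source): for a chain of
    // stores to FI#1+0, FI#1+4 and FI#1+8, each iteration matches the shared
    // base FI#1 and the store joins ChainedStores; a store to a different
    // base, or a volatile or indexed one, ends the walk.
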
    // Find the next memory operand in the chain. If the next operand in the
    // chain is a store then move up and continue the scan with the next
    // memory operand. If the next operand is a load, save it and use alias
    // information to check if it interferes with anything.
    SDNode *NextInChain = Index->getChain().getNode();
    while (true) {
      if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
        // We found a store node. Use it for the next iteration.
        if (STn->isVolatile() || STn->isIndexed()) {
          Index = nullptr;
          break;
        }
        ChainedStores.push_back(STn);
        Index = STn;
        break;
      } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
        NextInChain = Ldn->getChain().getNode();
        continue;
      } else {
        Index = nullptr;
        break;
      }
    }
  }

  bool MadeChangeToSt = false;
  SmallVector<std::pair<StoreSDNode *, SDValue>, 8> BetterChains;

  for (StoreSDNode *ChainedStore : ChainedStores) {
    SDValue Chain = ChainedStore->getChain();
    SDValue BetterChain = FindBetterChain(ChainedStore, Chain);

    if (Chain != BetterChain) {
      if (ChainedStore == St)
        MadeChangeToSt = true;
      BetterChains.push_back(std::make_pair(ChainedStore, BetterChain));
    }
  }

  // Do all replacements after finding the replacements to make to avoid
  // making the chains more complicated by introducing new TokenFactors.
  for (auto Replacement : BetterChains)
    replaceStoreChain(Replacement.first, Replacement.second);

  return MadeChangeToSt;
}

/// This is the entry point for the file.
void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA,
                           CodeGenOpt::Level OptLevel) {
  // This is the main entry point to this class.
  DAGCombiner(*this, AA, OptLevel).Run(Level);
}