//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "dagcombine"

STATISTIC(NodesCombined   , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int      , "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads, "Number of loads sliced");

namespace {
  static cl::opt<bool>
    CombinerAA("combiner-alias-analysis", cl::Hidden,
               cl::desc("Enable DAG combiner alias-analysis heuristics"));

  static cl::opt<bool>
    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
               cl::desc("Enable DAG combiner's use of IR alias analysis"));

  static cl::opt<bool>
    UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
               cl::desc("Enable DAG combiner's use of TBAA"));

#ifndef NDEBUG
  static cl::opt<std::string>
    CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
               cl::desc("Only use DAG-combiner alias analysis in this"
                        " function"));
#endif

  /// Hidden option to stress test load slicing, i.e., when this option
  /// is enabled, load slicing bypasses most of its profitability guards.
  static cl::opt<bool>
  StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
                    cl::desc("Bypass the profitability model of load "
                             "slicing"),
                    cl::init(false));

//------------------------------ DAGCombiner ---------------------------------//

  class DAGCombiner {
    SelectionDAG &DAG;
    const TargetLowering &TLI;
    CombineLevel Level;
    CodeGenOpt::Level OptLevel;
    bool LegalOperations;
    bool LegalTypes;
    bool ForCodeSize;

    /// \brief Worklist of all of the nodes that need to be simplified.
    ///
    /// This must behave as a stack -- new nodes to process are pushed onto the
    /// back and when processing we pop off of the back.
    ///
    /// The worklist will not contain duplicates but may contain null entries
    /// due to nodes being deleted from the underlying DAG.
    SmallVector<SDNode *, 64> Worklist;

    /// \brief Mapping from an SDNode to its position on the worklist.
    ///
    /// This is used to find and remove nodes from the worklist (by nulling
    /// them) when they are deleted from the underlying DAG. It relies on
    /// stable indices of nodes within the worklist.
    DenseMap<SDNode *, unsigned> WorklistMap;

    /// \brief Set of nodes which have been combined (at least once).
    ///
    /// This is used to allow us to reliably add any operands of a DAG node
    /// which have not yet been combined to the worklist.
    SmallPtrSet<SDNode *, 64> CombinedNodes;

    // AA - Used for DAG load/store alias analysis.
    AliasAnalysis &AA;

    /// AddUsersToWorklist - When an instruction is simplified, add all users
    /// of the instruction to the worklist because they might get more
    /// simplified now.
    ///
    void AddUsersToWorklist(SDNode *N) {
      for (SDNode *Node : N->uses())
        AddToWorklist(Node);
    }

    /// visit - call the node-specific routine that knows how to fold each
    /// particular type of node.
    SDValue visit(SDNode *N);

  public:
    /// AddToWorklist - Add to the worklist making sure its instance is at the
    /// back (next to be processed).
    void AddToWorklist(SDNode *N) {
      // Skip handle nodes as they can't usefully be combined and confuse the
      // zero-use deletion strategy.
      if (N->getOpcode() == ISD::HANDLENODE)
        return;

      if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
        Worklist.push_back(N);
    }

    /// removeFromWorklist - remove all instances of N from the worklist.
    ///
    void removeFromWorklist(SDNode *N) {
      CombinedNodes.erase(N);

      auto It = WorklistMap.find(N);
      if (It == WorklistMap.end())
        return; // Not in the worklist.

      // Null out the entry rather than erasing it to avoid a linear operation.
      Worklist[It->second] = nullptr;
      WorklistMap.erase(It);
    }

    void deleteAndRecombine(SDNode *N);
    bool recursivelyDeleteUnusedNodes(SDNode *N);

    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                      bool AddTo = true);

    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
      return CombineTo(N, &Res, 1, AddTo);
    }

    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                      bool AddTo = true) {
      SDValue To[] = { Res0, Res1 };
      return CombineTo(N, To, 2, AddTo);
    }

    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);

  private:

    /// SimplifyDemandedBits - Check the specified integer node value to see if
    /// it can be simplified or if things it uses can be simplified by bit
    /// propagation. If so, return true.
    bool SimplifyDemandedBits(SDValue Op) {
      unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
      APInt Demanded = APInt::getAllOnesValue(BitWidth);
      return SimplifyDemandedBits(Op, Demanded);
    }

    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);

    bool CombineToPreIndexedLoadStore(SDNode *N);
    bool CombineToPostIndexedLoadStore(SDNode *N);
    bool SliceUpLoad(SDNode *N);

    /// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
    ///   load.
    ///
    /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
    /// \param InVecVT type of the input vector to EVE with bitcasts resolved.
    /// \param EltNo index of the vector element to load.
    /// \param OriginalLoad load that EVE came from to be replaced.
    /// \returns EVE on success, SDValue() on failure.
    SDValue ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
        SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad);
    void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
    SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
    SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue PromoteIntBinOp(SDValue Op);
    SDValue PromoteIntShiftOp(SDValue Op);
    SDValue PromoteExtend(SDValue Op);
    bool PromoteLoad(SDValue Op);

    void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
                         SDValue Trunc, SDValue ExtLoad, SDLoc DL,
                         ISD::NodeType ExtType);

    /// combine - call the node-specific routine that knows how to fold each
    /// particular type of node. If that doesn't do anything, try the
    /// target-specific DAG combines.
    SDValue combine(SDNode *N);

    // Visitation implementation - Implement dag node combining for different
    // node types.  The semantics are as follows:
    // Return Value:
    //   SDValue.getNode() == 0 - No change was made
    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
    //   otherwise              - N should be replaced by the returned Operand.
    //
    SDValue visitTokenFactor(SDNode *N);
    SDValue visitMERGE_VALUES(SDNode *N);
    SDValue visitADD(SDNode *N);
    SDValue visitSUB(SDNode *N);
    SDValue visitADDC(SDNode *N);
    SDValue visitSUBC(SDNode *N);
    SDValue visitADDE(SDNode *N);
    SDValue visitSUBE(SDNode *N);
    SDValue visitMUL(SDNode *N);
    SDValue visitSDIV(SDNode *N);
    SDValue visitUDIV(SDNode *N);
    SDValue visitSREM(SDNode *N);
    SDValue visitUREM(SDNode *N);
    SDValue visitMULHU(SDNode *N);
    SDValue visitMULHS(SDNode *N);
    SDValue visitSMUL_LOHI(SDNode *N);
    SDValue visitUMUL_LOHI(SDNode *N);
    SDValue visitSMULO(SDNode *N);
    SDValue visitUMULO(SDNode *N);
    SDValue visitSDIVREM(SDNode *N);
    SDValue visitUDIVREM(SDNode *N);
    SDValue visitAND(SDNode *N);
    SDValue visitOR(SDNode *N);
    SDValue visitXOR(SDNode *N);
    SDValue SimplifyVBinOp(SDNode *N);
    SDValue SimplifyVUnaryOp(SDNode *N);
    SDValue visitSHL(SDNode *N);
    SDValue visitSRA(SDNode *N);
    SDValue visitSRL(SDNode *N);
    SDValue visitRotate(SDNode *N);
    SDValue visitCTLZ(SDNode *N);
    SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTTZ(SDNode *N);
    SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTPOP(SDNode *N);
    SDValue visitSELECT(SDNode *N);
    SDValue visitVSELECT(SDNode *N);
    SDValue visitSELECT_CC(SDNode *N);
    SDValue visitSETCC(SDNode *N);
    SDValue visitSIGN_EXTEND(SDNode *N);
    SDValue visitZERO_EXTEND(SDNode *N);
    SDValue visitANY_EXTEND(SDNode *N);
    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
    SDValue visitTRUNCATE(SDNode *N);
    SDValue visitBITCAST(SDNode *N);
    SDValue visitBUILD_PAIR(SDNode *N);
    SDValue visitFADD(SDNode *N);
    SDValue visitFSUB(SDNode *N);
    SDValue visitFMUL(SDNode *N);
    SDValue visitFMA(SDNode *N);
    SDValue visitFDIV(SDNode *N);
    SDValue visitFREM(SDNode *N);
    SDValue visitFCOPYSIGN(SDNode *N);
    SDValue visitSINT_TO_FP(SDNode *N);
    SDValue visitUINT_TO_FP(SDNode *N);
    SDValue visitFP_TO_SINT(SDNode *N);
    SDValue visitFP_TO_UINT(SDNode *N);
    SDValue visitFP_ROUND(SDNode *N);
    SDValue visitFP_ROUND_INREG(SDNode *N);
    SDValue visitFP_EXTEND(SDNode *N);
    SDValue visitFNEG(SDNode *N);
    SDValue visitFABS(SDNode *N);
    SDValue visitFCEIL(SDNode *N);
    SDValue visitFTRUNC(SDNode *N);
    SDValue visitFFLOOR(SDNode *N);
    SDValue visitBRCOND(SDNode *N);
    SDValue visitBR_CC(SDNode *N);
    SDValue visitLOAD(SDNode *N);
    SDValue visitSTORE(SDNode *N);
    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
    SDValue visitBUILD_VECTOR(SDNode *N);
    SDValue visitCONCAT_VECTORS(SDNode *N);
    SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
    SDValue visitVECTOR_SHUFFLE(SDNode *N);
    SDValue visitINSERT_SUBVECTOR(SDNode *N);

    SDValue XformToShuffleWithZero(SDNode *N);
    SDValue ReassociateOps(unsigned Opc, SDLoc DL, SDValue LHS, SDValue RHS);

    SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);

    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
    SDValue SimplifySelect(SDLoc DL, SDValue N0, SDValue N1, SDValue N2);
    SDValue SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1, SDValue N2,
                             SDValue N3, ISD::CondCode CC,
                             bool NotExtCompare = false);
    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                          SDLoc DL, bool foldBooleans = true);

    bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                           SDValue &CC) const;
    bool isOneUseSetCC(SDValue N) const;

    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                       unsigned HiOp);
    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
    SDValue BuildSDIV(SDNode *N);
    SDValue BuildSDIVPow2(SDNode *N);
    SDValue BuildUDIV(SDNode *N);
    SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                               bool DemandHighBits = true);
    SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
    SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
                              SDValue InnerPos, SDValue InnerNeg,
                              unsigned PosOpcode, unsigned NegOpcode,
                              SDLoc DL);
    SDNode *MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL);
    SDValue ReduceLoadWidth(SDNode *N);
    SDValue ReduceLoadOpStoreWidth(SDNode *N);
    SDValue TransformFPLoadStorePair(SDNode *N);
    SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
    SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);

    SDValue GetDemandedBits(SDValue V, const APInt &Mask);

    /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
    /// looking for aliasing nodes and adding them to the Aliases vector.
    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
                          SmallVectorImpl<SDValue> &Aliases);

    /// isAlias - Return true if there is any possibility that the two
    /// addresses overlap.
    bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const;

    /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
    /// looking for a better chain (aliasing node).
    SDValue FindBetterChain(SDNode *N, SDValue Chain);

    /// Merge consecutive store operations into a wide store.
    /// This optimization uses wide integers or vectors when possible.
    /// \return True if some memory operations were changed.
    bool MergeConsecutiveStores(StoreSDNode *N);

    /// \brief Try to transform a truncation where C is a constant:
    ///   (trunc (and X, C)) -> (and (trunc X), (trunc C))
    ///
    /// \p N needs to be a truncation and its first operand an AND. Other
    /// requirements are checked by the function (e.g. that trunc is
    /// single-use); if they are not met, an empty SDValue is returned.
    SDValue distributeTruncateThroughAnd(SDNode *N);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
        : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
          OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
      AttributeSet FnAttrs =
          DAG.getMachineFunction().getFunction()->getAttributes();
      ForCodeSize =
          FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                               Attribute::OptimizeForSize) ||
          FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
    }

    /// Run - runs the dag combiner on all nodes in the worklist
    void Run(CombineLevel AtLevel);

    SelectionDAG &getDAG() const { return DAG; }

    /// getShiftAmountTy - Returns a type large enough to hold any valid
    /// shift amount - before type legalization these can be huge.
    EVT getShiftAmountTy(EVT LHSTy) {
      assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
      if (LHSTy.isVector())
        return LHSTy;
      return LegalTypes ? TLI.getScalarShiftAmountTy(LHSTy)
                        : TLI.getPointerTy();
    }

    /// isTypeLegal - This method returns true if we are running before type
    /// legalization or if the specified VT is legal.
    bool isTypeLegal(const EVT &VT) {
      if (!LegalTypes) return true;
      return TLI.isTypeLegal(VT);
    }

    /// getSetCCResultType - Convenience wrapper around
    /// TargetLowering::getSetCCResultType
    EVT getSetCCResultType(EVT VT) const {
      return TLI.getSetCCResultType(*DAG.getContext(), VT);
    }
  };
}


namespace {
/// WorklistRemover - This class is a DAGUpdateListener that removes any
/// deleted nodes from the worklist.
class WorklistRemover : public SelectionDAG::DAGUpdateListener {
  DAGCombiner &DC;
public:
  explicit WorklistRemover(DAGCombiner &dc)
    : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}

  void NodeDeleted(SDNode *N, SDNode *E) override {
    DC.removeFromWorklist(N);
  }
};
}

//===----------------------------------------------------------------------===//
//  TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//

void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->AddToWorklist(N);
}

void TargetLowering::DAGCombinerInfo::RemoveFromWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->removeFromWorklist(N);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, const std::vector<SDValue> &To, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
}


SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
}

void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

void DAGCombiner::deleteAndRecombine(SDNode *N) {
  removeFromWorklist(N);

  // If the operands of this node are only used by the node, they will now be
  // dead. Make sure to re-visit them and recursively delete dead nodes.
  for (const SDValue &Op : N->ops())
    if (Op->hasOneUse())
      AddToWorklist(Op.getNode());

  DAG.DeleteNode(N);
}
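// A brief note on the cost model used by isNegatibleForFree below: a value
// like (fneg X) is negatible with cost 2 because negating it simply strips
// the fneg, while e.g. a ConstantFP is cost 1 before operation legalization
// because materializing the negated constant is no more expensive than the
// original.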
/// isNegatibleForFree - Return 1 if we can compute the negated form of the
/// specified expression for the same cost as the expression itself, or 2 if we
/// can compute the negated form more cheaply than the expression itself.
static char isNegatibleForFree(SDValue Op, bool LegalOperations,
                               const TargetLowering &TLI,
                               const TargetOptions *Options,
                               unsigned Depth = 0) {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return 2;

  // Don't allow anything with multiple uses.
  if (!Op.hasOneUse()) return 0;

  // Don't recurse exponentially.
  if (Depth > 6) return 0;

  switch (Op.getOpcode()) {
  default: return 0;
  case ISD::ConstantFP:
    // Don't invert constant FP values after legalize.  The negated constant
    // isn't necessarily legal.
    return LegalOperations ? 0 : 1;
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
    if (!Options->UnsafeFPMath) return 0;

    // After operation legalization, it might not be legal to create new FSUBs.
    if (LegalOperations &&
        !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType()))
      return 0;

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
                                    Options, Depth + 1))
      return V;
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
                              Depth + 1);
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!Options->UnsafeFPMath) return 0;

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return 1;

  case ISD::FMUL:
  case ISD::FDIV:
    if (Options->HonorSignDependentRoundingFPMath()) return 0;

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
    if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
                                    Options, Depth + 1))
      return V;

    return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
                              Depth + 1);

  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::FSIN:
    return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
                              Depth + 1);
  }
}

/// GetNegatedExpression - If isNegatibleForFree returns a non-zero value, this
/// function returns the newly negated expression.
static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                    bool LegalOperations, unsigned Depth = 0) {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);

  // Don't allow anything with multiple uses.
  assert(Op.hasOneUse() && "Unknown reuse!");

  assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown code");
  case ISD::ConstantFP: {
    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
    V.changeSign();
    return DAG.getConstantFP(V, Op.getValueType());
  }
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
    assert(DAG.getTarget().Options.UnsafeFPMath);

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
                           DAG.getTargetLoweringInfo(),
                           &DAG.getTarget().Options, Depth+1))
      return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, Depth+1),
                         Op.getOperand(1));
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, Depth+1),
                       Op.getOperand(0));
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    assert(DAG.getTarget().Options.UnsafeFPMath);

    // fold (fneg (fsub 0, B)) -> B
    if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
      if (N0CFP->getValueAPF().isZero())
        return Op.getOperand(1);

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(0));

  case ISD::FMUL:
  case ISD::FDIV:
    assert(!DAG.getTarget().Options.HonorSignDependentRoundingFPMath());

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
    if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
                           DAG.getTargetLoweringInfo(),
                           &DAG.getTarget().Options, Depth+1))
      return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, Depth+1),
                         Op.getOperand(1));

    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       Op.getOperand(0),
                       GetNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, Depth+1));

  case ISD::FP_EXTEND:
  case ISD::FSIN:
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, Depth+1));
  case ISD::FP_ROUND:
    return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, Depth+1),
                       Op.getOperand(1));
  }
}

// isSetCCEquivalent - Return true if this node is a setcc, or is a select_cc
// that selects between the target values used for true and false, making it
// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
// the appropriate nodes based on the type of node we are checking.  This
// simplifies life a bit for the callers.
bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                                    SDValue &CC) const {
  if (N.getOpcode() == ISD::SETCC) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(2);
    return true;
  }

  if (N.getOpcode() != ISD::SELECT_CC ||
      !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
      !TLI.isConstFalseVal(N.getOperand(3).getNode()))
    return false;

  LHS = N.getOperand(0);
  RHS = N.getOperand(1);
  CC  = N.getOperand(4);
  return true;
}

// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with
// only one use.  If this is true, it allows the users to invert the operation
// for free when it is profitable to do so.
bool DAGCombiner::isOneUseSetCC(SDValue N) const {
  SDValue N0, N1, N2;
  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
    return true;
  return false;
}

/// isConstantSplatVector - Returns true if N is a BUILD_VECTOR node whose
/// elements are all the same constant or undefined.
static bool isConstantSplatVector(SDNode *N, APInt& SplatValue) {
  BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(N);
  if (!C)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  EVT EltVT = N->getValueType(0).getVectorElementType();
  return (C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                             HasAnyUndefs) &&
          EltVT.getSizeInBits() >= SplatBitSize);
}
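// For example, a v4i32 BUILD_VECTOR of <7, 7, undef, 7> is a constant splat
// with SplatValue 7, while <7, 7, 7, 8> is not a splat at all.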
// \brief Returns the SDNode if it is a constant BuildVector or constant.
static SDNode *isConstantBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
  if (BV && BV->isConstant())
    return BV;
  return nullptr;
}

// \brief Returns the SDNode if it is a constant splat BuildVector or constant
// int.
static ConstantSDNode *isConstOrConstSplat(SDValue N) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here.
    // FIXME: We blindly ignore splats which include undef which is overly
    // pessimistic.
    if (CN && UndefElements.none() &&
        CN->getValueType(0) == N.getValueType().getScalarType())
      return CN;
  }

  return nullptr;
}
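// ReassociateOps applies the standard reassociation patterns. For example,
// with Opc == ISD::ADD: (add (add x, 4), 8) becomes (add x, 12), and
// (add (add x, 4), y) becomes (add (add x, y), 4) when the inner add has a
// single use, moving the constant outward where later folds can see it.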
SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
                                    SDValue N0, SDValue N1) {
  EVT VT = N0.getValueType();
  if (N0.getOpcode() == Opc) {
    if (SDNode *L = isConstantBuildVectorOrConstantInt(N0.getOperand(1))) {
      if (SDNode *R = isConstantBuildVectorOrConstantInt(N1)) {
        // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
        SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, L, R);
        if (!OpNode.getNode())
          return SDValue();
        return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
      }
      if (N0.hasOneUse()) {
        // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one
        // use
        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
        if (!OpNode.getNode())
          return SDValue();
        AddToWorklist(OpNode.getNode());
        return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
      }
    }
  }

  if (N1.getOpcode() == Opc) {
    if (SDNode *R = isConstantBuildVectorOrConstantInt(N1.getOperand(1))) {
      if (SDNode *L = isConstantBuildVectorOrConstantInt(N0)) {
        // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
        SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, R, L);
        if (!OpNode.getNode())
          return SDValue();
        return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
      }
      if (N1.hasOneUse()) {
        // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one
        // use
        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N1.getOperand(0), N0);
        if (!OpNode.getNode())
          return SDValue();
        AddToWorklist(OpNode.getNode());
        return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
      }
    }
  }

  return SDValue();
}

SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                               bool AddTo) {
  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.1 ";
        N->dump(&DAG);
        dbgs() << "\nWith: ";
        To[0].getNode()->dump(&DAG);
        dbgs() << " and " << NumTo-1 << " other values\n";
        for (unsigned i = 0, e = NumTo; i != e; ++i)
          assert((!To[i].getNode() ||
                  N->getValueType(i) == To[i].getValueType()) &&
                 "Cannot combine value to value of different type!"));
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesWith(N, To);
  if (AddTo) {
    // Push the new nodes and any users onto the worklist
    for (unsigned i = 0, e = NumTo; i != e; ++i) {
      if (To[i].getNode()) {
        AddToWorklist(To[i].getNode());
        AddUsersToWorklist(To[i].getNode());
      }
    }
  }

  // Finally, if the node is now dead, remove it from the graph.  The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (N->use_empty())
    deleteAndRecombine(N);
  return SDValue(N, 0);
}

void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  // Replace all uses.  If any nodes become isomorphic to other nodes and
  // are deleted, make sure to remove them from our worklist.
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);

  // Push the new node and any (possibly new) users onto the worklist.
  AddToWorklist(TLO.New.getNode());
  AddUsersToWorklist(TLO.New.getNode());

  // Finally, if the node is now dead, remove it from the graph.  The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (TLO.Old.getNode()->use_empty())
    deleteAndRecombine(TLO.Old.getNode());
}

/// SimplifyDemandedBits - Check the specified integer node value to see if
/// it can be simplified or if things it uses can be simplified by bit
/// propagation.  If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
  TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
  APInt KnownZero, KnownOne;
  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    return false;

  // Revisit the node.
  AddToWorklist(Op.getNode());

  // Replace the old value with the new one.
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.2 ";
        TLO.Old.getNode()->dump(&DAG);
        dbgs() << "\nWith: ";
        TLO.New.getNode()->dump(&DAG);
        dbgs() << '\n');

  CommitTargetLoweringOpt(TLO);
  return true;
}
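/// ReplaceLoadWithPromotedLoad - Replace all uses of the original load's value
/// with a truncation of the promoted extending load, transfer the chain uses
/// to the new load, and delete the original load.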
void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
  SDLoc dl(Load);
  EVT VT = Load->getValueType(0);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, SDValue(ExtLoad, 0));

  DEBUG(dbgs() << "\nReplacing.9 ";
        Load->dump(&DAG);
        dbgs() << "\nWith: ";
        Trunc.getNode()->dump(&DAG);
        dbgs() << '\n');
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
  deleteAndRecombine(Load);
  AddToWorklist(Trunc.getNode());
}

SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
  Replace = false;
  SDLoc dl(Op);
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
                                                  : ISD::EXTLOAD)
      : LD->getExtensionType();
    Replace = true;
    return DAG.getExtLoad(ExtType, dl, PVT,
                          LD->getChain(), LD->getBasePtr(),
                          MemVT, LD->getMemOperand());
  }

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::AssertSext:
    return DAG.getNode(ISD::AssertSext, dl, PVT,
                       SExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::AssertZext:
    return DAG.getNode(ISD::AssertZext, dl, PVT,
                       ZExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::Constant: {
    unsigned ExtOpc =
      Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, dl, PVT, Op);
  }
  }

  if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
    return SDValue();
  return DAG.getNode(ISD::ANY_EXTEND, dl, PVT, Op);
}

SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
  if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
    return SDValue();
  EVT OldVT = Op.getValueType();
  SDLoc dl(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NewOp.getValueType(), NewOp,
                     DAG.getValueType(OldVT));
}

SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
  EVT OldVT = Op.getValueType();
  SDLoc dl(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getZeroExtendInReg(NewOp, dl, OldVT);
}
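// A concrete sketch of the promotions below: on a target where i16 adds are
// undesirable but i32 adds are fine, (add i16 a, b) is rewritten as
// (trunc i16 (add i32 (anyext a), (anyext b))), with sign/zero extension used
// instead of anyext when the operation (e.g. sra or srl) depends on the high
// bits of its operand.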
/// PromoteIntBinOp - Promote the specified integer binary operation if the
/// target indicates it is beneficial. e.g. On x86, it's usually better to
/// promote i16 operations to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace0 = false;
    SDValue N0 = Op.getOperand(0);
    SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
    if (!NN0.getNode())
      return SDValue();

    bool Replace1 = false;
    SDValue N1 = Op.getOperand(1);
    SDValue NN1;
    if (N0 == N1)
      NN1 = NN0;
    else {
      NN1 = PromoteOperand(N1, PVT, Replace1);
      if (!NN1.getNode())
        return SDValue();
    }

    AddToWorklist(NN0.getNode());
    if (NN1.getNode())
      AddToWorklist(NN1.getNode());

    if (Replace0)
      ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
    if (Replace1)
      ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    SDLoc dl(Op);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, PVT, NN0, NN1));
  }
  return SDValue();
}

/// PromoteIntShiftOp - Promote the specified integer shift operation if the
/// target indicates it is beneficial. e.g. On x86, it's usually better to
/// promote i16 operations to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace = false;
    SDValue N0 = Op.getOperand(0);
    if (Opc == ISD::SRA)
      N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
    else if (Opc == ISD::SRL)
      N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
    else
      N0 = PromoteOperand(N0, PVT, Replace);
    if (!N0.getNode())
      return SDValue();

    AddToWorklist(N0.getNode());
    if (Replace)
      ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    SDLoc dl(Op);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, PVT, N0, Op.getOperand(1)));
  }
  return SDValue();
}

SDValue DAGCombiner::PromoteExtend(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");
    // fold (aext (aext x)) -> (aext x)
    // fold (aext (zext x)) -> (zext x)
    // fold (aext (sext x)) -> (sext x)
    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
  }
  return SDValue();
}

bool DAGCombiner::PromoteLoad(SDValue Op) {
  if (!LegalOperations)
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return false;

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return false;

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    SDLoc dl(Op);
    SDNode *N = Op.getNode();
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
                                                  : ISD::EXTLOAD)
      : LD->getExtensionType();
    SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
                                   LD->getChain(), LD->getBasePtr(),
                                   MemVT, LD->getMemOperand());
    SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);

    DEBUG(dbgs() << "\nPromoting ";
          N->dump(&DAG);
          dbgs() << "\nTo: ";
          Result.getNode()->dump(&DAG);
          dbgs() << '\n');
    WorklistRemover DeadNodes(*this);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
    deleteAndRecombine(N);
    AddToWorklist(Result.getNode());
    return true;
  }
  return false;
}

/// \brief Recursively delete a node which has no uses and any operands for
/// which it is the only use.
///
/// Note that this both deletes the nodes and removes them from the worklist.
/// It also adds any nodes that have had a user deleted to the worklist as they
/// may now have only one use and be subject to other combines.
bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
  if (!N->use_empty())
    return false;

  SmallSetVector<SDNode *, 16> Nodes;
  Nodes.insert(N);
  do {
    N = Nodes.pop_back_val();
    if (!N)
      continue;

    if (N->use_empty()) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        Nodes.insert(N->getOperand(i).getNode());

      removeFromWorklist(N);
      DAG.DeleteNode(N);
    } else {
      AddToWorklist(N);
    }
  } while (!Nodes.empty());
  return true;
}

//===----------------------------------------------------------------------===//
//  Main DAG Combiner implementation
//===----------------------------------------------------------------------===//
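// The driver below is a classic worklist algorithm. Conceptually:
//
//   while (!Worklist.empty()) {
//     N = Worklist.pop();
//     if (SDValue RV = combine(N))
//       replace N with RV and enqueue RV and its users;
//   }
//
// with additional bookkeeping to prune dead nodes and, after legalization,
// to re-legalize any node pulled off the worklist.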
void DAGCombiner::Run(CombineLevel AtLevel) {
  // set the instance variables, so that the various visit routines may use
  // them.
  Level = AtLevel;
  LegalOperations = Level >= AfterLegalizeVectorOps;
  LegalTypes = Level >= AfterLegalizeTypes;

  // Add all the dag nodes to the worklist.
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ++I)
    AddToWorklist(I);

  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted, and tracking any
  // changes of the root.
  HandleSDNode Dummy(DAG.getRoot());

  // While the worklist isn't empty, find a node and try to combine it.
  while (!WorklistMap.empty()) {
    SDNode *N;
    // The Worklist holds the SDNodes in order, but it may contain null entries.
    do {
      N = Worklist.pop_back_val();
    } while (!N);

    bool GoodWorklistEntry = WorklistMap.erase(N);
    (void)GoodWorklistEntry;
    assert(GoodWorklistEntry &&
           "Found a worklist entry without a corresponding map entry!");

    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
    // N is deleted from the DAG, since they too may now be dead or may have a
    // reduced number of uses, allowing other xforms.
    if (recursivelyDeleteUnusedNodes(N))
      continue;

    WorklistRemover DeadNodes(*this);

    // If this combine is running after legalizing the DAG, re-legalize any
    // nodes pulled off the worklist.
    if (Level == AfterLegalizeDAG) {
      SmallSetVector<SDNode *, 16> UpdatedNodes;
      bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);

      for (SDNode *LN : UpdatedNodes) {
        AddToWorklist(LN);
        AddUsersToWorklist(LN);
      }
      if (!NIsValid)
        continue;
    }

    DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));

    // Add any operands of the new node which have not yet been combined to the
    // worklist as well. Because the worklist uniques things already, this
    // won't repeatedly process the same operand.
    CombinedNodes.insert(N);
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (!CombinedNodes.count(N->getOperand(i).getNode()))
        AddToWorklist(N->getOperand(i).getNode());

    SDValue RV = combine(N);

    if (!RV.getNode())
      continue;

    ++NodesCombined;

    // If we get back the same node we passed in, rather than a new node or
    // zero, we know that the node must have defined multiple values and
    // CombineTo was used.  Since CombineTo takes care of the worklist
    // mechanics for us, we have no work to do in this case.
    if (RV.getNode() == N)
      continue;

    assert(N->getOpcode() != ISD::DELETED_NODE &&
           RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned new node!");

    DEBUG(dbgs() << " ... into: ";
          RV.getNode()->dump(&DAG));

    // Transfer debug value.
    DAG.TransferDbgValues(SDValue(N, 0), RV);
    if (N->getNumValues() == RV.getNode()->getNumValues())
      DAG.ReplaceAllUsesWith(N, RV.getNode());
    else {
      assert(N->getValueType(0) == RV.getValueType() &&
             N->getNumValues() == 1 && "Type mismatch");
      SDValue OpV = RV;
      DAG.ReplaceAllUsesWith(N, &OpV);
    }

    // Push the new node and any users onto the worklist
    AddToWorklist(RV.getNode());
    AddUsersToWorklist(RV.getNode());

    // Finally, if the node is now dead, remove it from the graph.  The node
    // may not be dead if the replacement process recursively simplified to
    // something else needing this node. This will also take care of adding any
    // operands which have lost a user to the worklist.
    recursivelyDeleteUnusedNodes(N);
  }

  // If the root changed (e.g. it was a dead load), update the root.
  DAG.setRoot(Dummy.getValue());
  DAG.RemoveDeadNodes();
}

SDValue DAGCombiner::visit(SDNode *N) {
  switch (N->getOpcode()) {
  default: break;
  case ISD::TokenFactor:        return visitTokenFactor(N);
  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
  case ISD::ADD:                return visitADD(N);
  case ISD::SUB:                return visitSUB(N);
  case ISD::ADDC:               return visitADDC(N);
  case ISD::SUBC:               return visitSUBC(N);
  case ISD::ADDE:               return visitADDE(N);
  case ISD::SUBE:               return visitSUBE(N);
  case ISD::MUL:                return visitMUL(N);
  case ISD::SDIV:               return visitSDIV(N);
  case ISD::UDIV:               return visitUDIV(N);
  case ISD::SREM:               return visitSREM(N);
  case ISD::UREM:               return visitUREM(N);
  case ISD::MULHU:              return visitMULHU(N);
  case ISD::MULHS:              return visitMULHS(N);
  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
  case ISD::SMULO:              return visitSMULO(N);
  case ISD::UMULO:              return visitUMULO(N);
  case ISD::SDIVREM:            return visitSDIVREM(N);
  case ISD::UDIVREM:            return visitUDIVREM(N);
  case ISD::AND:                return visitAND(N);
  case ISD::OR:                 return visitOR(N);
  case ISD::XOR:                return visitXOR(N);
  case ISD::SHL:                return visitSHL(N);
  case ISD::SRA:                return visitSRA(N);
  case ISD::SRL:                return visitSRL(N);
  case ISD::ROTR:
  case ISD::ROTL:               return visitRotate(N);
  case ISD::CTLZ:               return visitCTLZ(N);
  case ISD::CTLZ_ZERO_UNDEF:    return visitCTLZ_ZERO_UNDEF(N);
  case ISD::CTTZ:               return visitCTTZ(N);
  case ISD::CTTZ_ZERO_UNDEF:    return visitCTTZ_ZERO_UNDEF(N);
  case ISD::CTPOP:              return visitCTPOP(N);
  case ISD::SELECT:             return visitSELECT(N);
  case ISD::VSELECT:            return visitVSELECT(N);
  case ISD::SELECT_CC:          return visitSELECT_CC(N);
  case ISD::SETCC:              return visitSETCC(N);
  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
  case ISD::TRUNCATE:           return visitTRUNCATE(N);
  case ISD::BITCAST:            return visitBITCAST(N);
  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
  case ISD::FADD:               return visitFADD(N);
  case ISD::FSUB:               return visitFSUB(N);
  case ISD::FMUL:               return visitFMUL(N);
  case ISD::FMA:                return visitFMA(N);
  case ISD::FDIV:               return visitFDIV(N);
  case ISD::FREM:               return visitFREM(N);
  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
  case ISD::FP_ROUND:           return visitFP_ROUND(N);
  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
  case ISD::FNEG:               return visitFNEG(N);
  case ISD::FABS:               return visitFABS(N);
  case ISD::FFLOOR:             return visitFFLOOR(N);
  case ISD::FCEIL:              return visitFCEIL(N);
  case ISD::FTRUNC:             return visitFTRUNC(N);
  case ISD::BRCOND:             return visitBRCOND(N);
  case ISD::BR_CC:              return visitBR_CC(N);
  case ISD::LOAD:               return visitLOAD(N);
  case ISD::STORE:              return visitSTORE(N);
  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
  case ISD::EXTRACT_SUBVECTOR:  return visitEXTRACT_SUBVECTOR(N);
  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
  case ISD::INSERT_SUBVECTOR:   return visitINSERT_SUBVECTOR(N);
  }
  return SDValue();
}

SDValue DAGCombiner::combine(SDNode *N) {
  SDValue RV = visit(N);

  // If nothing happened, try a target-specific DAG combine.
  if (!RV.getNode()) {
    assert(N->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned NULL!");

    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {

      // Expose the DAG combiner to the target combiner impls.
      TargetLowering::DAGCombinerInfo
        DagCombineInfo(DAG, Level, false, this);

      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
    }
  }

  // If nothing happened still, try promoting the operation.
  if (!RV.getNode()) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      RV = PromoteIntBinOp(SDValue(N, 0));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      RV = PromoteIntShiftOp(SDValue(N, 0));
      break;
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
      RV = PromoteExtend(SDValue(N, 0));
      break;
    case ISD::LOAD:
      if (PromoteLoad(SDValue(N, 0)))
        RV = SDValue(N, 0);
      break;
    }
  }

  // If N is a commutative binary node, try commuting it to enable more
  // sdisel CSE.
  if (!RV.getNode() && SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
      N->getNumValues() == 1) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);

    // Constant operands are canonicalized to RHS.
    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
      SDValue Ops[] = {N1, N0};
      SDNode *CSENode;
      if (const BinaryWithFlagsSDNode *BinNode =
              dyn_cast<BinaryWithFlagsSDNode>(N)) {
        CSENode = DAG.getNodeIfExists(
            N->getOpcode(), N->getVTList(), Ops, BinNode->hasNoUnsignedWrap(),
            BinNode->hasNoSignedWrap(), BinNode->isExact());
      } else {
        CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops);
      }
      if (CSENode)
        return SDValue(CSENode, 0);
    }
  }

  return RV;
}

/// getInputChainForNode - Given a node, return its input chain if it has one,
/// otherwise return a null sd operand.
static SDValue getInputChainForNode(SDNode *N) {
  if (unsigned NumOps = N->getNumOperands()) {
    if (N->getOperand(0).getValueType() == MVT::Other)
      return N->getOperand(0);
    if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
      return N->getOperand(NumOps-1);
    for (unsigned i = 1; i < NumOps-1; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other)
        return N->getOperand(i);
  }
  return SDValue();
}
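/// visitTokenFactor - Simplify a TokenFactor node by dropping redundant
/// operands (entry tokens, duplicates, and chains already implied by another
/// operand) and by merging in single-use TokenFactor operands.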
SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
  // If N has two operands, where one has an input chain equal to the other,
  // the 'other' chain is redundant.
  if (N->getNumOperands() == 2) {
    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
      return N->getOperand(0);
    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
      return N->getOperand(1);
  }

  SmallVector<SDNode *, 8> TFs;     // List of token factors to visit.
  SmallVector<SDValue, 8> Ops;      // Ops for replacing token factor.
  SmallPtrSet<SDNode*, 16> SeenOps;
  bool Changed = false;             // If we should replace this token factor.

  // Start out with this token factor.
  TFs.push_back(N);

  // Iterate through token factors.  The TFs list grows when new token factors
  // are encountered.
  for (unsigned i = 0; i < TFs.size(); ++i) {
    SDNode *TF = TFs[i];

    // Check each of the operands.
    for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
      SDValue Op = TF->getOperand(i);

      switch (Op.getOpcode()) {
      case ISD::EntryToken:
        // Entry tokens don't need to be added to the list. They are
        // redundant.
        Changed = true;
        break;

      case ISD::TokenFactor:
        if (Op.hasOneUse() &&
            std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
          // Queue up for processing.
          TFs.push_back(Op.getNode());
          // Clean up in case the token factor is removed.
          AddToWorklist(Op.getNode());
          Changed = true;
          break;
        }
        // Fall through.

      default:
        // Only add if it isn't already in the list.
        if (SeenOps.insert(Op.getNode()))
          Ops.push_back(Op);
        else
          Changed = true;
        break;
      }
    }
  }

  SDValue Result;

  // If we've changed things around, replace the token factor.
  if (Changed) {
    if (Ops.empty()) {
      // The entry token is the only possible outcome.
      Result = DAG.getEntryNode();
    } else {
      // New and improved token factor.
      Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
    }

    // Don't add users to the worklist.
    return CombineTo(N, Result, false);
  }

  return Result;
}

/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
  WorklistRemover DeadNodes(*this);
  // Replacing results may cause a different MERGE_VALUES to suddenly
  // be CSE'd with N, and carry its uses with it. Iterate until no
  // uses remain, to ensure that the node can be safely deleted.
  // First add the users of this node to the work list so that they
  // can be tried again once they have new operands.
  AddUsersToWorklist(N);
  do {
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
  } while (!N->use_empty());
  deleteAndRecombine(N);
  return SDValue(N, 0);   // Return N so it doesn't get rechecked!
}
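/// combineShlAddConstant - Fold (add (shl (add x, c1), c2), n1) into
/// (add (add (shl x, c2), c1 << c2), n1) when c1 and c2 are constants and the
/// inner add has a single use, exposing the outer add to further folds.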
static
SDValue combineShlAddConstant(SDLoc DL, SDValue N0, SDValue N1,
                              SelectionDAG &DAG) {
  EVT VT = N0.getValueType();
  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);

  if (N01C && N00.getOpcode() == ISD::ADD && N00.getNode()->hasOneUse() &&
      isa<ConstantSDNode>(N00.getOperand(1))) {
    // fold (add (shl (add x, c1), c2), y) -> (add (add (shl x, c2), c1<<c2), y)
    N0 = DAG.getNode(ISD::ADD, SDLoc(N0), VT,
                     DAG.getNode(ISD::SHL, SDLoc(N00), VT,
                                 N00.getOperand(0), N01),
                     DAG.getNode(ISD::SHL, SDLoc(N01), VT,
                                 N00.getOperand(1), N01));
    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitADD(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;

    // fold (add x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
  }

  // fold (add x, undef) -> undef
  if (N0.getOpcode() == ISD::UNDEF)
    return N0;
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;
  // fold (add c1, c2) -> c1+c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C);
  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::ADD, SDLoc(N), VT, N1, N0);
  // fold (add x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (add Sym, c) -> Sym+c
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
        GA->getOpcode() == ISD::GlobalAddress)
      return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
                                  GA->getOffset() +
                                    (uint64_t)N1C->getSExtValue());
  // fold ((c1-A)+c2) -> (c1+c2)-A
  if (N1C && N0.getOpcode() == ISD::SUB)
    if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
      return DAG.getNode(ISD::SUB, SDLoc(N), VT,
                         DAG.getConstant(N1C->getAPIntValue()+
                                         N0C->getAPIntValue(), VT),
                         N0.getOperand(1));
  // reassociate add
  SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1);
  if (RADD.getNode())
    return RADD;
  // fold ((0-A) + B) -> B-A
  if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
      cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
    return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1, N0.getOperand(1));
  // fold (A + (0-B)) -> A-B
  if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
      cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
    return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, N1.getOperand(1));
  // fold (A+(B-A)) -> B
  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
    return N1.getOperand(0);
  // fold ((B-A)+A) -> B
  if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
    return N0.getOperand(0);
  // fold (A+(B-(A+C))) to (B-C)
  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
      N0 == N1.getOperand(1).getOperand(0))
    return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1.getOperand(0),
DAG.getNode(ISD::SUB, SDLoc(N), VT, N1.getOperand(0), 1595 N1.getOperand(1).getOperand(1)); 1596 // fold (A+(B-(C+A))) to (B-C) 1597 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD && 1598 N0 == N1.getOperand(1).getOperand(1)) 1599 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1.getOperand(0), 1600 N1.getOperand(1).getOperand(0)); 1601 // fold (A+((B-A)+or-C)) to (B+or-C) 1602 if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) && 1603 N1.getOperand(0).getOpcode() == ISD::SUB && 1604 N0 == N1.getOperand(0).getOperand(1)) 1605 return DAG.getNode(N1.getOpcode(), SDLoc(N), VT, 1606 N1.getOperand(0).getOperand(0), N1.getOperand(1)); 1607 1608 // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant 1609 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) { 1610 SDValue N00 = N0.getOperand(0); 1611 SDValue N01 = N0.getOperand(1); 1612 SDValue N10 = N1.getOperand(0); 1613 SDValue N11 = N1.getOperand(1); 1614 1615 if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10)) 1616 return DAG.getNode(ISD::SUB, SDLoc(N), VT, 1617 DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10), 1618 DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11)); 1619 } 1620 1621 if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0))) 1622 return SDValue(N, 0); 1623 1624 // fold (a+b) -> (a|b) iff a and b share no bits. 1625 if (VT.isInteger() && !VT.isVector()) { 1626 APInt LHSZero, LHSOne; 1627 APInt RHSZero, RHSOne; 1628 DAG.computeKnownBits(N0, LHSZero, LHSOne); 1629 1630 if (LHSZero.getBoolValue()) { 1631 DAG.computeKnownBits(N1, RHSZero, RHSOne); 1632 1633 // If all possibly-set bits on the LHS are clear on the RHS, return an OR. 1634 // If all possibly-set bits on the RHS are clear on the LHS, return an OR. 1635 if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero){ 1636 if (!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) 1637 return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1); 1638 } 1639 } 1640 } 1641 1642 // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), ) 1643 if (N0.getOpcode() == ISD::SHL && N0.getNode()->hasOneUse()) { 1644 SDValue Result = combineShlAddConstant(SDLoc(N), N0, N1, DAG); 1645 if (Result.getNode()) return Result; 1646 } 1647 if (N1.getOpcode() == ISD::SHL && N1.getNode()->hasOneUse()) { 1648 SDValue Result = combineShlAddConstant(SDLoc(N), N1, N0, DAG); 1649 if (Result.getNode()) return Result; 1650 } 1651 1652 // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n)) 1653 if (N1.getOpcode() == ISD::SHL && 1654 N1.getOperand(0).getOpcode() == ISD::SUB) 1655 if (ConstantSDNode *C = 1656 dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(0))) 1657 if (C->getAPIntValue() == 0) 1658 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, 1659 DAG.getNode(ISD::SHL, SDLoc(N), VT, 1660 N1.getOperand(0).getOperand(1), 1661 N1.getOperand(1))); 1662 if (N0.getOpcode() == ISD::SHL && 1663 N0.getOperand(0).getOpcode() == ISD::SUB) 1664 if (ConstantSDNode *C = 1665 dyn_cast<ConstantSDNode>(N0.getOperand(0).getOperand(0))) 1666 if (C->getAPIntValue() == 0) 1667 return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1, 1668 DAG.getNode(ISD::SHL, SDLoc(N), VT, 1669 N0.getOperand(0).getOperand(1), 1670 N0.getOperand(1))); 1671 1672 if (N1.getOpcode() == ISD::AND) { 1673 SDValue AndOp0 = N1.getOperand(0); 1674 ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1)); 1675 unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0); 1676 unsigned DestBits = VT.getScalarType().getSizeInBits(); 1677 1678 // (add z, (and (sbbl x, x), 1)) -> (sub z, 
(sbbl x, x)) 1679 // and similar xforms where the inner op is either ~0 or 0. 1680 if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) { 1681 SDLoc DL(N); 1682 return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0); 1683 } 1684 } 1685 1686 // add (sext i1), X -> sub X, (zext i1) 1687 if (N0.getOpcode() == ISD::SIGN_EXTEND && 1688 N0.getOperand(0).getValueType() == MVT::i1 && 1689 !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) { 1690 SDLoc DL(N); 1691 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); 1692 return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt); 1693 } 1694 1695 return SDValue(); 1696 } 1697 1698 SDValue DAGCombiner::visitADDC(SDNode *N) { 1699 SDValue N0 = N->getOperand(0); 1700 SDValue N1 = N->getOperand(1); 1701 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1702 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1703 EVT VT = N0.getValueType(); 1704 1705 // If the flag result is dead, turn this into an ADD. 1706 if (!N->hasAnyUseOfValue(1)) 1707 return CombineTo(N, DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N1), 1708 DAG.getNode(ISD::CARRY_FALSE, 1709 SDLoc(N), MVT::Glue)); 1710 1711 // canonicalize constant to RHS. 1712 if (N0C && !N1C) 1713 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N1, N0); 1714 1715 // fold (addc x, 0) -> x + no carry out 1716 if (N1C && N1C->isNullValue()) 1717 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, 1718 SDLoc(N), MVT::Glue)); 1719 1720 // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits. 1721 APInt LHSZero, LHSOne; 1722 APInt RHSZero, RHSOne; 1723 DAG.computeKnownBits(N0, LHSZero, LHSOne); 1724 1725 if (LHSZero.getBoolValue()) { 1726 DAG.computeKnownBits(N1, RHSZero, RHSOne); 1727 1728 // If all possibly-set bits on the LHS are clear on the RHS, return an OR. 1729 // If all possibly-set bits on the RHS are clear on the LHS, return an OR. 1730 if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero) 1731 return CombineTo(N, DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1), 1732 DAG.getNode(ISD::CARRY_FALSE, 1733 SDLoc(N), MVT::Glue)); 1734 } 1735 1736 return SDValue(); 1737 } 1738 1739 SDValue DAGCombiner::visitADDE(SDNode *N) { 1740 SDValue N0 = N->getOperand(0); 1741 SDValue N1 = N->getOperand(1); 1742 SDValue CarryIn = N->getOperand(2); 1743 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1744 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1745 1746 // canonicalize constant to RHS 1747 if (N0C && !N1C) 1748 return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(), 1749 N1, N0, CarryIn); 1750 1751 // fold (adde x, y, false) -> (addc x, y) 1752 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 1753 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1); 1754 1755 return SDValue(); 1756 } 1757 1758 // Since it may not be valid to emit a fold to zero for vector initializers, 1759 // check if we can before folding.
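// For example, (sub x, x) on a <4 x i32> value only folds to the all-zeros
// BUILD_VECTOR when that node is legal for the type (or when operations have
// not been legalized yet); otherwise no fold is emitted.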
1760 static SDValue tryFoldToZero(SDLoc DL, const TargetLowering &TLI, EVT VT, 1761 SelectionDAG &DAG, 1762 bool LegalOperations, bool LegalTypes) { 1763 if (!VT.isVector()) 1764 return DAG.getConstant(0, VT); 1765 if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) 1766 return DAG.getConstant(0, VT); 1767 return SDValue(); 1768 } 1769 1770 SDValue DAGCombiner::visitSUB(SDNode *N) { 1771 SDValue N0 = N->getOperand(0); 1772 SDValue N1 = N->getOperand(1); 1773 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode()); 1774 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 1775 ConstantSDNode *N1C1 = N1.getOpcode() != ISD::ADD ? nullptr : 1776 dyn_cast<ConstantSDNode>(N1.getOperand(1).getNode()); 1777 EVT VT = N0.getValueType(); 1778 1779 // fold vector ops 1780 if (VT.isVector()) { 1781 SDValue FoldedVOp = SimplifyVBinOp(N); 1782 if (FoldedVOp.getNode()) return FoldedVOp; 1783 1784 // fold (sub x, 0) -> x, vector edition 1785 if (ISD::isBuildVectorAllZeros(N1.getNode())) 1786 return N0; 1787 } 1788 1789 // fold (sub x, x) -> 0 1790 // FIXME: Refactor this and xor and other similar operations together. 1791 if (N0 == N1) 1792 return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes); 1793 // fold (sub c1, c2) -> c1-c2 1794 if (N0C && N1C) 1795 return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C); 1796 // fold (sub x, c) -> (add x, -c) 1797 if (N1C) 1798 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, 1799 DAG.getConstant(-N1C->getAPIntValue(), VT)); 1800 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) 1801 if (N0C && N0C->isAllOnesValue()) 1802 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0); 1803 // fold A-(A-B) -> B 1804 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0)) 1805 return N1.getOperand(1); 1806 // fold (A+B)-A -> B 1807 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1) 1808 return N0.getOperand(1); 1809 // fold (A+B)-B -> A 1810 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1) 1811 return N0.getOperand(0); 1812 // fold C2-(A+C1) -> (C2-C1)-A 1813 if (N1.getOpcode() == ISD::ADD && N0C && N1C1) { 1814 SDValue NewC = DAG.getConstant(N0C->getAPIntValue() - N1C1->getAPIntValue(), 1815 VT); 1816 return DAG.getNode(ISD::SUB, SDLoc(N), VT, NewC, 1817 N1.getOperand(0)); 1818 } 1819 // fold ((A+(B+or-C))-B) -> A+or-C 1820 if (N0.getOpcode() == ISD::ADD && 1821 (N0.getOperand(1).getOpcode() == ISD::SUB || 1822 N0.getOperand(1).getOpcode() == ISD::ADD) && 1823 N0.getOperand(1).getOperand(0) == N1) 1824 return DAG.getNode(N0.getOperand(1).getOpcode(), SDLoc(N), VT, 1825 N0.getOperand(0), N0.getOperand(1).getOperand(1)); 1826 // fold ((A+(C+B))-B) -> A+C 1827 if (N0.getOpcode() == ISD::ADD && 1828 N0.getOperand(1).getOpcode() == ISD::ADD && 1829 N0.getOperand(1).getOperand(1) == N1) 1830 return DAG.getNode(ISD::ADD, SDLoc(N), VT, 1831 N0.getOperand(0), N0.getOperand(1).getOperand(0)); 1832 // fold ((A-(B-C))-C) -> A-B 1833 if (N0.getOpcode() == ISD::SUB && 1834 N0.getOperand(1).getOpcode() == ISD::SUB && 1835 N0.getOperand(1).getOperand(1) == N1) 1836 return DAG.getNode(ISD::SUB, SDLoc(N), VT, 1837 N0.getOperand(0), N0.getOperand(1).getOperand(0)); 1838 1839 // If either operand of a sub is undef, the result is undef 1840 if (N0.getOpcode() == ISD::UNDEF) 1841 return N0; 1842 if (N1.getOpcode() == ISD::UNDEF) 1843 return N1; 1844 1845 // If the relocation model supports it, consider symbol offsets. 
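// e.g., (sub Sym+4, 1) can become Sym+3 when offset folding is legal, and
// (sub Sym+c1, Sym+c2) on the same symbol collapses to the constant c1-c2.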
1846 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) 1847 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) { 1848 // fold (sub Sym, c) -> Sym-c 1849 if (N1C && GA->getOpcode() == ISD::GlobalAddress) 1850 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT, 1851 GA->getOffset() - 1852 (uint64_t)N1C->getSExtValue()); 1853 // fold (sub Sym+c1, Sym+c2) -> c1-c2 1854 if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1)) 1855 if (GA->getGlobal() == GB->getGlobal()) 1856 return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(), 1857 VT); 1858 } 1859 1860 return SDValue(); 1861 } 1862 1863 SDValue DAGCombiner::visitSUBC(SDNode *N) { 1864 SDValue N0 = N->getOperand(0); 1865 SDValue N1 = N->getOperand(1); 1866 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1867 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1868 EVT VT = N0.getValueType(); 1869 1870 // If the flag result is dead, turn this into a SUB. 1871 if (!N->hasAnyUseOfValue(1)) 1872 return CombineTo(N, DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, N1), 1873 DAG.getNode(ISD::CARRY_FALSE, SDLoc(N), 1874 MVT::Glue)); 1875 1876 // fold (subc x, x) -> 0 + no borrow 1877 if (N0 == N1) 1878 return CombineTo(N, DAG.getConstant(0, VT), 1879 DAG.getNode(ISD::CARRY_FALSE, SDLoc(N), 1880 MVT::Glue)); 1881 1882 // fold (subc x, 0) -> x + no borrow 1883 if (N1C && N1C->isNullValue()) 1884 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, SDLoc(N), 1885 MVT::Glue)); 1886 1887 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow 1888 if (N0C && N0C->isAllOnesValue()) 1889 return CombineTo(N, DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0), 1890 DAG.getNode(ISD::CARRY_FALSE, SDLoc(N), 1891 MVT::Glue)); 1892 1893 return SDValue(); 1894 } 1895 1896 SDValue DAGCombiner::visitSUBE(SDNode *N) { 1897 SDValue N0 = N->getOperand(0); 1898 SDValue N1 = N->getOperand(1); 1899 SDValue CarryIn = N->getOperand(2); 1900 1901 // fold (sube x, y, false) -> (subc x, y) 1902 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 1903 return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1); 1904 1905 return SDValue(); 1906 } 1907 1908 SDValue DAGCombiner::visitMUL(SDNode *N) { 1909 SDValue N0 = N->getOperand(0); 1910 SDValue N1 = N->getOperand(1); 1911 EVT VT = N0.getValueType(); 1912 1913 // fold (mul x, undef) -> 0 1914 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 1915 return DAG.getConstant(0, VT); 1916 1917 bool N0IsConst = false; 1918 bool N1IsConst = false; 1919 APInt ConstValue0, ConstValue1; 1920 // fold vector ops 1921 if (VT.isVector()) { 1922 SDValue FoldedVOp = SimplifyVBinOp(N); 1923 if (FoldedVOp.getNode()) return FoldedVOp; 1924 1925 N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0); 1926 N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1); 1927 } else { 1928 N0IsConst = dyn_cast<ConstantSDNode>(N0) != nullptr; 1929 ConstValue0 = N0IsConst ? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue() 1930 : APInt(); 1931 N1IsConst = dyn_cast<ConstantSDNode>(N1) != nullptr; 1932 ConstValue1 = N1IsConst ?
(dyn_cast<ConstantSDNode>(N1))->getAPIntValue() 1933 : APInt(); 1934 } 1935 1936 // fold (mul c1, c2) -> c1*c2 1937 if (N0IsConst && N1IsConst) 1938 return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0.getNode(), N1.getNode()); 1939 1940 // canonicalize constant to RHS 1941 if (N0IsConst && !N1IsConst) 1942 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0); 1943 // fold (mul x, 0) -> 0 1944 if (N1IsConst && ConstValue1 == 0) 1945 return N1; 1946 // We require a splat of the entire scalar bit width for non-contiguous 1947 // bit patterns. 1948 bool IsFullSplat = 1949 ConstValue1.getBitWidth() == VT.getScalarType().getSizeInBits(); 1950 // fold (mul x, 1) -> x 1951 if (N1IsConst && ConstValue1 == 1 && IsFullSplat) 1952 return N0; 1953 // fold (mul x, -1) -> 0-x 1954 if (N1IsConst && ConstValue1.isAllOnesValue()) 1955 return DAG.getNode(ISD::SUB, SDLoc(N), VT, 1956 DAG.getConstant(0, VT), N0); 1957 // fold (mul x, (1 << c)) -> x << c 1958 if (N1IsConst && ConstValue1.isPowerOf2() && IsFullSplat) 1959 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, 1960 DAG.getConstant(ConstValue1.logBase2(), 1961 getShiftAmountTy(N0.getValueType()))); 1962 // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c 1963 if (N1IsConst && (-ConstValue1).isPowerOf2() && IsFullSplat) { 1964 unsigned Log2Val = (-ConstValue1).logBase2(); 1965 // FIXME: If the input is something that is easily negated (e.g. a 1966 // single-use add), we should put the negate there. 1967 return DAG.getNode(ISD::SUB, SDLoc(N), VT, 1968 DAG.getConstant(0, VT), 1969 DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, 1970 DAG.getConstant(Log2Val, 1971 getShiftAmountTy(N0.getValueType())))); 1972 } 1973 1974 APInt Val; 1975 // (mul (shl X, c1), c2) -> (mul X, c2 << c1) 1976 if (N1IsConst && N0.getOpcode() == ISD::SHL && 1977 (isConstantSplatVector(N0.getOperand(1).getNode(), Val) || 1978 isa<ConstantSDNode>(N0.getOperand(1)))) { 1979 SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, 1980 N1, N0.getOperand(1)); 1981 AddToWorklist(C3.getNode()); 1982 return DAG.getNode(ISD::MUL, SDLoc(N), VT, 1983 N0.getOperand(0), C3); 1984 } 1985 1986 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one 1987 // use. 1988 { 1989 SDValue Sh(nullptr,0), Y(nullptr,0); 1990 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)). 
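// e.g., (mul (shl X, 3), Y) -> (shl (mul X, Y), 3): the multiply-by-8 is
// folded into a final shift of the product, which is usually cheaper.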
1991 if (N0.getOpcode() == ISD::SHL && 1992 (isConstantSplatVector(N0.getOperand(1).getNode(), Val) || 1993 isa<ConstantSDNode>(N0.getOperand(1))) && 1994 N0.getNode()->hasOneUse()) { 1995 Sh = N0; Y = N1; 1996 } else if (N1.getOpcode() == ISD::SHL && 1997 isa<ConstantSDNode>(N1.getOperand(1)) && 1998 N1.getNode()->hasOneUse()) { 1999 Sh = N1; Y = N0; 2000 } 2001 2002 if (Sh.getNode()) { 2003 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, 2004 Sh.getOperand(0), Y); 2005 return DAG.getNode(ISD::SHL, SDLoc(N), VT, 2006 Mul, Sh.getOperand(1)); 2007 } 2008 } 2009 2010 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2) 2011 if (N1IsConst && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() && 2012 (isConstantSplatVector(N0.getOperand(1).getNode(), Val) || 2013 isa<ConstantSDNode>(N0.getOperand(1)))) 2014 return DAG.getNode(ISD::ADD, SDLoc(N), VT, 2015 DAG.getNode(ISD::MUL, SDLoc(N0), VT, 2016 N0.getOperand(0), N1), 2017 DAG.getNode(ISD::MUL, SDLoc(N1), VT, 2018 N0.getOperand(1), N1)); 2019 2020 // reassociate mul 2021 SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1); 2022 if (RMUL.getNode()) 2023 return RMUL; 2024 2025 return SDValue(); 2026 } 2027 2028 SDValue DAGCombiner::visitSDIV(SDNode *N) { 2029 SDValue N0 = N->getOperand(0); 2030 SDValue N1 = N->getOperand(1); 2031 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2032 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2033 EVT VT = N->getValueType(0); 2034 2035 // fold vector ops 2036 if (VT.isVector()) { 2037 SDValue FoldedVOp = SimplifyVBinOp(N); 2038 if (FoldedVOp.getNode()) return FoldedVOp; 2039 } 2040 2041 // fold (sdiv c1, c2) -> c1/c2 2042 if (N0C && N1C && !N1C->isNullValue()) 2043 return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C); 2044 // fold (sdiv X, 1) -> X 2045 if (N1C && N1C->getAPIntValue() == 1LL) 2046 return N0; 2047 // fold (sdiv X, -1) -> 0-X 2048 if (N1C && N1C->isAllOnesValue()) 2049 return DAG.getNode(ISD::SUB, SDLoc(N), VT, 2050 DAG.getConstant(0, VT), N0); 2051 // If we know the sign bits of both operands are zero, strength reduce to a 2052 // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2 2053 if (!VT.isVector()) { 2054 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2055 return DAG.getNode(ISD::UDIV, SDLoc(N), N1.getValueType(), 2056 N0, N1); 2057 } 2058 2059 // fold (sdiv X, pow2) -> simple ops after legalize 2060 if (N1C && !N1C->isNullValue() && (N1C->getAPIntValue().isPowerOf2() || 2061 (-N1C->getAPIntValue()).isPowerOf2())) { 2062 // If dividing by powers of two is cheap, then don't perform the following 2063 // fold. 2064 if (TLI.isPow2DivCheap()) 2065 return SDValue(); 2066 2067 // Target-specific implementation of sdiv x, pow2. 2068 SDValue Res = BuildSDIVPow2(N); 2069 if (Res.getNode()) 2070 return Res; 2071 2072 unsigned lg2 = N1C->getAPIntValue().countTrailingZeros(); 2073 2074 // Splat the sign bit into the register 2075 SDValue SGN = 2076 DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, 2077 DAG.getConstant(VT.getScalarSizeInBits() - 1, 2078 getShiftAmountTy(N0.getValueType()))); 2079 AddToWorklist(SGN.getNode()); 2080 2081 // Add (N0 < 0) ? 
abs2 - 1 : 0; 2082 SDValue SRL = 2083 DAG.getNode(ISD::SRL, SDLoc(N), VT, SGN, 2084 DAG.getConstant(VT.getScalarSizeInBits() - lg2, 2085 getShiftAmountTy(SGN.getValueType()))); 2086 SDValue ADD = DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, SRL); 2087 AddToWorklist(SRL.getNode()); 2088 AddToWorklist(ADD.getNode()); // Divide by pow2 2089 SDValue SRA = DAG.getNode(ISD::SRA, SDLoc(N), VT, ADD, 2090 DAG.getConstant(lg2, getShiftAmountTy(ADD.getValueType()))); 2091 2092 // If we're dividing by a positive value, we're done. Otherwise, we must 2093 // negate the result. 2094 if (N1C->getAPIntValue().isNonNegative()) 2095 return SRA; 2096 2097 AddToWorklist(SRA.getNode()); 2098 return DAG.getNode(ISD::SUB, SDLoc(N), VT, DAG.getConstant(0, VT), SRA); 2099 } 2100 2101 // if integer divide is expensive and we satisfy the requirements, emit an 2102 // alternate sequence. 2103 if (N1C && !TLI.isIntDivCheap()) { 2104 SDValue Op = BuildSDIV(N); 2105 if (Op.getNode()) return Op; 2106 } 2107 2108 // undef / X -> 0 2109 if (N0.getOpcode() == ISD::UNDEF) 2110 return DAG.getConstant(0, VT); 2111 // X / undef -> undef 2112 if (N1.getOpcode() == ISD::UNDEF) 2113 return N1; 2114 2115 return SDValue(); 2116 } 2117 2118 SDValue DAGCombiner::visitUDIV(SDNode *N) { 2119 SDValue N0 = N->getOperand(0); 2120 SDValue N1 = N->getOperand(1); 2121 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2122 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2123 EVT VT = N->getValueType(0); 2124 2125 // fold vector ops 2126 if (VT.isVector()) { 2127 SDValue FoldedVOp = SimplifyVBinOp(N); 2128 if (FoldedVOp.getNode()) return FoldedVOp; 2129 } 2130 2131 // fold (udiv c1, c2) -> c1/c2 2132 if (N0C && N1C && !N1C->isNullValue()) 2133 return DAG.FoldConstantArithmetic(ISD::UDIV, VT, N0C, N1C); 2134 // fold (udiv x, (1 << c)) -> x >>u c 2135 if (N1C && N1C->getAPIntValue().isPowerOf2()) 2136 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, 2137 DAG.getConstant(N1C->getAPIntValue().logBase2(), 2138 getShiftAmountTy(N0.getValueType()))); 2139 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2 2140 if (N1.getOpcode() == ISD::SHL) { 2141 if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) { 2142 if (SHC->getAPIntValue().isPowerOf2()) { 2143 EVT ADDVT = N1.getOperand(1).getValueType(); 2144 SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N), ADDVT, 2145 N1.getOperand(1), 2146 DAG.getConstant(SHC->getAPIntValue() 2147 .logBase2(), 2148 ADDVT)); 2149 AddToWorklist(Add.getNode()); 2150 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, Add); 2151 } 2152 } 2153 } 2154 // fold (udiv x, c) -> alternate 2155 if (N1C && !TLI.isIntDivCheap()) { 2156 SDValue Op = BuildUDIV(N); 2157 if (Op.getNode()) return Op; 2158 } 2159 2160 // undef / X -> 0 2161 if (N0.getOpcode() == ISD::UNDEF) 2162 return DAG.getConstant(0, VT); 2163 // X / undef -> undef 2164 if (N1.getOpcode() == ISD::UNDEF) 2165 return N1; 2166 2167 return SDValue(); 2168 } 2169 2170 SDValue DAGCombiner::visitSREM(SDNode *N) { 2171 SDValue N0 = N->getOperand(0); 2172 SDValue N1 = N->getOperand(1); 2173 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2174 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2175 EVT VT = N->getValueType(0); 2176 2177 // fold (srem c1, c2) -> c1%c2 2178 if (N0C && N1C && !N1C->isNullValue()) 2179 return DAG.FoldConstantArithmetic(ISD::SREM, VT, N0C, N1C); 2180 // If we know the sign bits of both operands are zero, strength reduce to a 2181 // urem instead. 
Handles (X & 0x0FFFFFFF) %s 16 -> X&15 2182 if (!VT.isVector()) { 2183 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2184 return DAG.getNode(ISD::UREM, SDLoc(N), VT, N0, N1); 2185 } 2186 2187 // If X/C can be simplified by the division-by-constant logic, lower 2188 // X%C to the equivalent of X-X/C*C. 2189 if (N1C && !N1C->isNullValue()) { 2190 SDValue Div = DAG.getNode(ISD::SDIV, SDLoc(N), VT, N0, N1); 2191 AddToWorklist(Div.getNode()); 2192 SDValue OptimizedDiv = combine(Div.getNode()); 2193 if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) { 2194 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, 2195 OptimizedDiv, N1); 2196 SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul); 2197 AddToWorklist(Mul.getNode()); 2198 return Sub; 2199 } 2200 } 2201 2202 // undef % X -> 0 2203 if (N0.getOpcode() == ISD::UNDEF) 2204 return DAG.getConstant(0, VT); 2205 // X % undef -> undef 2206 if (N1.getOpcode() == ISD::UNDEF) 2207 return N1; 2208 2209 return SDValue(); 2210 } 2211 2212 SDValue DAGCombiner::visitUREM(SDNode *N) { 2213 SDValue N0 = N->getOperand(0); 2214 SDValue N1 = N->getOperand(1); 2215 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2216 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2217 EVT VT = N->getValueType(0); 2218 2219 // fold (urem c1, c2) -> c1%c2 2220 if (N0C && N1C && !N1C->isNullValue()) 2221 return DAG.FoldConstantArithmetic(ISD::UREM, VT, N0C, N1C); 2222 // fold (urem x, pow2) -> (and x, pow2-1) 2223 if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2()) 2224 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, 2225 DAG.getConstant(N1C->getAPIntValue()-1,VT)); 2226 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)) 2227 if (N1.getOpcode() == ISD::SHL) { 2228 if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) { 2229 if (SHC->getAPIntValue().isPowerOf2()) { 2230 SDValue Add = 2231 DAG.getNode(ISD::ADD, SDLoc(N), VT, N1, 2232 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), 2233 VT)); 2234 AddToWorklist(Add.getNode()); 2235 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, Add); 2236 } 2237 } 2238 } 2239 2240 // If X/C can be simplified by the division-by-constant logic, lower 2241 // X%C to the equivalent of X-X/C*C. 
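// e.g., X %u 10 can become X - (X /u 10) * 10 once the udiv has been combined
// into a cheaper sequence (such as a multiply by a magic constant).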
2242 if (N1C && !N1C->isNullValue()) { 2243 SDValue Div = DAG.getNode(ISD::UDIV, SDLoc(N), VT, N0, N1); 2244 AddToWorklist(Div.getNode()); 2245 SDValue OptimizedDiv = combine(Div.getNode()); 2246 if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) { 2247 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, 2248 OptimizedDiv, N1); 2249 SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul); 2250 AddToWorklist(Mul.getNode()); 2251 return Sub; 2252 } 2253 } 2254 2255 // undef % X -> 0 2256 if (N0.getOpcode() == ISD::UNDEF) 2257 return DAG.getConstant(0, VT); 2258 // X % undef -> undef 2259 if (N1.getOpcode() == ISD::UNDEF) 2260 return N1; 2261 2262 return SDValue(); 2263 } 2264 2265 SDValue DAGCombiner::visitMULHS(SDNode *N) { 2266 SDValue N0 = N->getOperand(0); 2267 SDValue N1 = N->getOperand(1); 2268 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2269 EVT VT = N->getValueType(0); 2270 SDLoc DL(N); 2271 2272 // fold (mulhs x, 0) -> 0 2273 if (N1C && N1C->isNullValue()) 2274 return N1; 2275 // fold (mulhs x, 1) -> (sra x, size(x)-1) 2276 if (N1C && N1C->getAPIntValue() == 1) 2277 return DAG.getNode(ISD::SRA, SDLoc(N), N0.getValueType(), N0, 2278 DAG.getConstant(N0.getValueType().getSizeInBits() - 1, 2279 getShiftAmountTy(N0.getValueType()))); 2280 // fold (mulhs x, undef) -> 0 2281 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 2282 return DAG.getConstant(0, VT); 2283 2284 // If the type twice as wide is legal, transform the mulhs to a wider multiply 2285 // plus a shift. 2286 if (VT.isSimple() && !VT.isVector()) { 2287 MVT Simple = VT.getSimpleVT(); 2288 unsigned SimpleSize = Simple.getSizeInBits(); 2289 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2290 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2291 N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0); 2292 N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1); 2293 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2294 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2295 DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType()))); 2296 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2297 } 2298 } 2299 2300 return SDValue(); 2301 } 2302 2303 SDValue DAGCombiner::visitMULHU(SDNode *N) { 2304 SDValue N0 = N->getOperand(0); 2305 SDValue N1 = N->getOperand(1); 2306 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2307 EVT VT = N->getValueType(0); 2308 SDLoc DL(N); 2309 2310 // fold (mulhu x, 0) -> 0 2311 if (N1C && N1C->isNullValue()) 2312 return N1; 2313 // fold (mulhu x, 1) -> 0 2314 if (N1C && N1C->getAPIntValue() == 1) 2315 return DAG.getConstant(0, N0.getValueType()); 2316 // fold (mulhu x, undef) -> 0 2317 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 2318 return DAG.getConstant(0, VT); 2319 2320 // If the type twice as wide is legal, transform the mulhu to a wider multiply 2321 // plus a shift. 
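// e.g., an i32 mulhu becomes trunc(i64 (zext x * zext y) >> 32) when the i64
// multiply is legal.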
2322 if (VT.isSimple() && !VT.isVector()) { 2323 MVT Simple = VT.getSimpleVT(); 2324 unsigned SimpleSize = Simple.getSizeInBits(); 2325 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2326 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2327 N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0); 2328 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1); 2329 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2330 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2331 DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType()))); 2332 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2333 } 2334 } 2335 2336 return SDValue(); 2337 } 2338 2339 /// SimplifyNodeWithTwoResults - Perform optimizations common to nodes that 2340 /// compute two values. LoOp and HiOp give the opcodes for the two computations 2341 /// that are being performed. Return the combined result if a simplification 2342 /// was made. 2343 SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, 2344 unsigned HiOp) { 2345 // If the high half is not needed, just compute the low half. 2346 bool HiExists = N->hasAnyUseOfValue(1); 2347 if (!HiExists && 2348 (!LegalOperations || 2349 TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) { 2350 SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), 2351 ArrayRef<SDUse>(N->op_begin(), N->op_end())); 2352 return CombineTo(N, Res, Res); 2353 } 2354 2355 // If the low half is not needed, just compute the high half. 2356 bool LoExists = N->hasAnyUseOfValue(0); 2357 if (!LoExists && 2358 (!LegalOperations || 2359 TLI.isOperationLegal(HiOp, N->getValueType(1)))) { 2360 SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), 2361 ArrayRef<SDUse>(N->op_begin(), N->op_end())); 2362 return CombineTo(N, Res, Res); 2363 } 2364 2365 // If both halves are used, leave the node as it is. 2366 if (LoExists && HiExists) 2367 return SDValue(); 2368 2369 // If the two computed results can be simplified separately, separate them. 2370 if (LoExists) { 2371 SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), 2372 ArrayRef<SDUse>(N->op_begin(), N->op_end())); 2373 AddToWorklist(Lo.getNode()); 2374 SDValue LoOpt = combine(Lo.getNode()); 2375 if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() && 2376 (!LegalOperations || 2377 TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType()))) 2378 return CombineTo(N, LoOpt, LoOpt); 2379 } 2380 2381 if (HiExists) { 2382 SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), 2383 ArrayRef<SDUse>(N->op_begin(), N->op_end())); 2384 AddToWorklist(Hi.getNode()); 2385 SDValue HiOpt = combine(Hi.getNode()); 2386 if (HiOpt.getNode() && HiOpt != Hi && 2387 (!LegalOperations || 2388 TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType()))) 2389 return CombineTo(N, HiOpt, HiOpt); 2390 } 2391 2392 return SDValue(); 2393 } 2394 2395 SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) { 2396 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS); 2397 if (Res.getNode()) return Res; 2398 2399 EVT VT = N->getValueType(0); 2400 SDLoc DL(N); 2401 2402 // If the type twice as wide is legal, transform the smul_lohi to a wider 2403 // multiply plus a shift.
2404 if (VT.isSimple() && !VT.isVector()) { 2405 MVT Simple = VT.getSimpleVT(); 2406 unsigned SimpleSize = Simple.getSizeInBits(); 2407 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2408 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2409 SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0)); 2410 SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1)); 2411 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi); 2412 // Compute the high part (result 1). 2413 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo, 2414 DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType()))); 2415 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi); 2416 // Compute the low part (result 0). 2417 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo); 2418 return CombineTo(N, Lo, Hi); 2419 } 2420 } 2421 2422 return SDValue(); 2423 } 2424 2425 SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) { 2426 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU); 2427 if (Res.getNode()) return Res; 2428 2429 EVT VT = N->getValueType(0); 2430 SDLoc DL(N); 2431 2432 // If the type twice as wide is legal, transform the umul_lohi to a wider 2433 // multiply plus a shift. 2434 if (VT.isSimple() && !VT.isVector()) { 2435 MVT Simple = VT.getSimpleVT(); 2436 unsigned SimpleSize = Simple.getSizeInBits(); 2437 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2438 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2439 SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0)); 2440 SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1)); 2441 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi); 2442 // Compute the high part (result 1). 2443 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo, 2444 DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType()))); 2445 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi); 2446 // Compute the low part (result 0). 2447 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo); 2448 return CombineTo(N, Lo, Hi); 2449 } 2450 } 2451 2452 return SDValue(); 2453 } 2454 2455 SDValue DAGCombiner::visitSMULO(SDNode *N) { 2456 // (smulo x, 2) -> (saddo x, x) 2457 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) 2458 if (C2->getAPIntValue() == 2) 2459 return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(), 2460 N->getOperand(0), N->getOperand(0)); 2461 2462 return SDValue(); 2463 } 2464 2465 SDValue DAGCombiner::visitUMULO(SDNode *N) { 2466 // (umulo x, 2) -> (uaddo x, x) 2467 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) 2468 if (C2->getAPIntValue() == 2) 2469 return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(), 2470 N->getOperand(0), N->getOperand(0)); 2471 2472 return SDValue(); 2473 } 2474 2475 SDValue DAGCombiner::visitSDIVREM(SDNode *N) { 2476 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM); 2477 if (Res.getNode()) return Res; 2478 2479 return SDValue(); 2480 } 2481 2482 SDValue DAGCombiner::visitUDIVREM(SDNode *N) { 2483 SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM); 2484 if (Res.getNode()) return Res; 2485 2486 return SDValue(); 2487 } 2488 2489 /// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with 2490 /// two operands of the same opcode, try to simplify it. 2491 SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { 2492 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 2493 EVT VT = N0.getValueType(); 2494 assert(N0.getOpcode() == N1.getOpcode() && "Bad input!"); 2495 2496 // Bail early if none of these transforms apply.
2497 if (N0.getNode()->getNumOperands() == 0) return SDValue(); 2498 2499 // For each of OP in AND/OR/XOR: 2500 // fold (OP (zext x), (zext y)) -> (zext (OP x, y)) 2501 // fold (OP (sext x), (sext y)) -> (sext (OP x, y)) 2502 // fold (OP (aext x), (aext y)) -> (aext (OP x, y)) 2503 // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free) 2504 // 2505 // Do not sink the logical op inside of a vector extend, since it may combine 2506 // into a vsetcc. 2507 EVT Op0VT = N0.getOperand(0).getValueType(); 2508 if ((N0.getOpcode() == ISD::ZERO_EXTEND || 2509 N0.getOpcode() == ISD::SIGN_EXTEND || 2510 // Avoid infinite looping with PromoteIntBinOp. 2511 (N0.getOpcode() == ISD::ANY_EXTEND && 2512 (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) || 2513 (N0.getOpcode() == ISD::TRUNCATE && 2514 (!TLI.isZExtFree(VT, Op0VT) || 2515 !TLI.isTruncateFree(Op0VT, VT)) && 2516 TLI.isTypeLegal(Op0VT))) && 2517 !VT.isVector() && 2518 Op0VT == N1.getOperand(0).getValueType() && 2519 (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) { 2520 SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0), 2521 N0.getOperand(0).getValueType(), 2522 N0.getOperand(0), N1.getOperand(0)); 2523 AddToWorklist(ORNode.getNode()); 2524 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode); 2525 } 2526 2527 // For each of OP in SHL/SRL/SRA/AND... 2528 // fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z) 2529 // fold (or (OP x, z), (OP y, z)) -> (OP (or x, y), z) 2530 // fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z) 2531 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL || 2532 N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) && 2533 N0.getOperand(1) == N1.getOperand(1)) { 2534 SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0), 2535 N0.getOperand(0).getValueType(), 2536 N0.getOperand(0), N1.getOperand(0)); 2537 AddToWorklist(ORNode.getNode()); 2538 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, 2539 ORNode, N0.getOperand(1)); 2540 } 2541 2542 // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B)) 2543 // Only perform this optimization after type legalization and before 2544 // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by 2545 // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and 2546 // we don't want to undo this promotion. 2547 // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper 2548 // on scalars. 2549 if ((N0.getOpcode() == ISD::BITCAST || 2550 N0.getOpcode() == ISD::SCALAR_TO_VECTOR) && 2551 Level == AfterLegalizeTypes) { 2552 SDValue In0 = N0.getOperand(0); 2553 SDValue In1 = N1.getOperand(0); 2554 EVT In0Ty = In0.getValueType(); 2555 EVT In1Ty = In1.getValueType(); 2556 SDLoc DL(N); 2557 // If both incoming values are integers, and the original types are the 2558 // same. 2559 if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) { 2560 SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1); 2561 SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op); 2562 AddToWorklist(Op.getNode()); 2563 return BC; 2564 } 2565 } 2566 2567 // Xor/and/or are indifferent to the swizzle operation (shuffle of one value). 2568 // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B)) 2569 // If both shuffles use the same mask, and both shuffle within a single 2570 // vector, then it is worthwhile to move the swizzle after the operation. 2571 // The type-legalizer generates this pattern when loading illegal 2572 // vector types from memory.
In many cases this allows additional shuffle 2573 // optimizations. 2574 // There are other cases where moving the shuffle after the xor/and/or 2575 // is profitable even if shuffles don't perform a swizzle. 2576 // If both shuffles use the same mask, and both shuffles have the same first 2577 // or second operand, then it might still be profitable to move the shuffle 2578 // after the xor/and/or operation. 2579 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) { 2580 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0); 2581 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1); 2582 2583 assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() && 2584 "Inputs to shuffles are not the same type"); 2585 2586 // Check that both shuffles use the same mask. The masks are known to be of 2587 // the same length because the result vector type is the same. 2588 // Check also that shuffles have only one use to avoid introducing extra 2589 // instructions. 2590 if (SVN0->hasOneUse() && SVN1->hasOneUse() && 2591 SVN0->getMask().equals(SVN1->getMask())) { 2592 SDValue ShOp = N0->getOperand(1); 2593 2594 // Don't try to fold this node if it requires introducing a 2595 // build vector of all zeros that might be illegal at this stage. 2596 if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) { 2597 if (!LegalTypes) 2598 ShOp = DAG.getConstant(0, VT); 2599 else 2600 ShOp = SDValue(); 2601 } 2602 2603 // (AND (shuf (A, C), shuf (B, C)) -> shuf (AND (A, B), C) 2604 // (OR (shuf (A, C), shuf (B, C)) -> shuf (OR (A, B), C) 2605 // (XOR (shuf (A, C), shuf (B, C)) -> shuf (XOR (A, B), V_0) 2606 if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) { 2607 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 2608 N0->getOperand(0), N1->getOperand(0)); 2609 AddToWorklist(NewNode.getNode()); 2610 return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp, 2611 &SVN0->getMask()[0]); 2612 } 2613 2614 // Don't try to fold this node if it requires introducing a 2615 // build vector of all zeros that might be illegal at this stage. 
2616 ShOp = N0->getOperand(0); 2617 if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) { 2618 if (!LegalTypes) 2619 ShOp = DAG.getConstant(0, VT); 2620 else 2621 ShOp = SDValue(); 2622 } 2623 2624 // (AND (shuf (C, A), shuf (C, B)) -> shuf (C, AND (A, B)) 2625 // (OR (shuf (C, A), shuf (C, B)) -> shuf (C, OR (A, B)) 2626 // (XOR (shuf (C, A), shuf (C, B)) -> shuf (V_0, XOR (A, B)) 2627 if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) { 2628 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 2629 N0->getOperand(1), N1->getOperand(1)); 2630 AddToWorklist(NewNode.getNode()); 2631 return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode, 2632 &SVN0->getMask()[0]); 2633 } 2634 } 2635 } 2636 2637 return SDValue(); 2638 } 2639 2640 SDValue DAGCombiner::visitAND(SDNode *N) { 2641 SDValue N0 = N->getOperand(0); 2642 SDValue N1 = N->getOperand(1); 2643 SDValue LL, LR, RL, RR, CC0, CC1; 2644 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 2645 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2646 EVT VT = N1.getValueType(); 2647 unsigned BitWidth = VT.getScalarType().getSizeInBits(); 2648 2649 // fold vector ops 2650 if (VT.isVector()) { 2651 SDValue FoldedVOp = SimplifyVBinOp(N); 2652 if (FoldedVOp.getNode()) return FoldedVOp; 2653 2654 // fold (and x, 0) -> 0, vector edition 2655 if (ISD::isBuildVectorAllZeros(N0.getNode())) 2656 return N0; 2657 if (ISD::isBuildVectorAllZeros(N1.getNode())) 2658 return N1; 2659 2660 // fold (and x, -1) -> x, vector edition 2661 if (ISD::isBuildVectorAllOnes(N0.getNode())) 2662 return N1; 2663 if (ISD::isBuildVectorAllOnes(N1.getNode())) 2664 return N0; 2665 } 2666 2667 // fold (and x, undef) -> 0 2668 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 2669 return DAG.getConstant(0, VT); 2670 // fold (and c1, c2) -> c1&c2 2671 if (N0C && N1C) 2672 return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C); 2673 // canonicalize constant to RHS 2674 if (N0C && !N1C) 2675 return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0); 2676 // fold (and x, -1) -> x 2677 if (N1C && N1C->isAllOnesValue()) 2678 return N0; 2679 // if (and x, c) is known to be zero, return 0 2680 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 2681 APInt::getAllOnesValue(BitWidth))) 2682 return DAG.getConstant(0, VT); 2683 // reassociate and 2684 SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1); 2685 if (RAND.getNode()) 2686 return RAND; 2687 // fold (and (or x, C), D) -> D if (C & D) == D 2688 if (N1C && N0.getOpcode() == ISD::OR) 2689 if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) 2690 if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue()) 2691 return N1; 2692 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. 2693 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 2694 SDValue N0Op0 = N0.getOperand(0); 2695 APInt Mask = ~N1C->getAPIntValue(); 2696 Mask = Mask.trunc(N0Op0.getValueSizeInBits()); 2697 if (DAG.MaskedValueIsZero(N0Op0, Mask)) { 2698 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), 2699 N0.getValueType(), N0Op0); 2700 2701 // Replace uses of the AND with uses of the Zero extend node. 2702 CombineTo(N, Zext); 2703 2704 // We actually want to replace all uses of the any_extend with the 2705 // zero_extend, to avoid duplicating things. This will later cause this 2706 // AND to be folded. 2707 CombineTo(N0.getNode(), Zext); 2708 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
2709 } 2710 } 2711 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) -> 2712 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must 2713 // already be zero by virtue of the width of the base type of the load. 2714 // 2715 // the 'X' node here can either be nothing or an extract_vector_elt to catch 2716 // more cases. 2717 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 2718 N0.getOperand(0).getOpcode() == ISD::LOAD) || 2719 N0.getOpcode() == ISD::LOAD) { 2720 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ? 2721 N0 : N0.getOperand(0) ); 2722 2723 // Get the constant (if applicable) the zero'th operand is being ANDed with. 2724 // This can be a pure constant or a vector splat, in which case we treat the 2725 // vector as a scalar and use the splat value. 2726 APInt Constant = APInt::getNullValue(1); 2727 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 2728 Constant = C->getAPIntValue(); 2729 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { 2730 APInt SplatValue, SplatUndef; 2731 unsigned SplatBitSize; 2732 bool HasAnyUndefs; 2733 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef, 2734 SplatBitSize, HasAnyUndefs); 2735 if (IsSplat) { 2736 // Undef bits can contribute to a possible optimisation if set, so 2737 // set them. 2738 SplatValue |= SplatUndef; 2739 2740 // The splat value may be something like "0x00FFFFFF", which means 0 for 2741 // the first vector value and FF for the rest, repeating. We need a mask 2742 // that will apply equally to all members of the vector, so AND all the 2743 // lanes of the constant together. 2744 EVT VT = Vector->getValueType(0); 2745 unsigned BitWidth = VT.getVectorElementType().getSizeInBits(); 2746 2747 // If the splat value has been compressed to a bitlength lower 2748 // than the size of the vector lane, we need to re-expand it to 2749 // the lane size. 2750 if (BitWidth > SplatBitSize) 2751 for (SplatValue = SplatValue.zextOrTrunc(BitWidth); 2752 SplatBitSize < BitWidth; 2753 SplatBitSize = SplatBitSize * 2) 2754 SplatValue |= SplatValue.shl(SplatBitSize); 2755 2756 Constant = APInt::getAllOnesValue(BitWidth); 2757 for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i) 2758 Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth); 2759 } 2760 } 2761 2762 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is 2763 // actually legal and isn't going to get expanded, else this is a false 2764 // optimisation. 2765 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD, 2766 Load->getMemoryVT()); 2767 2768 // Resize the constant to the same size as the original memory access before 2769 // extension. If it is still the AllOnesValue then this AND is completely 2770 // unneeded. 2771 Constant = 2772 Constant.zextOrTrunc(Load->getMemoryVT().getScalarType().getSizeInBits()); 2773 2774 bool B; 2775 switch (Load->getExtensionType()) { 2776 default: B = false; break; 2777 case ISD::EXTLOAD: B = CanZextLoadProfitably; break; 2778 case ISD::ZEXTLOAD: 2779 case ISD::NON_EXTLOAD: B = true; break; 2780 } 2781 2782 if (B && Constant.isAllOnesValue()) { 2783 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to 2784 // preserve semantics once we get rid of the AND. 
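// e.g., for (and (extload i8 -> i32 x), 255) the mask keeps exactly the bits
// the memory access provides, so once the load zero-extends, the AND is
// redundant and can be removed.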
2785 SDValue NewLoad(Load, 0); 2786 if (Load->getExtensionType() == ISD::EXTLOAD) { 2787 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD, 2788 Load->getValueType(0), SDLoc(Load), 2789 Load->getChain(), Load->getBasePtr(), 2790 Load->getOffset(), Load->getMemoryVT(), 2791 Load->getMemOperand()); 2792 // Replace uses of the EXTLOAD with the new ZEXTLOAD. 2793 if (Load->getNumValues() == 3) { 2794 // PRE/POST_INC loads have 3 values. 2795 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1), 2796 NewLoad.getValue(2) }; 2797 CombineTo(Load, To, 3, true); 2798 } else { 2799 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1)); 2800 } 2801 } 2802 2803 // Fold the AND away, taking care not to fold to the old load node if we 2804 // replaced it. 2805 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0); 2806 2807 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2808 } 2809 } 2810 // fold (and (setcc x), (setcc y)) -> (setcc (and x, y)) 2811 if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){ 2812 ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get(); 2813 ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); 2814 2815 if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && 2816 LL.getValueType().isInteger()) { 2817 // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0) 2818 if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) { 2819 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0), 2820 LR.getValueType(), LL, RL); 2821 AddToWorklist(ORNode.getNode()); 2822 return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1); 2823 } 2824 // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1) 2825 if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) { 2826 SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0), 2827 LR.getValueType(), LL, RL); 2828 AddToWorklist(ANDNode.getNode()); 2829 return DAG.getSetCC(SDLoc(N), VT, ANDNode, LR, Op1); 2830 } 2831 // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1) 2832 if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) { 2833 SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0), 2834 LR.getValueType(), LL, RL); 2835 AddToWorklist(ORNode.getNode()); 2836 return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1); 2837 } 2838 } 2839 // Simplify (and (setne X, 0), (setne X, -1)) -> (setuge (add X, 1), 2) 2840 if (LL == RL && isa<ConstantSDNode>(LR) && isa<ConstantSDNode>(RR) && 2841 Op0 == Op1 && LL.getValueType().isInteger() && 2842 Op0 == ISD::SETNE && ((cast<ConstantSDNode>(LR)->isNullValue() && 2843 cast<ConstantSDNode>(RR)->isAllOnesValue()) || 2844 (cast<ConstantSDNode>(LR)->isAllOnesValue() && 2845 cast<ConstantSDNode>(RR)->isNullValue()))) { 2846 SDValue ADDNode = DAG.getNode(ISD::ADD, SDLoc(N0), LL.getValueType(), 2847 LL, DAG.getConstant(1, LL.getValueType())); 2848 AddToWorklist(ADDNode.getNode()); 2849 return DAG.getSetCC(SDLoc(N), VT, ADDNode, 2850 DAG.getConstant(2, LL.getValueType()), ISD::SETUGE); 2851 } 2852 // canonicalize equivalent to ll == rl 2853 if (LL == RR && LR == RL) { 2854 Op1 = ISD::getSetCCSwappedOperands(Op1); 2855 std::swap(RL, RR); 2856 } 2857 if (LL == RL && LR == RR) { 2858 bool isInteger = LL.getValueType().isInteger(); 2859 ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger); 2860 if (Result != ISD::SETCC_INVALID && 2861 (!LegalOperations || 2862 (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) && 2863 TLI.isOperationLegal(ISD::SETCC, 2864 getSetCCResultType(N0.getSimpleValueType()))))) 2865 
return DAG.getSetCC(SDLoc(N), N0.getValueType(), 2866 LL, LR, Result); 2867 } 2868 } 2869 2870 // Simplify: (and (op x...), (op y...)) -> (op (and x, y)) 2871 if (N0.getOpcode() == N1.getOpcode()) { 2872 SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N); 2873 if (Tmp.getNode()) return Tmp; 2874 } 2875 2876 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) 2877 // fold (and (sra)) -> (and (srl)) when possible. 2878 if (!VT.isVector() && 2879 SimplifyDemandedBits(SDValue(N, 0))) 2880 return SDValue(N, 0); 2881 2882 // fold (zext_inreg (extload x)) -> (zextload x) 2883 if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) { 2884 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 2885 EVT MemVT = LN0->getMemoryVT(); 2886 // If we zero all the possible extended bits, then we can turn this into 2887 // a zextload if we are running before legalize or the operation is legal. 2888 unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits(); 2889 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 2890 BitWidth - MemVT.getScalarType().getSizeInBits())) && 2891 ((!LegalOperations && !LN0->isVolatile()) || 2892 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) { 2893 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 2894 LN0->getChain(), LN0->getBasePtr(), 2895 MemVT, LN0->getMemOperand()); 2896 AddToWorklist(N); 2897 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 2898 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2899 } 2900 } 2901 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use 2902 if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 2903 N0.hasOneUse()) { 2904 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 2905 EVT MemVT = LN0->getMemoryVT(); 2906 // If we zero all the possible extended bits, then we can turn this into 2907 // a zextload if we are running before legalize or the operation is legal. 2908 unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits(); 2909 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 2910 BitWidth - MemVT.getScalarType().getSizeInBits())) && 2911 ((!LegalOperations && !LN0->isVolatile()) || 2912 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) { 2913 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 2914 LN0->getChain(), LN0->getBasePtr(), 2915 MemVT, LN0->getMemOperand()); 2916 AddToWorklist(N); 2917 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 2918 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2919 } 2920 } 2921 2922 // fold (and (load x), 255) -> (zextload x, i8) 2923 // fold (and (extload x, i16), 255) -> (zextload x, i8) 2924 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8) 2925 if (N1C && (N0.getOpcode() == ISD::LOAD || 2926 (N0.getOpcode() == ISD::ANY_EXTEND && 2927 N0.getOperand(0).getOpcode() == ISD::LOAD))) { 2928 bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND; 2929 LoadSDNode *LN0 = HasAnyExt 2930 ? 
cast<LoadSDNode>(N0.getOperand(0)) 2931 : cast<LoadSDNode>(N0); 2932 if (LN0->getExtensionType() != ISD::SEXTLOAD && 2933 LN0->isUnindexed() && N0.hasOneUse() && SDValue(LN0, 0).hasOneUse()) { 2934 uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits(); 2935 if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue())){ 2936 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); 2937 EVT LoadedVT = LN0->getMemoryVT(); 2938 2939 if (ExtVT == LoadedVT && 2940 (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) { 2941 EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; 2942 2943 SDValue NewLoad = 2944 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, 2945 LN0->getChain(), LN0->getBasePtr(), ExtVT, 2946 LN0->getMemOperand()); 2947 AddToWorklist(N); 2948 CombineTo(LN0, NewLoad, NewLoad.getValue(1)); 2949 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2950 } 2951 2952 // Do not change the width of a volatile load. 2953 // Do not generate loads of non-round integer types since these can 2954 // be expensive (and would be wrong if the type is not byte sized). 2955 if (!LN0->isVolatile() && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() && 2956 (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) { 2957 EVT PtrType = LN0->getOperand(1).getValueType(); 2958 2959 unsigned Alignment = LN0->getAlignment(); 2960 SDValue NewPtr = LN0->getBasePtr(); 2961 2962 // For big endian targets, we need to add an offset to the pointer 2963 // to load the correct bytes. For little endian systems, we merely 2964 // need to read fewer bytes from the same pointer. 2965 if (TLI.isBigEndian()) { 2966 unsigned LVTStoreBytes = LoadedVT.getStoreSize(); 2967 unsigned EVTStoreBytes = ExtVT.getStoreSize(); 2968 unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; 2969 NewPtr = DAG.getNode(ISD::ADD, SDLoc(LN0), PtrType, 2970 NewPtr, DAG.getConstant(PtrOff, PtrType)); 2971 Alignment = MinAlign(Alignment, PtrOff); 2972 } 2973 2974 AddToWorklist(NewPtr.getNode()); 2975 2976 EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; 2977 SDValue Load = 2978 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, 2979 LN0->getChain(), NewPtr, 2980 LN0->getPointerInfo(), 2981 ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 2982 LN0->isInvariant(), Alignment, LN0->getAAInfo()); 2983 AddToWorklist(N); 2984 CombineTo(LN0, Load, Load.getValue(1)); 2985 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2986 } 2987 } 2988 } 2989 } 2990 2991 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && 2992 VT.getSizeInBits() <= 64) { 2993 if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 2994 APInt ADDC = ADDI->getAPIntValue(); 2995 if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 2996 // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal 2997 // immediate for an add, but it is legal if its top c2 bits are set, 2998 // transform the ADD so the immediate doesn't need to be materialized 2999 // in a register. 3000 if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { 3001 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 3002 SRLI->getZExtValue()); 3003 if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) { 3004 ADDC |= Mask; 3005 if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 3006 SDValue NewAdd = 3007 DAG.getNode(ISD::ADD, SDLoc(N0), VT, 3008 N0.getOperand(0), DAG.getConstant(ADDC, VT)); 3009 CombineTo(N0.getNode(), NewAdd); 3010 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
3011 } 3012 } 3013 } 3014 } 3015 } 3016 } 3017 3018 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const) 3019 if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) { 3020 SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 3021 N0.getOperand(1), false); 3022 if (BSwap.getNode()) 3023 return BSwap; 3024 } 3025 3026 return SDValue(); 3027 } 3028 3029 /// MatchBSwapHWordLow - Match (a >> 8) | (a << 8) as (bswap a) >> 16 3030 /// 3031 SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1, 3032 bool DemandHighBits) { 3033 if (!LegalOperations) 3034 return SDValue(); 3035 3036 EVT VT = N->getValueType(0); 3037 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16) 3038 return SDValue(); 3039 if (!TLI.isOperationLegal(ISD::BSWAP, VT)) 3040 return SDValue(); 3041 3042 // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff) 3043 bool LookPassAnd0 = false; 3044 bool LookPassAnd1 = false; 3045 if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL) 3046 std::swap(N0, N1); 3047 if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL) 3048 std::swap(N0, N1); 3049 if (N0.getOpcode() == ISD::AND) { 3050 if (!N0.getNode()->hasOneUse()) 3051 return SDValue(); 3052 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3053 if (!N01C || N01C->getZExtValue() != 0xFF00) 3054 return SDValue(); 3055 N0 = N0.getOperand(0); 3056 LookPassAnd0 = true; 3057 } 3058 3059 if (N1.getOpcode() == ISD::AND) { 3060 if (!N1.getNode()->hasOneUse()) 3061 return SDValue(); 3062 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 3063 if (!N11C || N11C->getZExtValue() != 0xFF) 3064 return SDValue(); 3065 N1 = N1.getOperand(0); 3066 LookPassAnd1 = true; 3067 } 3068 3069 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 3070 std::swap(N0, N1); 3071 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 3072 return SDValue(); 3073 if (!N0.getNode()->hasOneUse() || 3074 !N1.getNode()->hasOneUse()) 3075 return SDValue(); 3076 3077 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3078 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 3079 if (!N01C || !N11C) 3080 return SDValue(); 3081 if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8) 3082 return SDValue(); 3083 3084 // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8) 3085 SDValue N00 = N0->getOperand(0); 3086 if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) { 3087 if (!N00.getNode()->hasOneUse()) 3088 return SDValue(); 3089 ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1)); 3090 if (!N001C || N001C->getZExtValue() != 0xFF) 3091 return SDValue(); 3092 N00 = N00.getOperand(0); 3093 LookPassAnd0 = true; 3094 } 3095 3096 SDValue N10 = N1->getOperand(0); 3097 if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) { 3098 if (!N10.getNode()->hasOneUse()) 3099 return SDValue(); 3100 ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1)); 3101 if (!N101C || N101C->getZExtValue() != 0xFF00) 3102 return SDValue(); 3103 N10 = N10.getOperand(0); 3104 LookPassAnd1 = true; 3105 } 3106 3107 if (N00 != N10) 3108 return SDValue(); 3109 3110 // Make sure everything beyond the low halfword gets set to zero since the SRL // 16 will clear the top bits.
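// e.g., for i32 the match produces (srl (bswap a), 16): the bswap moves the
// two low bytes to the top of the register and the shift brings them back
// down, now swapped, zeroing the high halfword.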
SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                                        bool DemandHighBits) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
    return SDValue();
  if (!TLI.isOperationLegal(ISD::BSWAP, VT))
    return SDValue();

  // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
  bool LookPassAnd0 = false;
  bool LookPassAnd1 = false;
  if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
    std::swap(N0, N1);
  if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() == ISD::AND) {
    if (!N0.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!N01C || N01C->getZExtValue() != 0xFF00)
      return SDValue();
    N0 = N0.getOperand(0);
    LookPassAnd0 = true;
  }

  if (N1.getOpcode() == ISD::AND) {
    if (!N1.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C || N11C->getZExtValue() != 0xFF)
      return SDValue();
    N1 = N1.getOperand(0);
    LookPassAnd1 = true;
  }

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.getNode()->hasOneUse() ||
      !N1.getNode()->hasOneUse())
    return SDValue();

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
  if (!N01C || !N11C)
    return SDValue();
  if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
    return SDValue();

  // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
  SDValue N00 = N0->getOperand(0);
  if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
    if (!N00.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
    if (!N001C || N001C->getZExtValue() != 0xFF)
      return SDValue();
    N00 = N00.getOperand(0);
    LookPassAnd0 = true;
  }

  SDValue N10 = N1->getOperand(0);
  if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
    if (!N10.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
    if (!N101C || N101C->getZExtValue() != 0xFF00)
      return SDValue();
    N10 = N10.getOperand(0);
    LookPassAnd1 = true;
  }

  if (N00 != N10)
    return SDValue();

  // Make sure everything beyond the low halfword gets set to zero since the
  // SRL by 16 will clear the top bits.
  unsigned OpSizeInBits = VT.getSizeInBits();
  if (DemandHighBits && OpSizeInBits > 16) {
    // If the left-shift isn't masked out then the only way this is a bswap is
    // if all bits beyond the low 8 are 0. In that case the entire pattern
    // reduces to a left shift anyway: leave it for other parts of the combiner.
    if (!LookPassAnd0)
      return SDValue();

    // However, if the right shift isn't masked out then it might be because
    // it's not needed. See if we can spot that too.
    if (!LookPassAnd1 &&
        !DAG.MaskedValueIsZero(
            N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16)))
      return SDValue();
  }

  SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00);
  if (OpSizeInBits > 16)
    Res = DAG.getNode(ISD::SRL, SDLoc(N), VT, Res,
                      DAG.getConstant(OpSizeInBits-16, getShiftAmountTy(VT)));
  return Res;
}

/// isBSwapHWordElement - Return true if the specified node is an element
/// that makes up a 32-bit packed halfword byteswap. i.e.
/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
static bool isBSwapHWordElement(SDValue N, SmallVectorImpl<SDNode *> &Parts) {
  if (!N.getNode()->hasOneUse())
    return false;

  unsigned Opc = N.getOpcode();
  if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
    return false;

  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!N1C)
    return false;

  unsigned Num;
  switch (N1C->getZExtValue()) {
  default:
    return false;
  case 0xFF:       Num = 0; break;
  case 0xFF00:     Num = 1; break;
  case 0xFF0000:   Num = 2; break;
  case 0xFF000000: Num = 3; break;
  }

  // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
  SDValue N0 = N.getOperand(0);
  if (Opc == ISD::AND) {
    if (Num == 0 || Num == 2) {
      // (x >> 8) & 0xff
      // (x >> 8) & 0xff0000
      if (N0.getOpcode() != ISD::SRL)
        return false;
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C || C->getZExtValue() != 8)
        return false;
    } else {
      // (x << 8) & 0xff00
      // (x << 8) & 0xff000000
      if (N0.getOpcode() != ISD::SHL)
        return false;
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C || C->getZExtValue() != 8)
        return false;
    }
  } else if (Opc == ISD::SHL) {
    // (x & 0xff) << 8
    // (x & 0xff0000) << 8
    if (Num != 0 && Num != 2)
      return false;
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!C || C->getZExtValue() != 8)
      return false;
  } else { // Opc == ISD::SRL
    // (x & 0xff00) >> 8
    // (x & 0xff000000) >> 8
    if (Num != 1 && Num != 3)
      return false;
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!C || C->getZExtValue() != 8)
      return false;
  }

  if (Parts[Num])
    return false;

  Parts[Num] = N0.getOperand(0).getNode();
  return true;
}

/// MatchBSwapHWord - Match a 32-bit packed halfword bswap. That is
/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
/// => (rotl (bswap x), 16)
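/// For example, with x == 0xAABBCCDD the four elements evaluate to
/// 0x0000DD00, 0x000000CC, 0xBB000000 and 0x00AA0000, whose OR is
/// 0xBBAADDCC; bswap(x) == 0xDDCCBBAA rotated left by 16 is the same value.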
SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();
  if (!TLI.isOperationLegal(ISD::BSWAP, VT))
    return SDValue();

  SmallVector<SDNode*,4> Parts(4, (SDNode*)nullptr);
  // Look for either
  // (or (or (and), (and)), (or (and), (and)))
  // (or (or (or (and), (and)), (and)), (and))
  if (N0.getOpcode() != ISD::OR)
    return SDValue();
  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);

  if (N1.getOpcode() == ISD::OR &&
      N00.getNumOperands() == 2 && N01.getNumOperands() == 2) {
    // (or (or (and), (and)), (or (and), (and)))
    SDValue N000 = N00.getOperand(0);
    if (!isBSwapHWordElement(N000, Parts))
      return SDValue();

    SDValue N001 = N00.getOperand(1);
    if (!isBSwapHWordElement(N001, Parts))
      return SDValue();
    SDValue N010 = N01.getOperand(0);
    if (!isBSwapHWordElement(N010, Parts))
      return SDValue();
    SDValue N011 = N01.getOperand(1);
    if (!isBSwapHWordElement(N011, Parts))
      return SDValue();
  } else {
    // (or (or (or (and), (and)), (and)), (and))
    if (!isBSwapHWordElement(N1, Parts))
      return SDValue();
    if (!isBSwapHWordElement(N01, Parts))
      return SDValue();
    if (N00.getOpcode() != ISD::OR)
      return SDValue();
    SDValue N000 = N00.getOperand(0);
    if (!isBSwapHWordElement(N000, Parts))
      return SDValue();
    SDValue N001 = N00.getOperand(1);
    if (!isBSwapHWordElement(N001, Parts))
      return SDValue();
  }

  // Make sure the parts are all coming from the same node.
  if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
    return SDValue();

  SDValue BSwap = DAG.getNode(ISD::BSWAP, SDLoc(N), VT,
                              SDValue(Parts[0],0));

  // Result of the bswap should be rotated by 16. If it's not legal, then
  // do (x << 16) | (x >> 16).
  SDValue ShAmt = DAG.getConstant(16, getShiftAmountTy(VT));
  if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
    return DAG.getNode(ISD::ROTL, SDLoc(N), VT, BSwap, ShAmt);
  if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
    return DAG.getNode(ISD::ROTR, SDLoc(N), VT, BSwap, ShAmt);
  return DAG.getNode(ISD::OR, SDLoc(N), VT,
                     DAG.getNode(ISD::SHL, SDLoc(N), VT, BSwap, ShAmt),
                     DAG.getNode(ISD::SRL, SDLoc(N), VT, BSwap, ShAmt));
}

SDValue DAGCombiner::visitOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue LL, LR, RL, RR, CC0, CC1;
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N1.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;

    // fold (or x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;

    // fold (or x, -1) -> -1, vector edition
    if (ISD::isBuildVectorAllOnes(N0.getNode()))
      return N0;
    if (ISD::isBuildVectorAllOnes(N1.getNode()))
      return N1;

    // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask1)
    // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf B, A, Mask2)
    // Do this only if the resulting shuffle is legal.
    if (isa<ShuffleVectorSDNode>(N0) &&
        isa<ShuffleVectorSDNode>(N1) &&
        // Avoid folding a node with illegal type.
        TLI.isTypeLegal(VT) &&
        N0->getOperand(1) == N1->getOperand(1) &&
        ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode())) {
      bool CanFold = true;
      unsigned NumElts = VT.getVectorNumElements();
      const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
      const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
      // We construct two shuffle masks:
      // - Mask1 is a shuffle mask for a shuffle with N0 as the first operand
      // and N1 as the second operand.
      // - Mask2 is a shuffle mask for a shuffle with N1 as the first operand
      // and N0 as the second operand.
      // We do this because OR is commutable and therefore there might be
      // two ways to fold this node into a shuffle.
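      // For instance, with NumElts == 4, MA == <0,4,1,4> and MB == <4,2,5,3>,
      // each lane takes its value from exactly one of A and B (the other
      // shuffle's lane is the zero vector), so the OR selects lane-wise and
      // Mask1 becomes <0,6,1,7>, i.e. (shuf A, B, <0,6,1,7>).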
      SmallVector<int,4> Mask1;
      SmallVector<int,4> Mask2;

      for (unsigned i = 0; i != NumElts && CanFold; ++i) {
        int M0 = SV0->getMaskElt(i);
        int M1 = SV1->getMaskElt(i);

        // Both shuffle indexes are undef. Propagate Undef.
        if (M0 < 0 && M1 < 0) {
          Mask1.push_back(M0);
          Mask2.push_back(M0);
          continue;
        }

        if (M0 < 0 || M1 < 0 ||
            (M0 < (int)NumElts && M1 < (int)NumElts) ||
            (M0 >= (int)NumElts && M1 >= (int)NumElts)) {
          CanFold = false;
          break;
        }

        Mask1.push_back(M0 < (int)NumElts ? M0 : M1 + NumElts);
        Mask2.push_back(M1 < (int)NumElts ? M1 : M0 + NumElts);
      }

      if (CanFold) {
        // Fold this sequence only if the resulting shuffle is 'legal'.
        if (TLI.isShuffleMaskLegal(Mask1, VT))
          return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0),
                                      N1->getOperand(0), &Mask1[0]);
        if (TLI.isShuffleMaskLegal(Mask2, VT))
          return DAG.getVectorShuffle(VT, SDLoc(N), N1->getOperand(0),
                                      N0->getOperand(0), &Mask2[0]);
      }
    }
  }

  // fold (or x, undef) -> -1
  if (!LegalOperations &&
      (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
    EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
    return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  }
  // fold (or c1, c2) -> c1|c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
  // fold (or x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (or x, -1) -> -1
  if (N1C && N1C->isAllOnesValue())
    return N1;
  // fold (or x, c) -> c iff (x & ~c) == 0
  if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
    return N1;

  // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
  SDValue BSwap = MatchBSwapHWord(N, N0, N1);
  if (BSwap.getNode())
    return BSwap;
  BSwap = MatchBSwapHWordLow(N, N0, N1);
  if (BSwap.getNode())
    return BSwap;

  // reassociate or
  SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1);
  if (ROR.getNode())
    return ROR;
  // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
  // iff (c1 & c2) == 0.
  if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
      isa<ConstantSDNode>(N0.getOperand(1))) {
    ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
    if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) {
      SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1);
      if (!COR.getNode())
        return SDValue();
      return DAG.getNode(ISD::AND, SDLoc(N), VT,
                         DAG.getNode(ISD::OR, SDLoc(N0), VT,
                                     N0.getOperand(0), N1), COR);
    }
  }
  // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
  if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
    ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
    ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();

    if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
        LL.getValueType().isInteger()) {
      // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
      // fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
      if (cast<ConstantSDNode>(LR)->isNullValue() &&
          (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
        SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(LR),
                                     LR.getValueType(), LL, RL);
        AddToWorklist(ORNode.getNode());
        return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1);
      }
      // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
      // fold (or (setgt X, -1), (setgt Y, -1)) -> (setgt (and X, Y), -1)
      if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
          (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
        SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(LR),
                                      LR.getValueType(), LL, RL);
        AddToWorklist(ANDNode.getNode());
        return DAG.getSetCC(SDLoc(N), VT, ANDNode, LR, Op1);
      }
    }
    // canonicalize equivalent to ll == rl
    if (LL == RR && LR == RL) {
      Op1 = ISD::getSetCCSwappedOperands(Op1);
      std::swap(RL, RR);
    }
    if (LL == RL && LR == RR) {
      bool isInteger = LL.getValueType().isInteger();
      ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
      if (Result != ISD::SETCC_INVALID &&
          (!LegalOperations ||
           (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) &&
            TLI.isOperationLegal(ISD::SETCC,
                                 getSetCCResultType(N0.getValueType())))))
        return DAG.getSetCC(SDLoc(N), N0.getValueType(),
                            LL, LR, Result);
    }
  }

  // Simplify: (or (op x...), (op y...)) -> (op (or x, y))
  if (N0.getOpcode() == N1.getOpcode()) {
    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
    if (Tmp.getNode()) return Tmp;
  }

  // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
  if (N0.getOpcode() == ISD::AND &&
      N1.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      N1.getOperand(1).getOpcode() == ISD::Constant &&
      // Don't increase # computations.
      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
    // We can only do this xform if we know that bits from X that are set in C2
    // but not in C1 are already zero. Likewise for Y.
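    // For example, with C1 == 0x00FF and C2 == 0xFF00: if bits 8-15 of X and
    // bits 0-7 of Y are known zero, then
    //   (or (and X, 0x00FF), (and Y, 0xFF00)) == (and (or X, Y), 0xFFFF),
    // saving one of the two ANDs.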
    const APInt &LHSMask =
      cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
    const APInt &RHSMask =
      cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();

    if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
        DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
      SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
                              N0.getOperand(0), N1.getOperand(0));
      return DAG.getNode(ISD::AND, SDLoc(N), VT, X,
                         DAG.getConstant(LHSMask | RHSMask, VT));
    }
  }

  // See if this is some rotate idiom.
  if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
    return SDValue(Rot, 0);

  // Simplify the operands using demanded-bits information.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present.
static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
  if (Op.getOpcode() == ISD::AND) {
    if (isa<ConstantSDNode>(Op.getOperand(1))) {
      Mask = Op.getOperand(1);
      Op = Op.getOperand(0);
    } else {
      return false;
    }
  }

  if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
    Shift = Op;
    return true;
  }

  return false;
}

// Return true if we can prove that, whenever Neg and Pos are both in the
// range [0, OpSize), Neg == (Pos == 0 ? 0 : OpSize - Pos).  This means that
// for two opposing shifts shift1 and shift2 and a value X with OpBits bits:
//
//     (or (shift1 X, Neg), (shift2 X, Pos))
//
// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
// in direction shift1 by Neg.  The range [0, OpSize) means that we only need
// to consider shift amounts with defined behavior.
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned OpSize) {
  // If OpSize is a power of 2 then:
  //
  //  (a) (Pos == 0 ? 0 : OpSize - Pos) == (OpSize - Pos) & (OpSize - 1)
  //  (b) Neg == Neg & (OpSize - 1) whenever Neg is in [0, OpSize).
  //
  // So if OpSize is a power of 2 and Neg is (and Neg', OpSize-1), we check
  // for the stronger condition:
  //
  //     Neg & (OpSize - 1) == (OpSize - Pos) & (OpSize - 1)    [A]
  //
  // for all Neg and Pos.  Since Neg & (OpSize - 1) == Neg' & (OpSize - 1)
  // we can just replace Neg with Neg' for the rest of the function.
  //
  // In other cases we check for the even stronger condition:
  //
  //     Neg == OpSize - Pos                                    [B]
  //
  // for all Neg and Pos.  Note that the (or ...) then invokes undefined
  // behavior if Pos == 0 (and consequently Neg == OpSize).
  //
  // We could actually use [A] whenever OpSize is a power of 2, but the
  // only extra cases that it would match are those uninteresting ones
  // where Neg and Pos are never in range at the same time.  E.g. for
  // OpSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
  // as well as (sub 32, Pos), but:
  //
  //     (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
  //
  // always invokes undefined behavior for 32-bit X.
  //
  // Below, Mask == OpSize - 1 when using [A] and is all-ones otherwise.
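  // Two illustrative matches for OpSize == 32: Neg == (sub 32, y) with
  // Pos == y satisfies [B] directly, and Neg == (and (sub 0, y), 31) with
  // Pos == (and y, 31) satisfies [A], since -y and 32 - y agree modulo 32.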
  unsigned MaskLoBits = 0;
  if (Neg.getOpcode() == ISD::AND &&
      isPowerOf2_64(OpSize) &&
      Neg.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Neg.getOperand(1))->getAPIntValue() == OpSize - 1) {
    Neg = Neg.getOperand(0);
    MaskLoBits = Log2_64(OpSize);
  }

  // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
  if (Neg.getOpcode() != ISD::SUB)
    return false;
  ConstantSDNode *NegC = dyn_cast<ConstantSDNode>(Neg.getOperand(0));
  if (!NegC)
    return false;
  SDValue NegOp1 = Neg.getOperand(1);

  // On the RHS of [A], if Pos is Pos' & (OpSize - 1), just replace Pos with
  // Pos'.  The truncation is redundant for the purpose of the equality.
  if (MaskLoBits &&
      Pos.getOpcode() == ISD::AND &&
      Pos.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() == OpSize - 1)
    Pos = Pos.getOperand(0);

  // The condition we need is now:
  //
  //     (NegC - NegOp1) & Mask == (OpSize - Pos) & Mask
  //
  // If NegOp1 == Pos then we need:
  //
  //     OpSize & Mask == NegC & Mask
  //
  // (because "x & Mask" is a truncation and distributes through subtraction).
  APInt Width;
  if (Pos == NegOp1)
    Width = NegC->getAPIntValue();
  // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
  // Then the condition we want to prove becomes:
  //
  //     (NegC - NegOp1) & Mask == (OpSize - (NegOp1 + PosC)) & Mask
  //
  // which, again because "x & Mask" is a truncation, becomes:
  //
  //     NegC & Mask == (OpSize - PosC) & Mask
  //     OpSize & Mask == (NegC + PosC) & Mask
  else if (Pos.getOpcode() == ISD::ADD &&
           Pos.getOperand(0) == NegOp1 &&
           Pos.getOperand(1).getOpcode() == ISD::Constant)
    Width = (cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() +
             NegC->getAPIntValue());
  else
    return false;

  // Now we just need to check that OpSize & Mask == Width & Mask.
  if (MaskLoBits)
    // OpSize & Mask is 0 since Mask is OpSize - 1.
    return Width.getLoBits(MaskLoBits) == 0;
  return Width == OpSize;
}

// A subroutine of MatchRotate used once we have found an OR of two opposite
// shifts of Shifted.  If Neg == <operand size> - Pos then the OR reduces
// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
// former being preferred if supported.  InnerPos and InnerNeg are Pos and
// Neg with outer conversions stripped away.
SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
                                       SDValue Neg, SDValue InnerPos,
                                       SDValue InnerNeg, unsigned PosOpcode,
                                       unsigned NegOpcode, SDLoc DL) {
  // fold (or (shl x, (*ext y)),
  //          (srl x, (*ext (sub 32, y)))) ->
  //   (rotl x, y) or (rotr x, (sub 32, y))
  //
  // fold (or (shl x, (*ext (sub 32, y))),
  //          (srl x, (*ext y))) ->
  //   (rotr x, y) or (rotl x, (sub 32, y))
  EVT VT = Shifted.getValueType();
  if (matchRotateSub(InnerPos, InnerNeg, VT.getSizeInBits())) {
    bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
    return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
                       HasPos ? Pos : Neg).getNode();
  }

  return nullptr;
}

// MatchRotate - Handle an 'or' of two operands.  If this is one of the many
// idioms for rotate, and if the target supports rotation instructions,
// generate a rot[lr].
SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
  // Must be a legal type.  Expanded 'n promoted things won't work with rotates.
  EVT VT = LHS.getValueType();
  if (!TLI.isTypeLegal(VT)) return nullptr;

  // The target must have at least one rotate flavor.
  bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT);
  bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT);
  if (!HasROTL && !HasROTR) return nullptr;

  // Match "(X shl/srl V1) & V2" where V2 may not be present.
  SDValue LHSShift;   // The shift.
  SDValue LHSMask;    // AND value if any.
  if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
    return nullptr; // Not part of a rotate.

  SDValue RHSShift;   // The shift.
  SDValue RHSMask;    // AND value if any.
  if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
    return nullptr; // Not part of a rotate.

  if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
    return nullptr;   // Not shifting the same value.

  if (LHSShift.getOpcode() == RHSShift.getOpcode())
    return nullptr;   // Shifts must disagree.

  // Canonicalize shl to left side in a shl/srl pair.
  if (RHSShift.getOpcode() == ISD::SHL) {
    std::swap(LHS, RHS);
    std::swap(LHSShift, RHSShift);
    std::swap(LHSMask , RHSMask );
  }

  unsigned OpSizeInBits = VT.getSizeInBits();
  SDValue LHSShiftArg = LHSShift.getOperand(0);
  SDValue LHSShiftAmt = LHSShift.getOperand(1);
  SDValue RHSShiftArg = RHSShift.getOperand(0);
  SDValue RHSShiftAmt = RHSShift.getOperand(1);

  // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
  // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
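  // E.g. for i32, (or (shl x, 8), (srl x, 24)) becomes (rotl x, 8), or
  // equivalently (rotr x, 24), because the shift amounts sum to 32.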
  if (LHSShiftAmt.getOpcode() == ISD::Constant &&
      RHSShiftAmt.getOpcode() == ISD::Constant) {
    uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue();
    uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue();
    if ((LShVal + RShVal) != OpSizeInBits)
      return nullptr;

    SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
                              LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);

    // If there is an AND of either shifted operand, apply it to the result.
    if (LHSMask.getNode() || RHSMask.getNode()) {
      APInt Mask = APInt::getAllOnesValue(OpSizeInBits);

      if (LHSMask.getNode()) {
        APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal);
        Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits;
      }
      if (RHSMask.getNode()) {
        APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal);
        Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits;
      }

      Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, VT));
    }

    return Rot.getNode();
  }

  // If there is a mask here, and we have a variable shift, we can't be sure
  // that we're masking out the right stuff.
  if (LHSMask.getNode() || RHSMask.getNode())
    return nullptr;

  // If the shift amount is sign/zext/any-extended just peel it off.
  SDValue LExtOp0 = LHSShiftAmt;
  SDValue RExtOp0 = RHSShiftAmt;
  if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
       LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
       LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
       LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
      (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
       RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
       RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
       RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
    LExtOp0 = LHSShiftAmt.getOperand(0);
    RExtOp0 = RHSShiftAmt.getOperand(0);
  }

  SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt,
                                   LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL);
  if (TryL)
    return TryL;

  SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
                                   RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL);
  if (TryR)
    return TryR;

  return nullptr;
}

SDValue DAGCombiner::visitXOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue LHS, RHS, CC;
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;

    // fold (xor x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
  }

  // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // fold (xor x, undef) -> undef
  if (N0.getOpcode() == ISD::UNDEF)
    return N0;
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;
  // fold (xor c1, c2) -> c1^c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C);
  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
  // fold (xor x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // reassociate xor
  SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1);
  if (RXOR.getNode())
    return RXOR;

  // fold !(x cc y) -> (x !cc y)
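  // E.g. (xor (setcc a, b, seteq), 1) with a 0/1 boolean becomes
  // (setcc a, b, setne); the xor with 1 is exactly a logical negation.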
  if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
    bool isInt = LHS.getValueType().isInteger();
    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                                               isInt);

    if (!LegalOperations ||
        TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
      switch (N0.getOpcode()) {
      default:
        llvm_unreachable("Unhandled SetCC Equivalent!");
      case ISD::SETCC:
        return DAG.getSetCC(SDLoc(N), VT, LHS, RHS, NotCC);
      case ISD::SELECT_CC:
        return DAG.getSelectCC(SDLoc(N), LHS, RHS, N0.getOperand(2),
                               N0.getOperand(3), NotCC);
      }
    }
  }

  // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
  if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND &&
      N0.getNode()->hasOneUse() &&
      isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
    SDValue V = N0.getOperand(0);
    V = DAG.getNode(ISD::XOR, SDLoc(N0), V.getValueType(), V,
                    DAG.getConstant(1, V.getValueType()));
    AddToWorklist(V.getNode());
    return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V);
  }

  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
  if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
      AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
      return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
    }
  }
  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
  if (N1C && N1C->isAllOnesValue() &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
      AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
      return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
    }
  }
  // fold (xor (and x, y), y) -> (and (not x), y)
  if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
      N0->getOperand(1) == N1) {
    SDValue X = N0->getOperand(0);
    SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
    AddToWorklist(NotX.getNode());
    return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1);
  }
  // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
  if (N1C && N0.getOpcode() == ISD::XOR) {
    ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (N00C)
      return DAG.getNode(ISD::XOR, SDLoc(N), VT, N0.getOperand(1),
                         DAG.getConstant(N1C->getAPIntValue() ^
                                         N00C->getAPIntValue(), VT));
    if (N01C)
      return DAG.getNode(ISD::XOR, SDLoc(N), VT, N0.getOperand(0),
                         DAG.getConstant(N1C->getAPIntValue() ^
                                         N01C->getAPIntValue(), VT));
  }
  // fold (xor x, x) -> 0
  if (N0 == N1)
    return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);

  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
  if (N0.getOpcode() == N1.getOpcode()) {
    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
    if (Tmp.getNode()) return Tmp;
  }

  // Simplify the expression using non-local knowledge.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// visitShiftByConstant - Handle transforms common to the three shifts, when
/// the shift amount is a constant.
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
  // We can't and shouldn't fold opaque constants.
  if (Amt->isOpaque())
    return SDValue();

  SDNode *LHS = N->getOperand(0).getNode();
  if (!LHS->hasOneUse()) return SDValue();

  // We want to pull some binops through shifts, so that we have (and (shift))
  // instead of (shift (and)), likewise for add, or, xor, etc.  This sort of
  // thing happens with address calculations, so it's important to canonicalize
  // it.
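  // E.g. (shl (or (srl x, 4), 0xFF), 2) can become
  // (or (shl (srl x, 4), 2), 0x3FC), exposing the shifted constant to
  // further folding; shl distributes over or/and/xor.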
  bool HighBitSet = false;  // Can we transform this if the high bit is set?

  switch (LHS->getOpcode()) {
  default: return SDValue();
  case ISD::OR:
  case ISD::XOR:
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  case ISD::AND:
    HighBitSet = true;  // We can only transform sra if the high bit is set.
    break;
  case ISD::ADD:
    if (N->getOpcode() != ISD::SHL)
      return SDValue(); // only shl(add) not sr[al](add).
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  }

  // We require the RHS of the binop to be a constant and not opaque as well.
  ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
  if (!BinOpCst || BinOpCst->isOpaque()) return SDValue();

  // FIXME: disable this unless the input to the binop is a shift by a
  // constant.  If it is not a shift, it pessimizes some common cases like:
  //
  //    void foo(int *X, int i) { X[i & 1235] = 1; }
  //    int bar(int *X, int i) { return X[i & 255]; }
  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
  if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
       BinOpLHSVal->getOpcode() != ISD::SRA &&
       BinOpLHSVal->getOpcode() != ISD::SRL) ||
      !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
    return SDValue();

  EVT VT = N->getValueType(0);

  // If this is a signed shift right, and the high bit is modified by the
  // logical operation, do not perform the transformation. The HighBitSet
  // boolean indicates the value of the high bit of the constant which would
  // cause it to be modified for this operation.
  if (N->getOpcode() == ISD::SRA) {
    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
    if (BinOpRHSSignSet != HighBitSet)
      return SDValue();
  }

  if (!TLI.isDesirableToCommuteWithShift(LHS))
    return SDValue();

  // Fold the constants, shifting the binop RHS by the shift amount.
  SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
                               N->getValueType(0),
                               LHS->getOperand(1), N->getOperand(1));
  assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");

  // Create the new shift.
  SDValue NewShift = DAG.getNode(N->getOpcode(),
                                 SDLoc(LHS->getOperand(0)),
                                 VT, LHS->getOperand(0), N->getOperand(1));

  // Create the new binop.
  return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS);
}

SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
  assert(N->getOpcode() == ISD::TRUNCATE);
  assert(N->getOperand(0).getOpcode() == ISD::AND);

  // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
  if (N->hasOneUse() && N->getOperand(0).hasOneUse()) {
    SDValue N01 = N->getOperand(0).getOperand(1);

    if (ConstantSDNode *N01C = isConstOrConstSplat(N01)) {
      EVT TruncVT = N->getValueType(0);
      SDValue N00 = N->getOperand(0).getOperand(0);
      APInt TruncC = N01C->getAPIntValue();
      TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());

      return DAG.getNode(ISD::AND, SDLoc(N), TruncVT,
                         DAG.getNode(ISD::TRUNCATE, SDLoc(N), TruncVT, N00),
                         DAG.getConstant(TruncC, TruncVT));
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitRotate(SDNode *N) {
  // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
  if (N->getOperand(1).getOpcode() == ISD::TRUNCATE &&
      N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) {
    SDValue NewOp1 = distributeTruncateThroughAnd(N->getOperand(1).getNode());
    if (NewOp1.getNode())
      return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
                         N->getOperand(0), NewOp1);
  }
  return SDValue();
}

SDValue DAGCombiner::visitSHL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;

    BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
    // If setcc produces all-one true value then:
    // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
    if (N1CV && N1CV->isConstant()) {
      if (N0.getOpcode() == ISD::AND) {
        SDValue N00 = N0->getOperand(0);
        SDValue N01 = N0->getOperand(1);
        BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);

        if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
            TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrNegativeOneBooleanContent) {
          SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, VT, N01CV, N1CV);
          if (C.getNode())
            return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
        }
      } else {
        N1C = isConstOrConstSplat(N1);
      }
    }
  }

  // fold (shl c1, c2) -> c1<<c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
  // fold (shl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (shl x, c >= size(x)) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (shl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (shl undef, x) -> 0
  if (N0.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // if (shl x, c) is known to be zero, return 0
  if (DAG.MaskedValueIsZero(SDValue(N, 0),
                            APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, VT);
  // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
    if (NewOp1.getNode())
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
  }

  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
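  // E.g. for i32, (shl (shl x, 10), 6) -> (shl x, 16), while
  // (shl (shl x, 20), 20) -> 0 because all bits are shifted out.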
  if (N1C && N0.getOpcode() == ISD::SHL) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      uint64_t c1 = N0C1->getZExtValue();
      uint64_t c2 = N1C->getZExtValue();
      if (c1 + c2 >= OpSizeInBits)
        return DAG.getConstant(0, VT);
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
                         DAG.getConstant(c1 + c2, N1.getValueType()));
    }
  }

  // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
  // For this to be valid, the second form must not preserve any of the bits
  // that are shifted out by the inner shift in the first form.  This means
  // the outer shift size must be >= the number of bits added by the ext.
  // As a corollary, we don't care what kind of ext it is.
  if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
              N0.getOpcode() == ISD::ANY_EXTEND ||
              N0.getOpcode() == ISD::SIGN_EXTEND) &&
      N0.getOperand(0).getOpcode() == ISD::SHL) {
    SDValue N0Op0 = N0.getOperand(0);
    if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
      uint64_t c1 = N0Op0C1->getZExtValue();
      uint64_t c2 = N1C->getZExtValue();
      EVT InnerShiftVT = N0Op0.getValueType();
      uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
      if (c2 >= OpSizeInBits - InnerShiftSize) {
        if (c1 + c2 >= OpSizeInBits)
          return DAG.getConstant(0, VT);
        return DAG.getNode(ISD::SHL, SDLoc(N0), VT,
                           DAG.getNode(N0.getOpcode(), SDLoc(N0), VT,
                                       N0Op0->getOperand(0)),
                           DAG.getConstant(c1 + c2, N1.getValueType()));
      }
    }
  }

  // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
  // Only fold this if the inner zext has no other uses to avoid increasing
  // the total number of instructions.
  if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
      N0.getOperand(0).getOpcode() == ISD::SRL) {
    SDValue N0Op0 = N0.getOperand(0);
    if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
      uint64_t c1 = N0Op0C1->getZExtValue();
      if (c1 < VT.getScalarSizeInBits()) {
        uint64_t c2 = N1C->getZExtValue();
        if (c1 == c2) {
          SDValue NewOp0 = N0.getOperand(0);
          EVT CountVT = NewOp0.getOperand(1).getValueType();
          SDValue NewSHL = DAG.getNode(ISD::SHL, SDLoc(N), NewOp0.getValueType(),
                                       NewOp0, DAG.getConstant(c2, CountVT));
          AddToWorklist(NewSHL.getNode());
          return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
        }
      }
    }
  }

  // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
  //                               (and (srl x, (sub c1, c2)), MASK)
  // Only fold this if the inner shift has no other uses -- if it does, folding
  // this will increase the total number of instructions.
  if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      uint64_t c1 = N0C1->getZExtValue();
      if (c1 < OpSizeInBits) {
        uint64_t c2 = N1C->getZExtValue();
        APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
        SDValue Shift;
        if (c2 > c1) {
          Mask = Mask.shl(c2 - c1);
          Shift = DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
                              DAG.getConstant(c2 - c1, N1.getValueType()));
        } else {
          Mask = Mask.lshr(c1 - c2);
          Shift = DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
                              DAG.getConstant(c1 - c2, N1.getValueType()));
        }
        return DAG.getNode(ISD::AND, SDLoc(N0), VT, Shift,
                           DAG.getConstant(Mask, VT));
      }
    }
  }
  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
    unsigned BitSize = VT.getScalarSizeInBits();
    SDValue HiBitsMask =
      DAG.getConstant(APInt::getHighBitsSet(BitSize,
                                            BitSize - N1C->getZExtValue()), VT);
    return DAG.getNode(ISD::AND, SDLoc(N), VT, N0.getOperand(0),
                       HiBitsMask);
  }

  if (N1C) {
    SDValue NewSHL = visitShiftByConstant(N, N1C);
    if (NewSHL.getNode())
      return NewSHL;
  }

  return SDValue();
}

SDValue DAGCombiner::visitSRA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;

    N1C = isConstOrConstSplat(N1);
  }

  // fold (sra c1, c2) -> c1 >> c2 (arithmetic shift)
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
  // fold (sra 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (sra -1, x) -> -1
  if (N0C && N0C->isAllOnesValue())
    return N0;
  // fold (sra x, c >= size(x)) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (sra x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
  // sext_inreg.
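  // E.g. for i32, (sra (shl x, 24), 24) is sign_extend_inreg of the low
  // i8 of x: the shl places the byte at the top and the sra smears its
  // sign bit back down.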
  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
    unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
    if (VT.isVector())
      ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               ExtVT, VT.getVectorNumElements());
    if ((!LegalOperations ||
         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
                         N0.getOperand(0), DAG.getValueType(ExtVT));
  }

  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
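  // The sum is clamped to size(x)-1 below because shifting all the way
  // through is equivalent: e.g. for i32, (sra (sra x, 25), 25) would need
  // a shift of 50, but (sra x, 31) produces the same all-sign-bits result.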
  if (N1C && N0.getOpcode() == ISD::SRA) {
    if (ConstantSDNode *C1 = isConstOrConstSplat(N0.getOperand(1))) {
      unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
      if (Sum >= OpSizeInBits)
        Sum = OpSizeInBits - 1;
      return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0.getOperand(0),
                         DAG.getConstant(Sum, N1.getValueType()));
    }
  }

  // fold (sra (shl X, m), (sub result_size, n))
  //   -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
  //   result_size - n != m.
  // If truncate is free for the target sext(shl) is likely to result in better
  // code.
  if (N0.getOpcode() == ISD::SHL && N1C) {
    // Get the two constants of the shifts, CN0 = m, CN = n.
    const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
    if (N01C) {
      LLVMContext &Ctx = *DAG.getContext();
      // Determine what the truncate's result bitsize and type would be.
      EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());

      if (VT.isVector())
        TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());

      // Determine the residual right-shift amount.
      signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();

      // If the shift is not a no-op (in which case this should be just a sign
      // extend already), the truncated-to type is legal, sign_extend is legal
      // on that type, and the truncate to that type is both legal and free,
      // perform the transform.
      if ((ShiftAmt > 0) &&
          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
          TLI.isTruncateFree(VT, TruncVT)) {

        SDValue Amt = DAG.getConstant(ShiftAmt,
            getShiftAmountTy(N0.getOperand(0).getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, SDLoc(N0), VT,
                                    N0.getOperand(0), Amt);
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), TruncVT,
                                    Shift);
        return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N),
                           N->getValueType(0), Trunc);
      }
    }
  }

  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
    if (NewOp1.getNode())
      return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
  }

  // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
  //      if c1 is equal to the number of bits the trunc removes
  if (N0.getOpcode() == ISD::TRUNCATE &&
      (N0.getOperand(0).getOpcode() == ISD::SRL ||
       N0.getOperand(0).getOpcode() == ISD::SRA) &&
      N0.getOperand(0).hasOneUse() &&
      N0.getOperand(0).getOperand(1).hasOneUse() &&
      N1C) {
    SDValue N0Op0 = N0.getOperand(0);
    if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
      unsigned LargeShiftVal = LargeShift->getZExtValue();
      EVT LargeVT = N0Op0.getValueType();

      if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) {
        SDValue Amt =
          DAG.getConstant(LargeShiftVal + N1C->getZExtValue(),
                          getShiftAmountTy(N0Op0.getOperand(0).getValueType()));
        SDValue SRA = DAG.getNode(ISD::SRA, SDLoc(N), LargeVT,
                                  N0Op0.getOperand(0), Amt);
        return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, SRA);
      }
    }
  }

  // Simplify, based on bits shifted out of the LHS.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // If the sign bit is known to be zero, switch this to a SRL.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);

  if (N1C) {
    SDValue NewSRA = visitShiftByConstant(N, N1C);
    if (NewSRA.getNode())
      return NewSRA;
  }

  return SDValue();
}

SDValue DAGCombiner::visitSRL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;

    N1C = isConstOrConstSplat(N1);
  }

  // fold (srl c1, c2) -> c1 >>u c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
  // fold (srl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (srl x, c >= size(x)) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (srl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // if (srl x, c) is known to be zero, return 0
  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
                                   APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, VT);

  // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SRL) {
    if (ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1))) {
      uint64_t c1 = N01C->getZExtValue();
      uint64_t c2 = N1C->getZExtValue();
      if (c1 + c2 >= OpSizeInBits)
        return DAG.getConstant(0, VT);
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
                         DAG.getConstant(c1 + c2, N1.getValueType()));
    }
  }

  // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
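  // E.g. with x : i64, (srl (trunc (srl x, 32)), 8) -> (trunc (srl x, 40)):
  // the truncate removed exactly the 32 bits the inner shift discarded, so
  // the two shifts can be performed at the wider type and summed.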
  if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(0).getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
    uint64_t c1 =
      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    EVT InnerShiftVT = N0.getOperand(0).getValueType();
    EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
    uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
    // This is only valid if the OpSizeInBits + c1 = size of inner shift.
    if (c1 + OpSizeInBits == InnerShiftSize) {
      if (c1 + c2 >= InnerShiftSize)
        return DAG.getConstant(0, VT);
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT,
                         DAG.getNode(ISD::SRL, SDLoc(N0), InnerShiftVT,
                                     N0.getOperand(0)->getOperand(0),
                                     DAG.getConstant(c1 + c2, ShiftCountVT)));
    }
  }

  // fold (srl (shl x, c), c) -> (and x, cst2)
  if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1) {
    unsigned BitSize = N0.getScalarValueSizeInBits();
    if (BitSize <= 64) {
      uint64_t ShAmt = N1C->getZExtValue() + 64 - BitSize;
      return DAG.getNode(ISD::AND, SDLoc(N), VT, N0.getOperand(0),
                         DAG.getConstant(~0ULL >> ShAmt, VT));
    }
  }

  // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
    // Shifting in all undef bits?
    EVT SmallVT = N0.getOperand(0).getValueType();
    unsigned BitSize = SmallVT.getScalarSizeInBits();
    if (N1C->getZExtValue() >= BitSize)
      return DAG.getUNDEF(VT);

    if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
      uint64_t ShiftAmt = N1C->getZExtValue();
      SDValue SmallShift = DAG.getNode(ISD::SRL, SDLoc(N0), SmallVT,
                                       N0.getOperand(0),
                          DAG.getConstant(ShiftAmt, getShiftAmountTy(SmallVT)));
      AddToWorklist(SmallShift.getNode());
      APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
      return DAG.getNode(ISD::AND, SDLoc(N), VT,
                         DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, SmallShift),
                         DAG.getConstant(Mask, VT));
    }
  }

  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
  // bit, which is unmodified by sra.
  if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) {
    if (N0.getOpcode() == ISD::SRA)
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
  }

  // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
  if (N1C && N0.getOpcode() == ISD::CTLZ &&
      N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
    APInt KnownZero, KnownOne;
    DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne);

    // If any of the input bits are KnownOne, then the input couldn't be all
    // zeros, thus the result of the srl will always be zero.
    if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);

    // If all of the bits input to the ctlz node are known to be zero, then
    // the result of the ctlz is "32" and the result of the shift is one.
    APInt UnknownBits = ~KnownZero;
    if (UnknownBits == 0) return DAG.getConstant(1, VT);

    // Otherwise, check to see if there is exactly one bit input to the ctlz.
    if ((UnknownBits & (UnknownBits - 1)) == 0) {
      // Okay, we know that only the single bit specified by UnknownBits could
      // be set on input to the CTLZ node.  If this bit is set, the SRL will
      // return 0; if it is clear, it returns 1.  Change the CTLZ/SRL pair
      // to an SRL/XOR pair, which is likely to simplify more.
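      // E.g. if only bit 3 of x can be nonzero (i32): ctlz(x) is 28 when the
      // bit is set and 32 when clear, so (srl (ctlz x), 5) is 0 or 1; the
      // same value is computed directly by (xor (srl x, 3), 1).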
      unsigned ShAmt = UnknownBits.countTrailingZeros();
      SDValue Op = N0.getOperand(0);

      if (ShAmt) {
        Op = DAG.getNode(ISD::SRL, SDLoc(N0), VT, Op,
                  DAG.getConstant(ShAmt, getShiftAmountTy(Op.getValueType())));
        AddToWorklist(Op.getNode());
      }

      return DAG.getNode(ISD::XOR, SDLoc(N), VT,
                         Op, DAG.getConstant(1, VT));
    }
  }

  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
    if (NewOp1.getNode())
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
  }

  // fold operands of srl based on knowledge that the low bits are not
  // demanded.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  if (N1C) {
    SDValue NewSRL = visitShiftByConstant(N, N1C);
    if (NewSRL.getNode())
      return NewSRL;
  }

  // Attempt to convert a srl of a load into a narrower zero-extending load.
  SDValue NarrowLoad = ReduceLoadWidth(N);
  if (NarrowLoad.getNode())
    return NarrowLoad;

  // Here is a common situation. We want to optimize:
  //
  //   %a = ...
  //   %b = and i32 %a, 2
  //   %c = srl i32 %b, 1
  //   brcond i32 %c ...
  //
  // into
  //
  //   %a = ...
  //   %b = and %a, 2
  //   %c = setcc eq %b, 0
  //   brcond %c ...
  //
  // However, when the source operand of SRL is optimized into AND, the SRL
  // itself may not be optimized further. Look for it and add the BRCOND into
  // the worklist.
  if (N->hasOneUse()) {
    SDNode *Use = *N->use_begin();
    if (Use->getOpcode() == ISD::BRCOND)
      AddToWorklist(Use);
    else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
      // Also look past the truncate.
      Use = *Use->use_begin();
      if (Use->getOpcode() == ISD::BRCOND)
        AddToWorklist(Use);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitCTLZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz_zero_undef c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTTZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz_zero_undef c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTPOP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctpop c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitSELECT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
  EVT VT = N->getValueType(0);
  EVT VT0 = N0.getValueType();

  // fold (select C, X, X) -> X
  if (N1 == N2)
    return N1;
  // fold (select true, X, Y) -> X
  if (N0C && !N0C->isNullValue())
    return N1;
  // fold (select false, X, Y) -> Y
  if (N0C && N0C->isNullValue())
    return N2;
  // fold (select C, 1, X) -> (or C, X)
  if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
    return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
  // fold (select C, 0, 1) -> (xor C, 1)
  // We can't do this reliably if integer-based booleans have different
  // contents from floating-point-based booleans. This is because we can't
  // tell whether we have an integer-based boolean or a floating-point-based
  // boolean unless we can find the SETCC that produced it and inspect its
  // operands. This is fairly easy if C is the SETCC node, but it can
  // potentially be undiscoverable (or not reasonably discoverable). For
  // example, it could be in another basic block or it could require searching
  // a complicated expression.
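  // The xor with 1 only inverts a boolean that is known to be 0 or 1; with
  // zero-or-negative-one booleans, C could be -1 and (xor -1, 1) == -2, not
  // the required 0, so the boolean-contents check below is what makes this
  // fold safe.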
  if (VT.isInteger() &&
      (VT0 == MVT::i1 || (VT0.isInteger() &&
                          TLI.getBooleanContents(false, false) ==
                              TLI.getBooleanContents(false, true) &&
                          TLI.getBooleanContents(false, false) ==
                              TargetLowering::ZeroOrOneBooleanContent)) &&
      N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
    SDValue XORNode;
    if (VT == VT0)
      return DAG.getNode(ISD::XOR, SDLoc(N), VT0,
                         N0, DAG.getConstant(1, VT0));
    XORNode = DAG.getNode(ISD::XOR, SDLoc(N0), VT0,
                          N0, DAG.getConstant(1, VT0));
    AddToWorklist(XORNode.getNode());
    if (VT.bitsGT(VT0))
      return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, XORNode);
    return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, XORNode);
  }
  // fold (select C, 0, X) -> (and (not C), X)
  if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
    SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
    AddToWorklist(NOTNode.getNode());
    return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
  }
  // fold (select C, X, 1) -> (or (not C), X)
  if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
    SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
    AddToWorklist(NOTNode.getNode());
    return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
  }
  // fold (select C, X, 0) -> (and C, X)
  if (VT == MVT::i1 && N2C && N2C->isNullValue())
    return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
  // fold (select X, X, Y) -> (or X, Y)
  // fold (select X, 1, Y) -> (or X, Y)
  if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
    return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
  // fold (select X, Y, X) -> (and X, Y)
  // fold (select X, Y, 0) -> (and X, Y)
  if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
    return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N1, N2))
    return SDValue(N, 0);  // Don't revisit N.

  // fold selects based on a setcc into other things, such as min/max/abs
  if (N0.getOpcode() == ISD::SETCC) {
    if ((!LegalOperations &&
         TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
        TLI.isOperationLegal(ISD::SELECT_CC, VT))
      return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
                         N0.getOperand(0), N0.getOperand(1),
                         N1, N2, N0.getOperand(2));
    return SimplifySelect(SDLoc(N), N0, N1, N2);
  }

  return SDValue();
}

static
std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));

  // Split the inputs.
  SDValue Lo, Hi, LL, LH, RL, RH;
  std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
  std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);

  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));

  return std::make_pair(Lo, Hi);
}

// This function assumes all the vselect's arguments are CONCAT_VECTOR
// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
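// E.g. (vselect <0,0,0,0,-1,-1,-1,-1>, (concat A0, A1), (concat B0, B1))
// becomes (concat B0, A1): each half of the condition is uniform, so each
// half of the result comes wholesale from one of the two concat operands.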
4666 static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
4667   SDLoc dl(N);
4668   SDValue Cond = N->getOperand(0);
4669   SDValue LHS = N->getOperand(1);
4670   SDValue RHS = N->getOperand(2);
4671   MVT VT = N->getSimpleValueType(0);
4672   int NumElems = VT.getVectorNumElements();
4673   assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
4674          RHS.getOpcode() == ISD::CONCAT_VECTORS &&
4675          Cond.getOpcode() == ISD::BUILD_VECTOR);
4676
4677   // We're sure we have an even number of elements due to the
4678   // concat_vectors we have as arguments to vselect.
4679   // Walk the bottom half of the BV: skip elements until we find the first
4680   // one that's not an UNDEF, record it, then keep looping until we get to
4681   // half the length of the BV and check that all the non-undef nodes are
4682   // that same value.
4682   ConstantSDNode *BottomHalf = nullptr;
4683   for (int i = 0; i < NumElems / 2; ++i) {
4684     if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
4685       continue;
4686
4687     if (BottomHalf == nullptr)
4688       BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
4689     else if (Cond->getOperand(i).getNode() != BottomHalf)
4690       return SDValue();
4691   }
4692
4693   // Do the same for the second half of the BuildVector
4694   ConstantSDNode *TopHalf = nullptr;
4695   for (int i = NumElems / 2; i < NumElems; ++i) {
4696     if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
4697       continue;
4698
4699     if (TopHalf == nullptr)
4700       TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
4701     else if (Cond->getOperand(i).getNode() != TopHalf)
4702       return SDValue();
4703   }
4704
4705   assert(TopHalf && BottomHalf &&
4706          "One half of the selector was all UNDEFs and the other was all the "
4707          "same value. This should have been addressed before this function.");
4708   return DAG.getNode(
4709       ISD::CONCAT_VECTORS, dl, VT,
4710       BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
4711       TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
4712 }
4713
4714 SDValue DAGCombiner::visitVSELECT(SDNode *N) {
4715   SDValue N0 = N->getOperand(0);
4716   SDValue N1 = N->getOperand(1);
4717   SDValue N2 = N->getOperand(2);
4718   SDLoc DL(N);
4719
4720   // Canonicalize integer abs.
4721   // vselect (setg[te] X, 0), X, -X ->
4722   // vselect (setgt X, -1), X, -X ->
4723   // vselect (setl[te] X, 0), -X, X ->
4724   // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
4725   if (N0.getOpcode() == ISD::SETCC) {
4726     SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
4727     ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
4728     bool isAbs = false;
4729     bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
4730
4731     if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
4732          (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
4733         N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
4734       isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
4735     else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
4736              N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
4737       isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
4738
4739     if (isAbs) {
4740       EVT VT = LHS.getValueType();
4741       SDValue Shift = DAG.getNode(
4742           ISD::SRA, DL, VT, LHS,
4743           DAG.getConstant(VT.getScalarType().getSizeInBits() - 1, VT));
4744       SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
4745       AddToWorklist(Shift.getNode());
4746       AddToWorklist(Add.getNode());
4747       return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
4748     }
4749   }
4750
4751   // If the VSELECT result requires splitting and the mask is provided by a
4752   // SETCC, then split both nodes and their operands before legalization. This
4753   // prevents the type legalizer from unrolling SETCC into scalar comparisons
4754   // and enables future optimizations (e.g. min/max pattern matching on X86).
4755   if (N0.getOpcode() == ISD::SETCC) {
4756     EVT VT = N->getValueType(0);
4757
4758     // Check if any splitting is required.
4759     if (TLI.getTypeAction(*DAG.getContext(), VT) !=
4760         TargetLowering::TypeSplitVector)
4761       return SDValue();
4762
4763     SDValue Lo, Hi, CCLo, CCHi, LL, LH, RL, RH;
4764     std::tie(CCLo, CCHi) = SplitVSETCC(N0.getNode(), DAG);
4765     std::tie(LL, LH) = DAG.SplitVectorOperand(N, 1);
4766     std::tie(RL, RH) = DAG.SplitVectorOperand(N, 2);
4767
4768     Lo = DAG.getNode(N->getOpcode(), DL, LL.getValueType(), CCLo, LL, RL);
4769     Hi = DAG.getNode(N->getOpcode(), DL, LH.getValueType(), CCHi, LH, RH);
4770
4771     // Add the new VSELECT nodes to the work list in case they need to be split
4772     // again.
4773     AddToWorklist(Lo.getNode());
4774     AddToWorklist(Hi.getNode());
4775
4776     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
4777   }
4778
4779   // Fold (vselect (build_vector all_ones), N1, N2) -> N1
4780   if (ISD::isBuildVectorAllOnes(N0.getNode()))
4781     return N1;
4782   // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
4783   if (ISD::isBuildVectorAllZeros(N0.getNode()))
4784     return N2;
4785
4786   // The ConvertSelectToConcatVector function assumes both of the above
4787   // checks for (vselect (build_vector all{ones,zeros}) ...) have been made
4788   // and addressed.
4789 if (N1.getOpcode() == ISD::CONCAT_VECTORS && 4790 N2.getOpcode() == ISD::CONCAT_VECTORS && 4791 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) { 4792 SDValue CV = ConvertSelectToConcatVector(N, DAG); 4793 if (CV.getNode()) 4794 return CV; 4795 } 4796 4797 return SDValue(); 4798 } 4799 4800 SDValue DAGCombiner::visitSELECT_CC(SDNode *N) { 4801 SDValue N0 = N->getOperand(0); 4802 SDValue N1 = N->getOperand(1); 4803 SDValue N2 = N->getOperand(2); 4804 SDValue N3 = N->getOperand(3); 4805 SDValue N4 = N->getOperand(4); 4806 ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get(); 4807 4808 // fold select_cc lhs, rhs, x, x, cc -> x 4809 if (N2 == N3) 4810 return N2; 4811 4812 // Determine if the condition we're dealing with is constant 4813 SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), 4814 N0, N1, CC, SDLoc(N), false); 4815 if (SCC.getNode()) { 4816 AddToWorklist(SCC.getNode()); 4817 4818 if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) { 4819 if (!SCCC->isNullValue()) 4820 return N2; // cond always true -> true val 4821 else 4822 return N3; // cond always false -> false val 4823 } 4824 4825 // Fold to a simpler select_cc 4826 if (SCC.getOpcode() == ISD::SETCC) 4827 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(), 4828 SCC.getOperand(0), SCC.getOperand(1), N2, N3, 4829 SCC.getOperand(2)); 4830 } 4831 4832 // If we can fold this based on the true/false value, do so. 4833 if (SimplifySelectOps(N, N2, N3)) 4834 return SDValue(N, 0); // Don't revisit N. 4835 4836 // fold select_cc into other things, such as min/max/abs 4837 return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC); 4838 } 4839 4840 SDValue DAGCombiner::visitSETCC(SDNode *N) { 4841 return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1), 4842 cast<CondCodeSDNode>(N->getOperand(2))->get(), 4843 SDLoc(N)); 4844 } 4845 4846 // tryToFoldExtendOfConstant - Try to fold a sext/zext/aext 4847 // dag node into a ConstantSDNode or a build_vector of constants. 4848 // This function is called by the DAGCombiner when visiting sext/zext/aext 4849 // dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND). 4850 // Vector extends are not folded if operations are legal; this is to 4851 // avoid introducing illegal build_vector dag nodes. 4852 static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI, 4853 SelectionDAG &DAG, bool LegalTypes, 4854 bool LegalOperations) { 4855 unsigned Opcode = N->getOpcode(); 4856 SDValue N0 = N->getOperand(0); 4857 EVT VT = N->getValueType(0); 4858 4859 assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND || 4860 Opcode == ISD::ANY_EXTEND) && "Expected EXTEND dag node in input!"); 4861 4862 // fold (sext c1) -> c1 4863 // fold (zext c1) -> c1 4864 // fold (aext c1) -> c1 4865 if (isa<ConstantSDNode>(N0)) 4866 return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode(); 4867 4868 // fold (sext (build_vector AllConstants) -> (build_vector AllConstants) 4869 // fold (zext (build_vector AllConstants) -> (build_vector AllConstants) 4870 // fold (aext (build_vector AllConstants) -> (build_vector AllConstants) 4871 EVT SVT = VT.getScalarType(); 4872 if (!(VT.isVector() && 4873 (!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) && 4874 ISD::isBuildVectorOfConstantSDNodes(N0.getNode()))) 4875 return nullptr; 4876 4877 // We can fold this node into a build_vector. 
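  // Each constant element is extended individually: the value is placed in
  // the top bits of the wider constant and shifted back down, arithmetically
  // for sign extension and logically otherwise. For example, sign-extending
  // v2i8 <-1, 1> to v2i16 yields <0xFFFF, 0x0001>, while zero/any extension
  // yields <0x00FF, 0x0001>.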
4878   unsigned VTBits = SVT.getSizeInBits();
4879   unsigned EVTBits = N0->getValueType(0).getScalarType().getSizeInBits();
4880   unsigned ShAmt = VTBits - EVTBits;
4881   SmallVector<SDValue, 8> Elts;
4882   unsigned NumElts = N0->getNumOperands();
4883   SDLoc DL(N);
4884
4885   for (unsigned i=0; i != NumElts; ++i) {
4886     SDValue Op = N0->getOperand(i);
4887     if (Op->getOpcode() == ISD::UNDEF) {
4888       Elts.push_back(DAG.getUNDEF(SVT));
4889       continue;
4890     }
4891
4892     ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
4893     const APInt &C = APInt(VTBits, CurrentND->getAPIntValue().getZExtValue());
4894     if (Opcode == ISD::SIGN_EXTEND)
4895       Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
4896                                      SVT));
4897     else
4898       Elts.push_back(DAG.getConstant(C.shl(ShAmt).lshr(ShAmt).getZExtValue(),
4899                                      SVT));
4900   }
4901
4902   return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Elts).getNode();
4903 }
4904
4905 // ExtendUsesToFormExtLoad - Try to extend the uses of a load to enable this:
4906 // "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
4907 // transformation. Returns true if the extensions are possible and the
4908 // above-mentioned transformation is profitable.
4909 static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
4910                                     unsigned ExtOpc,
4911                                     SmallVectorImpl<SDNode *> &ExtendNodes,
4912                                     const TargetLowering &TLI) {
4913   bool HasCopyToRegUses = false;
4914   bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
4915   for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
4916                             UE = N0.getNode()->use_end();
4917        UI != UE; ++UI) {
4918     SDNode *User = *UI;
4919     if (User == N)
4920       continue;
4921     if (UI.getUse().getResNo() != N0.getResNo())
4922       continue;
4923     // FIXME: Only extend SETCC N, N and SETCC N, c for now.
4924     if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
4925       ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
4926       if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
4927         // Sign bits will be lost after a zext.
4928         return false;
4929       bool Add = false;
4930       for (unsigned i = 0; i != 2; ++i) {
4931         SDValue UseOp = User->getOperand(i);
4932         if (UseOp == N0)
4933           continue;
4934         if (!isa<ConstantSDNode>(UseOp))
4935           return false;
4936         Add = true;
4937       }
4938       if (Add)
4939         ExtendNodes.push_back(User);
4940       continue;
4941     }
4942     // If truncates aren't free and there are users we can't
4943     // extend, it isn't worthwhile.
4944     if (!isTruncFree)
4945       return false;
4946     // Remember if this value is live-out.
4947     if (User->getOpcode() == ISD::CopyToReg)
4948       HasCopyToRegUses = true;
4949   }
4950
4951   if (HasCopyToRegUses) {
4952     bool BothLiveOut = false;
4953     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
4954          UI != UE; ++UI) {
4955       SDUse &Use = UI.getUse();
4956       if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
4957         BothLiveOut = true;
4958         break;
4959       }
4960     }
4961     if (BothLiveOut)
4962       // Both unextended and extended values are live out. There had better be
4963       // a good reason for the transformation.
4964       return ExtendNodes.size();
4965   }
4966   return true;
4967 }
4968
4969 void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
4970                                   SDValue Trunc, SDValue ExtLoad, SDLoc DL,
4971                                   ISD::NodeType ExtType) {
4972   // Extend SetCC uses if necessary.
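  // Each recorded SETCC compared the original narrow value (now represented
  // by Trunc) against a constant; rebuild it to compare the extended load
  // against an equally extended copy of that constant instead.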
4973 for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) { 4974 SDNode *SetCC = SetCCs[i]; 4975 SmallVector<SDValue, 4> Ops; 4976 4977 for (unsigned j = 0; j != 2; ++j) { 4978 SDValue SOp = SetCC->getOperand(j); 4979 if (SOp == Trunc) 4980 Ops.push_back(ExtLoad); 4981 else 4982 Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp)); 4983 } 4984 4985 Ops.push_back(SetCC->getOperand(2)); 4986 CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops)); 4987 } 4988 } 4989 4990 SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) { 4991 SDValue N0 = N->getOperand(0); 4992 EVT VT = N->getValueType(0); 4993 4994 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 4995 LegalOperations)) 4996 return SDValue(Res, 0); 4997 4998 // fold (sext (sext x)) -> (sext x) 4999 // fold (sext (aext x)) -> (sext x) 5000 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 5001 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, 5002 N0.getOperand(0)); 5003 5004 if (N0.getOpcode() == ISD::TRUNCATE) { 5005 // fold (sext (truncate (load x))) -> (sext (smaller load x)) 5006 // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n))) 5007 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 5008 if (NarrowLoad.getNode()) { 5009 SDNode* oye = N0.getNode()->getOperand(0).getNode(); 5010 if (NarrowLoad.getNode() != N0.getNode()) { 5011 CombineTo(N0.getNode(), NarrowLoad); 5012 // CombineTo deleted the truncate, if needed, but not what's under it. 5013 AddToWorklist(oye); 5014 } 5015 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5016 } 5017 5018 // See if the value being truncated is already sign extended. If so, just 5019 // eliminate the trunc/sext pair. 5020 SDValue Op = N0.getOperand(0); 5021 unsigned OpBits = Op.getValueType().getScalarType().getSizeInBits(); 5022 unsigned MidBits = N0.getValueType().getScalarType().getSizeInBits(); 5023 unsigned DestBits = VT.getScalarType().getSizeInBits(); 5024 unsigned NumSignBits = DAG.ComputeNumSignBits(Op); 5025 5026 if (OpBits == DestBits) { 5027 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign 5028 // bits, it is already ready. 5029 if (NumSignBits > DestBits-MidBits) 5030 return Op; 5031 } else if (OpBits < DestBits) { 5032 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign 5033 // bits, just sext from i32. 5034 if (NumSignBits > OpBits-MidBits) 5035 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, Op); 5036 } else { 5037 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign 5038 // bits, just truncate to i32. 5039 if (NumSignBits > OpBits-MidBits) 5040 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 5041 } 5042 5043 // fold (sext (truncate x)) -> (sextinreg x). 5044 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, 5045 N0.getValueType())) { 5046 if (OpBits < DestBits) 5047 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op); 5048 else if (OpBits > DestBits) 5049 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op); 5050 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, Op, 5051 DAG.getValueType(N0.getValueType())); 5052 } 5053 } 5054 5055 // fold (sext (load x)) -> (sext (truncate (sextload x))) 5056 // None of the supported targets knows how to perform load and sign extend 5057 // on vectors in one instruction. We only perform this transformation on 5058 // scalars. 
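  // For example, (sext (i16 load x) to i32) becomes a direct i32 sextload of
  // the i16 memory value; any other users of the original load are switched
  // to a truncate of the extended value, and chain users to the new load's
  // chain.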
5059 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 5060 ISD::isUNINDEXEDLoad(N0.getNode()) && 5061 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 5062 TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) { 5063 bool DoXform = true; 5064 SmallVector<SDNode*, 4> SetCCs; 5065 if (!N0.hasOneUse()) 5066 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI); 5067 if (DoXform) { 5068 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5069 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 5070 LN0->getChain(), 5071 LN0->getBasePtr(), N0.getValueType(), 5072 LN0->getMemOperand()); 5073 CombineTo(N, ExtLoad); 5074 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 5075 N0.getValueType(), ExtLoad); 5076 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 5077 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 5078 ISD::SIGN_EXTEND); 5079 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5080 } 5081 } 5082 5083 // fold (sext (sextload x)) -> (sext (truncate (sextload x))) 5084 // fold (sext ( extload x)) -> (sext (truncate (sextload x))) 5085 if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 5086 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 5087 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5088 EVT MemVT = LN0->getMemoryVT(); 5089 if ((!LegalOperations && !LN0->isVolatile()) || 5090 TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) { 5091 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 5092 LN0->getChain(), 5093 LN0->getBasePtr(), MemVT, 5094 LN0->getMemOperand()); 5095 CombineTo(N, ExtLoad); 5096 CombineTo(N0.getNode(), 5097 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 5098 N0.getValueType(), ExtLoad), 5099 ExtLoad.getValue(1)); 5100 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5101 } 5102 } 5103 5104 // fold (sext (and/or/xor (load x), cst)) -> 5105 // (and/or/xor (sextload x), (sext cst)) 5106 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 5107 N0.getOpcode() == ISD::XOR) && 5108 isa<LoadSDNode>(N0.getOperand(0)) && 5109 N0.getOperand(1).getOpcode() == ISD::Constant && 5110 TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()) && 5111 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 5112 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 5113 if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) { 5114 bool DoXform = true; 5115 SmallVector<SDNode*, 4> SetCCs; 5116 if (!N0.hasOneUse()) 5117 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND, 5118 SetCCs, TLI); 5119 if (DoXform) { 5120 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN0), VT, 5121 LN0->getChain(), LN0->getBasePtr(), 5122 LN0->getMemoryVT(), 5123 LN0->getMemOperand()); 5124 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 5125 Mask = Mask.sext(VT.getSizeInBits()); 5126 SDValue And = DAG.getNode(N0.getOpcode(), SDLoc(N), VT, 5127 ExtLoad, DAG.getConstant(Mask, VT)); 5128 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 5129 SDLoc(N0.getOperand(0)), 5130 N0.getOperand(0).getValueType(), ExtLoad); 5131 CombineTo(N, And); 5132 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 5133 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 5134 ISD::SIGN_EXTEND); 5135 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5136 } 5137 } 5138 } 5139 5140 if (N0.getOpcode() == ISD::SETCC) { 5141 EVT N0VT = N0.getOperand(0).getValueType(); 5142 // sext(setcc) -> sext_in_reg(vsetcc) for vectors. 
5143 // Only do this before legalize for now. 5144 if (VT.isVector() && !LegalOperations && 5145 TLI.getBooleanContents(N0VT) == 5146 TargetLowering::ZeroOrNegativeOneBooleanContent) { 5147 // On some architectures (such as SSE/NEON/etc) the SETCC result type is 5148 // of the same size as the compared operands. Only optimize sext(setcc()) 5149 // if this is the case. 5150 EVT SVT = getSetCCResultType(N0VT); 5151 5152 // We know that the # elements of the results is the same as the 5153 // # elements of the compare (and the # elements of the compare result 5154 // for that matter). Check to see that they are the same size. If so, 5155 // we know that the element size of the sext'd result matches the 5156 // element size of the compare operands. 5157 if (VT.getSizeInBits() == SVT.getSizeInBits()) 5158 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0), 5159 N0.getOperand(1), 5160 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 5161 5162 // If the desired elements are smaller or larger than the source 5163 // elements we can use a matching integer vector type and then 5164 // truncate/sign extend 5165 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger(); 5166 if (SVT == MatchingVectorType) { 5167 SDValue VsetCC = DAG.getSetCC(SDLoc(N), MatchingVectorType, 5168 N0.getOperand(0), N0.getOperand(1), 5169 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 5170 return DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT); 5171 } 5172 } 5173 5174 // sext(setcc x, y, cc) -> (select (setcc x, y, cc), -1, 0) 5175 unsigned ElementWidth = VT.getScalarType().getSizeInBits(); 5176 SDValue NegOne = 5177 DAG.getConstant(APInt::getAllOnesValue(ElementWidth), VT); 5178 SDValue SCC = 5179 SimplifySelectCC(SDLoc(N), N0.getOperand(0), N0.getOperand(1), 5180 NegOne, DAG.getConstant(0, VT), 5181 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); 5182 if (SCC.getNode()) return SCC; 5183 5184 if (!VT.isVector()) { 5185 EVT SetCCVT = getSetCCResultType(N0.getOperand(0).getValueType()); 5186 if (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, SetCCVT)) { 5187 SDLoc DL(N); 5188 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 5189 SDValue SetCC = DAG.getSetCC(DL, 5190 SetCCVT, 5191 N0.getOperand(0), N0.getOperand(1), CC); 5192 EVT SelectVT = getSetCCResultType(VT); 5193 return DAG.getSelect(DL, VT, 5194 DAG.getSExtOrTrunc(SetCC, DL, SelectVT), 5195 NegOne, DAG.getConstant(0, VT)); 5196 5197 } 5198 } 5199 } 5200 5201 // fold (sext x) -> (zext x) if the sign bit is known zero. 5202 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) && 5203 DAG.SignBitIsZero(N0)) 5204 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0); 5205 5206 return SDValue(); 5207 } 5208 5209 // isTruncateOf - If N is a truncate of some other value, return true, record 5210 // the value being truncated in Op and which of Op's bits are zero in KnownZero. 5211 // This function computes KnownZero to avoid a duplicated call to 5212 // computeKnownBits in the caller. 
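// In addition to an explicit TRUNCATE, this recognizes (setcc X, 0, setne)
// on a value whose bits above bit 0 are known to be zero: such a setcc is
// equivalent to truncating X to i1.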
5213 static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, 5214 APInt &KnownZero) { 5215 APInt KnownOne; 5216 if (N->getOpcode() == ISD::TRUNCATE) { 5217 Op = N->getOperand(0); 5218 DAG.computeKnownBits(Op, KnownZero, KnownOne); 5219 return true; 5220 } 5221 5222 if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 || 5223 cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE) 5224 return false; 5225 5226 SDValue Op0 = N->getOperand(0); 5227 SDValue Op1 = N->getOperand(1); 5228 assert(Op0.getValueType() == Op1.getValueType()); 5229 5230 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0); 5231 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1); 5232 if (COp0 && COp0->isNullValue()) 5233 Op = Op1; 5234 else if (COp1 && COp1->isNullValue()) 5235 Op = Op0; 5236 else 5237 return false; 5238 5239 DAG.computeKnownBits(Op, KnownZero, KnownOne); 5240 5241 if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue()) 5242 return false; 5243 5244 return true; 5245 } 5246 5247 SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) { 5248 SDValue N0 = N->getOperand(0); 5249 EVT VT = N->getValueType(0); 5250 5251 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 5252 LegalOperations)) 5253 return SDValue(Res, 0); 5254 5255 // fold (zext (zext x)) -> (zext x) 5256 // fold (zext (aext x)) -> (zext x) 5257 if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 5258 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, 5259 N0.getOperand(0)); 5260 5261 // fold (zext (truncate x)) -> (zext x) or 5262 // (zext (truncate x)) -> (truncate x) 5263 // This is valid when the truncated bits of x are already zero. 5264 // FIXME: We should extend this to work for vectors too. 5265 SDValue Op; 5266 APInt KnownZero; 5267 if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) { 5268 APInt TruncatedBits = 5269 (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ? 5270 APInt(Op.getValueSizeInBits(), 0) : 5271 APInt::getBitsSet(Op.getValueSizeInBits(), 5272 N0.getValueSizeInBits(), 5273 std::min(Op.getValueSizeInBits(), 5274 VT.getSizeInBits())); 5275 if (TruncatedBits == (KnownZero & TruncatedBits)) { 5276 if (VT.bitsGT(Op.getValueType())) 5277 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Op); 5278 if (VT.bitsLT(Op.getValueType())) 5279 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 5280 5281 return Op; 5282 } 5283 } 5284 5285 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 5286 // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n))) 5287 if (N0.getOpcode() == ISD::TRUNCATE) { 5288 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 5289 if (NarrowLoad.getNode()) { 5290 SDNode* oye = N0.getNode()->getOperand(0).getNode(); 5291 if (NarrowLoad.getNode() != N0.getNode()) { 5292 CombineTo(N0.getNode(), NarrowLoad); 5293 // CombineTo deleted the truncate, if needed, but not what's under it. 5294 AddToWorklist(oye); 5295 } 5296 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
5297 } 5298 } 5299 5300 // fold (zext (truncate x)) -> (and x, mask) 5301 if (N0.getOpcode() == ISD::TRUNCATE && 5302 (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) { 5303 5304 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 5305 // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n))) 5306 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 5307 if (NarrowLoad.getNode()) { 5308 SDNode* oye = N0.getNode()->getOperand(0).getNode(); 5309 if (NarrowLoad.getNode() != N0.getNode()) { 5310 CombineTo(N0.getNode(), NarrowLoad); 5311 // CombineTo deleted the truncate, if needed, but not what's under it. 5312 AddToWorklist(oye); 5313 } 5314 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5315 } 5316 5317 SDValue Op = N0.getOperand(0); 5318 if (Op.getValueType().bitsLT(VT)) { 5319 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op); 5320 AddToWorklist(Op.getNode()); 5321 } else if (Op.getValueType().bitsGT(VT)) { 5322 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 5323 AddToWorklist(Op.getNode()); 5324 } 5325 return DAG.getZeroExtendInReg(Op, SDLoc(N), 5326 N0.getValueType().getScalarType()); 5327 } 5328 5329 // Fold (zext (and (trunc x), cst)) -> (and x, cst), 5330 // if either of the casts is not free. 5331 if (N0.getOpcode() == ISD::AND && 5332 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 5333 N0.getOperand(1).getOpcode() == ISD::Constant && 5334 (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 5335 N0.getValueType()) || 5336 !TLI.isZExtFree(N0.getValueType(), VT))) { 5337 SDValue X = N0.getOperand(0).getOperand(0); 5338 if (X.getValueType().bitsLT(VT)) { 5339 X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(X), VT, X); 5340 } else if (X.getValueType().bitsGT(VT)) { 5341 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); 5342 } 5343 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 5344 Mask = Mask.zext(VT.getSizeInBits()); 5345 return DAG.getNode(ISD::AND, SDLoc(N), VT, 5346 X, DAG.getConstant(Mask, VT)); 5347 } 5348 5349 // fold (zext (load x)) -> (zext (truncate (zextload x))) 5350 // None of the supported targets knows how to perform load and vector_zext 5351 // on vectors in one instruction. We only perform this transformation on 5352 // scalars. 5353 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 5354 ISD::isUNINDEXEDLoad(N0.getNode()) && 5355 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 5356 TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) { 5357 bool DoXform = true; 5358 SmallVector<SDNode*, 4> SetCCs; 5359 if (!N0.hasOneUse()) 5360 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI); 5361 if (DoXform) { 5362 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5363 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 5364 LN0->getChain(), 5365 LN0->getBasePtr(), N0.getValueType(), 5366 LN0->getMemOperand()); 5367 CombineTo(N, ExtLoad); 5368 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 5369 N0.getValueType(), ExtLoad); 5370 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 5371 5372 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 5373 ISD::ZERO_EXTEND); 5374 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
5375 } 5376 } 5377 5378 // fold (zext (and/or/xor (load x), cst)) -> 5379 // (and/or/xor (zextload x), (zext cst)) 5380 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 5381 N0.getOpcode() == ISD::XOR) && 5382 isa<LoadSDNode>(N0.getOperand(0)) && 5383 N0.getOperand(1).getOpcode() == ISD::Constant && 5384 TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()) && 5385 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 5386 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 5387 if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) { 5388 bool DoXform = true; 5389 SmallVector<SDNode*, 4> SetCCs; 5390 if (!N0.hasOneUse()) 5391 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::ZERO_EXTEND, 5392 SetCCs, TLI); 5393 if (DoXform) { 5394 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT, 5395 LN0->getChain(), LN0->getBasePtr(), 5396 LN0->getMemoryVT(), 5397 LN0->getMemOperand()); 5398 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 5399 Mask = Mask.zext(VT.getSizeInBits()); 5400 SDValue And = DAG.getNode(N0.getOpcode(), SDLoc(N), VT, 5401 ExtLoad, DAG.getConstant(Mask, VT)); 5402 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 5403 SDLoc(N0.getOperand(0)), 5404 N0.getOperand(0).getValueType(), ExtLoad); 5405 CombineTo(N, And); 5406 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 5407 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 5408 ISD::ZERO_EXTEND); 5409 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5410 } 5411 } 5412 } 5413 5414 // fold (zext (zextload x)) -> (zext (truncate (zextload x))) 5415 // fold (zext ( extload x)) -> (zext (truncate (zextload x))) 5416 if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 5417 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 5418 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5419 EVT MemVT = LN0->getMemoryVT(); 5420 if ((!LegalOperations && !LN0->isVolatile()) || 5421 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) { 5422 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 5423 LN0->getChain(), 5424 LN0->getBasePtr(), MemVT, 5425 LN0->getMemOperand()); 5426 CombineTo(N, ExtLoad); 5427 CombineTo(N0.getNode(), 5428 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), 5429 ExtLoad), 5430 ExtLoad.getValue(1)); 5431 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5432 } 5433 } 5434 5435 if (N0.getOpcode() == ISD::SETCC) { 5436 if (!LegalOperations && VT.isVector() && 5437 N0.getValueType().getVectorElementType() == MVT::i1) { 5438 EVT N0VT = N0.getOperand(0).getValueType(); 5439 if (getSetCCResultType(N0VT) == N0.getValueType()) 5440 return SDValue(); 5441 5442 // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors. 5443 // Only do this before legalize for now. 5444 EVT EltVT = VT.getVectorElementType(); 5445 SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(), 5446 DAG.getConstant(1, EltVT)); 5447 if (VT.getSizeInBits() == N0VT.getSizeInBits()) 5448 // We know that the # elements of the results is the same as the 5449 // # elements of the compare (and the # elements of the compare result 5450 // for that matter). Check to see that they are the same size. If so, 5451 // we know that the element size of the sext'd result matches the 5452 // element size of the compare operands. 
5453 return DAG.getNode(ISD::AND, SDLoc(N), VT, 5454 DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0), 5455 N0.getOperand(1), 5456 cast<CondCodeSDNode>(N0.getOperand(2))->get()), 5457 DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, 5458 OneOps)); 5459 5460 // If the desired elements are smaller or larger than the source 5461 // elements we can use a matching integer vector type and then 5462 // truncate/sign extend 5463 EVT MatchingElementType = 5464 EVT::getIntegerVT(*DAG.getContext(), 5465 N0VT.getScalarType().getSizeInBits()); 5466 EVT MatchingVectorType = 5467 EVT::getVectorVT(*DAG.getContext(), MatchingElementType, 5468 N0VT.getVectorNumElements()); 5469 SDValue VsetCC = 5470 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0), 5471 N0.getOperand(1), 5472 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 5473 return DAG.getNode(ISD::AND, SDLoc(N), VT, 5474 DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT), 5475 DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, OneOps)); 5476 } 5477 5478 // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 5479 SDValue SCC = 5480 SimplifySelectCC(SDLoc(N), N0.getOperand(0), N0.getOperand(1), 5481 DAG.getConstant(1, VT), DAG.getConstant(0, VT), 5482 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); 5483 if (SCC.getNode()) return SCC; 5484 } 5485 5486 // (zext (shl (zext x), cst)) -> (shl (zext x), cst) 5487 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) && 5488 isa<ConstantSDNode>(N0.getOperand(1)) && 5489 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND && 5490 N0.hasOneUse()) { 5491 SDValue ShAmt = N0.getOperand(1); 5492 unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 5493 if (N0.getOpcode() == ISD::SHL) { 5494 SDValue InnerZExt = N0.getOperand(0); 5495 // If the original shl may be shifting out bits, do not perform this 5496 // transformation. 5497 unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() - 5498 InnerZExt.getOperand(0).getValueType().getSizeInBits(); 5499 if (ShAmtVal > KnownZeroBits) 5500 return SDValue(); 5501 } 5502 5503 SDLoc DL(N); 5504 5505 // Ensure that the shift amount is wide enough for the shifted value. 5506 if (VT.getSizeInBits() >= 256) 5507 ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt); 5508 5509 return DAG.getNode(N0.getOpcode(), DL, VT, 5510 DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)), 5511 ShAmt); 5512 } 5513 5514 return SDValue(); 5515 } 5516 5517 SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) { 5518 SDValue N0 = N->getOperand(0); 5519 EVT VT = N->getValueType(0); 5520 5521 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 5522 LegalOperations)) 5523 return SDValue(Res, 0); 5524 5525 // fold (aext (aext x)) -> (aext x) 5526 // fold (aext (zext x)) -> (zext x) 5527 // fold (aext (sext x)) -> (sext x) 5528 if (N0.getOpcode() == ISD::ANY_EXTEND || 5529 N0.getOpcode() == ISD::ZERO_EXTEND || 5530 N0.getOpcode() == ISD::SIGN_EXTEND) 5531 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0)); 5532 5533 // fold (aext (truncate (load x))) -> (aext (smaller load x)) 5534 // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n))) 5535 if (N0.getOpcode() == ISD::TRUNCATE) { 5536 SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 5537 if (NarrowLoad.getNode()) { 5538 SDNode* oye = N0.getNode()->getOperand(0).getNode(); 5539 if (NarrowLoad.getNode() != N0.getNode()) { 5540 CombineTo(N0.getNode(), NarrowLoad); 5541 // CombineTo deleted the truncate, if needed, but not what's under it. 
5542 AddToWorklist(oye); 5543 } 5544 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5545 } 5546 } 5547 5548 // fold (aext (truncate x)) 5549 if (N0.getOpcode() == ISD::TRUNCATE) { 5550 SDValue TruncOp = N0.getOperand(0); 5551 if (TruncOp.getValueType() == VT) 5552 return TruncOp; // x iff x size == zext size. 5553 if (TruncOp.getValueType().bitsGT(VT)) 5554 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, TruncOp); 5555 return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, TruncOp); 5556 } 5557 5558 // Fold (aext (and (trunc x), cst)) -> (and x, cst) 5559 // if the trunc is not free. 5560 if (N0.getOpcode() == ISD::AND && 5561 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 5562 N0.getOperand(1).getOpcode() == ISD::Constant && 5563 !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 5564 N0.getValueType())) { 5565 SDValue X = N0.getOperand(0).getOperand(0); 5566 if (X.getValueType().bitsLT(VT)) { 5567 X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, X); 5568 } else if (X.getValueType().bitsGT(VT)) { 5569 X = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, X); 5570 } 5571 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 5572 Mask = Mask.zext(VT.getSizeInBits()); 5573 return DAG.getNode(ISD::AND, SDLoc(N), VT, 5574 X, DAG.getConstant(Mask, VT)); 5575 } 5576 5577 // fold (aext (load x)) -> (aext (truncate (extload x))) 5578 // None of the supported targets knows how to perform load and any_ext 5579 // on vectors in one instruction. We only perform this transformation on 5580 // scalars. 5581 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 5582 ISD::isUNINDEXEDLoad(N0.getNode()) && 5583 TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType())) { 5584 bool DoXform = true; 5585 SmallVector<SDNode*, 4> SetCCs; 5586 if (!N0.hasOneUse()) 5587 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI); 5588 if (DoXform) { 5589 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5590 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, 5591 LN0->getChain(), 5592 LN0->getBasePtr(), N0.getValueType(), 5593 LN0->getMemOperand()); 5594 CombineTo(N, ExtLoad); 5595 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 5596 N0.getValueType(), ExtLoad); 5597 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 5598 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 5599 ISD::ANY_EXTEND); 5600 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5601 } 5602 } 5603 5604 // fold (aext (zextload x)) -> (aext (truncate (zextload x))) 5605 // fold (aext (sextload x)) -> (aext (truncate (sextload x))) 5606 // fold (aext ( extload x)) -> (aext (truncate (extload x))) 5607 if (N0.getOpcode() == ISD::LOAD && 5608 !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 5609 N0.hasOneUse()) { 5610 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5611 ISD::LoadExtType ExtType = LN0->getExtensionType(); 5612 EVT MemVT = LN0->getMemoryVT(); 5613 if (!LegalOperations || TLI.isLoadExtLegal(ExtType, MemVT)) { 5614 SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N), 5615 VT, LN0->getChain(), LN0->getBasePtr(), 5616 MemVT, LN0->getMemOperand()); 5617 CombineTo(N, ExtLoad); 5618 CombineTo(N0.getNode(), 5619 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 5620 N0.getValueType(), ExtLoad), 5621 ExtLoad.getValue(1)); 5622 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
5623 } 5624 } 5625 5626 if (N0.getOpcode() == ISD::SETCC) { 5627 // For vectors: 5628 // aext(setcc) -> vsetcc 5629 // aext(setcc) -> truncate(vsetcc) 5630 // aext(setcc) -> aext(vsetcc) 5631 // Only do this before legalize for now. 5632 if (VT.isVector() && !LegalOperations) { 5633 EVT N0VT = N0.getOperand(0).getValueType(); 5634 // We know that the # elements of the results is the same as the 5635 // # elements of the compare (and the # elements of the compare result 5636 // for that matter). Check to see that they are the same size. If so, 5637 // we know that the element size of the sext'd result matches the 5638 // element size of the compare operands. 5639 if (VT.getSizeInBits() == N0VT.getSizeInBits()) 5640 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0), 5641 N0.getOperand(1), 5642 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 5643 // If the desired elements are smaller or larger than the source 5644 // elements we can use a matching integer vector type and then 5645 // truncate/any extend 5646 else { 5647 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger(); 5648 SDValue VsetCC = 5649 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0), 5650 N0.getOperand(1), 5651 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 5652 return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT); 5653 } 5654 } 5655 5656 // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 5657 SDValue SCC = 5658 SimplifySelectCC(SDLoc(N), N0.getOperand(0), N0.getOperand(1), 5659 DAG.getConstant(1, VT), DAG.getConstant(0, VT), 5660 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); 5661 if (SCC.getNode()) 5662 return SCC; 5663 } 5664 5665 return SDValue(); 5666 } 5667 5668 /// GetDemandedBits - See if the specified operand can be simplified with the 5669 /// knowledge that only the bits specified by Mask are used. If so, return the 5670 /// simpler operand, otherwise return a null SDValue. 5671 SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) { 5672 switch (V.getOpcode()) { 5673 default: break; 5674 case ISD::Constant: { 5675 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 5676 assert(CV && "Const value should be ConstSDNode."); 5677 const APInt &CVal = CV->getAPIntValue(); 5678 APInt NewVal = CVal & Mask; 5679 if (NewVal != CVal) 5680 return DAG.getConstant(NewVal, V.getValueType()); 5681 break; 5682 } 5683 case ISD::OR: 5684 case ISD::XOR: 5685 // If the LHS or RHS don't contribute bits to the or, drop them. 5686 if (DAG.MaskedValueIsZero(V.getOperand(0), Mask)) 5687 return V.getOperand(1); 5688 if (DAG.MaskedValueIsZero(V.getOperand(1), Mask)) 5689 return V.getOperand(0); 5690 break; 5691 case ISD::SRL: 5692 // Only look at single-use SRLs. 5693 if (!V.getNode()->hasOneUse()) 5694 break; 5695 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 5696 // See if we can recursively simplify the LHS. 5697 unsigned Amt = RHSC->getZExtValue(); 5698 5699 // Watch out for shift count overflow though. 
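      // (A shift amount of Mask.getBitWidth() or more would make the shift
      // below undefined.) The bits demanded of the SRL input are the demanded
      // result bits moved back up by the shift amount, hence Mask << Amt.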
5700       if (Amt >= Mask.getBitWidth()) break;
5701       APInt NewMask = Mask << Amt;
5702       SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
5703       if (SimplifyLHS.getNode())
5704         return DAG.getNode(ISD::SRL, SDLoc(V), V.getValueType(),
5705                            SimplifyLHS, V.getOperand(1));
5706     }
5707   }
5708   return SDValue();
5709 }
5710
5711 /// ReduceLoadWidth - If the result of a wider load is shifted right by N
5712 /// bits and then truncated to a narrower type, where N is a multiple of
5713 /// the number of bits of the narrower type, transform it to a narrower
5714 /// load from address + N / (number of bits of the new type). If the result
5715 /// is to be extended, also fold the extension to form an extending load.
5716 SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
5717   unsigned Opc = N->getOpcode();
5718
5719   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
5720   SDValue N0 = N->getOperand(0);
5721   EVT VT = N->getValueType(0);
5722   EVT ExtVT = VT;
5723
5724   // This transformation isn't valid for vector loads.
5725   if (VT.isVector())
5726     return SDValue();
5727
5728   // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
5729   // sign-extending to VT.
5730   if (Opc == ISD::SIGN_EXTEND_INREG) {
5731     ExtType = ISD::SEXTLOAD;
5732     ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
5733   } else if (Opc == ISD::SRL) {
5734     // Another special-case: SRL is basically zero-extending a narrower value.
5735     ExtType = ISD::ZEXTLOAD;
5736     N0 = SDValue(N, 0);
5737     ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5738     if (!N01) return SDValue();
5739     ExtVT = EVT::getIntegerVT(*DAG.getContext(),
5740                               VT.getSizeInBits() - N01->getZExtValue());
5741   }
5742   if (LegalOperations && !TLI.isLoadExtLegal(ExtType, ExtVT))
5743     return SDValue();
5744
5745   unsigned EVTBits = ExtVT.getSizeInBits();
5746
5747   // Do not generate loads of non-round integer types since these can
5748   // be expensive (and would be wrong if the type is not byte sized).
5749   if (!ExtVT.isRound())
5750     return SDValue();
5751
5752   unsigned ShAmt = 0;
5753   if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
5754     if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
5755       ShAmt = N01->getZExtValue();
5756       // Is the shift amount a multiple of the size of ExtVT?
5757       if ((ShAmt & (EVTBits-1)) == 0) {
5758         N0 = N0.getOperand(0);
5759         // Is the load width a multiple of the size of ExtVT?
5760         if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
5761           return SDValue();
5762       }
5763
5764       // At this point, we must have a load or else we can't do the transform.
5765       if (!isa<LoadSDNode>(N0)) return SDValue();
5766
5767       // Because a SRL must be assumed to *need* to zero-extend the high bits
5768       // (as opposed to anyext the high bits), we can't combine the zextload
5769       // lowering of SRL and an sextload.
5770       if (cast<LoadSDNode>(N0)->getExtensionType() == ISD::SEXTLOAD)
5771         return SDValue();
5772
5773       // If the shift amount is larger than the input type then we're not
5774       // accessing any of the loaded bytes. If the load was a zextload/extload
5775       // then the result of the shift+trunc is zero/undef (handled elsewhere).
5776       if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits())
5777         return SDValue();
5778     }
5779   }
5780
5781   // If the load is shifted left (and the result isn't shifted back right),
5782   // we can fold the truncate through the shift.
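  // For example, (i32 (trunc (shl (i64 (load x)), 5))) can become
  // (shl (i32 (load x)), 5): shifting before or after the truncate produces
  // the same low 32 bits.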
5783 unsigned ShLeftAmt = 0; 5784 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() && 5785 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) { 5786 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 5787 ShLeftAmt = N01->getZExtValue(); 5788 N0 = N0.getOperand(0); 5789 } 5790 } 5791 5792 // If we haven't found a load, we can't narrow it. Don't transform one with 5793 // multiple uses, this would require adding a new load. 5794 if (!isa<LoadSDNode>(N0) || !N0.hasOneUse()) 5795 return SDValue(); 5796 5797 // Don't change the width of a volatile load. 5798 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5799 if (LN0->isVolatile()) 5800 return SDValue(); 5801 5802 // Verify that we are actually reducing a load width here. 5803 if (LN0->getMemoryVT().getSizeInBits() < EVTBits) 5804 return SDValue(); 5805 5806 // For the transform to be legal, the load must produce only two values 5807 // (the value loaded and the chain). Don't transform a pre-increment 5808 // load, for example, which produces an extra value. Otherwise the 5809 // transformation is not equivalent, and the downstream logic to replace 5810 // uses gets things wrong. 5811 if (LN0->getNumValues() > 2) 5812 return SDValue(); 5813 5814 // If the load that we're shrinking is an extload and we're not just 5815 // discarding the extension we can't simply shrink the load. Bail. 5816 // TODO: It would be possible to merge the extensions in some cases. 5817 if (LN0->getExtensionType() != ISD::NON_EXTLOAD && 5818 LN0->getMemoryVT().getSizeInBits() < ExtVT.getSizeInBits() + ShAmt) 5819 return SDValue(); 5820 5821 EVT PtrType = N0.getOperand(1).getValueType(); 5822 5823 if (PtrType == MVT::Untyped || PtrType.isExtended()) 5824 // It's not possible to generate a constant of extended or untyped type. 5825 return SDValue(); 5826 5827 // For big endian targets, we need to adjust the offset to the pointer to 5828 // load the correct bytes. 5829 if (TLI.isBigEndian()) { 5830 unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits(); 5831 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits(); 5832 ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; 5833 } 5834 5835 uint64_t PtrOff = ShAmt / 8; 5836 unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff); 5837 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LN0), 5838 PtrType, LN0->getBasePtr(), 5839 DAG.getConstant(PtrOff, PtrType)); 5840 AddToWorklist(NewPtr.getNode()); 5841 5842 SDValue Load; 5843 if (ExtType == ISD::NON_EXTLOAD) 5844 Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr, 5845 LN0->getPointerInfo().getWithOffset(PtrOff), 5846 LN0->isVolatile(), LN0->isNonTemporal(), 5847 LN0->isInvariant(), NewAlign, LN0->getAAInfo()); 5848 else 5849 Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(),NewPtr, 5850 LN0->getPointerInfo().getWithOffset(PtrOff), 5851 ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 5852 LN0->isInvariant(), NewAlign, LN0->getAAInfo()); 5853 5854 // Replace the old load's chain with the new load's chain. 5855 WorklistRemover DeadNodes(*this); 5856 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 5857 5858 // Shift the result left, if we've swallowed a left shift. 
5859 SDValue Result = Load; 5860 if (ShLeftAmt != 0) { 5861 EVT ShImmTy = getShiftAmountTy(Result.getValueType()); 5862 if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt)) 5863 ShImmTy = VT; 5864 // If the shift amount is as large as the result size (but, presumably, 5865 // no larger than the source) then the useful bits of the result are 5866 // zero; we can't simply return the shortened shift, because the result 5867 // of that operation is undefined. 5868 if (ShLeftAmt >= VT.getSizeInBits()) 5869 Result = DAG.getConstant(0, VT); 5870 else 5871 Result = DAG.getNode(ISD::SHL, SDLoc(N0), VT, 5872 Result, DAG.getConstant(ShLeftAmt, ShImmTy)); 5873 } 5874 5875 // Return the new loaded value. 5876 return Result; 5877 } 5878 5879 SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { 5880 SDValue N0 = N->getOperand(0); 5881 SDValue N1 = N->getOperand(1); 5882 EVT VT = N->getValueType(0); 5883 EVT EVT = cast<VTSDNode>(N1)->getVT(); 5884 unsigned VTBits = VT.getScalarType().getSizeInBits(); 5885 unsigned EVTBits = EVT.getScalarType().getSizeInBits(); 5886 5887 // fold (sext_in_reg c1) -> c1 5888 if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF) 5889 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1); 5890 5891 // If the input is already sign extended, just drop the extension. 5892 if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1) 5893 return N0; 5894 5895 // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2 5896 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 5897 EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) 5898 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 5899 N0.getOperand(0), N1); 5900 5901 // fold (sext_in_reg (sext x)) -> (sext x) 5902 // fold (sext_in_reg (aext x)) -> (sext x) 5903 // if x is small enough. 5904 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) { 5905 SDValue N00 = N0.getOperand(0); 5906 if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits && 5907 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 5908 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1); 5909 } 5910 5911 // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero. 5912 if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits))) 5913 return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT); 5914 5915 // fold operands of sext_in_reg based on knowledge that the top bits are not 5916 // demanded. 5917 if (SimplifyDemandedBits(SDValue(N, 0))) 5918 return SDValue(N, 0); 5919 5920 // fold (sext_in_reg (load x)) -> (smaller sextload x) 5921 // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits)) 5922 SDValue NarrowLoad = ReduceLoadWidth(N); 5923 if (NarrowLoad.getNode()) 5924 return NarrowLoad; 5925 5926 // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24) 5927 // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible. 5928 // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above. 5929 if (N0.getOpcode() == ISD::SRL) { 5930 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) 5931 if (ShAmt->getZExtValue()+EVTBits <= VTBits) { 5932 // We can turn this into an SRA iff the input to the SRL is already sign 5933 // extended enough. 
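        // Replacing the srl/sext_in_reg pair with a single sra is only safe
        // when the sra result is already sign-extended from bit EVTBits-1,
        // i.e. when the input has more than VTBits-ShAmt-EVTBits sign bits.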
5934 unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0)); 5935 if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits) 5936 return DAG.getNode(ISD::SRA, SDLoc(N), VT, 5937 N0.getOperand(0), N0.getOperand(1)); 5938 } 5939 } 5940 5941 // fold (sext_inreg (extload x)) -> (sextload x) 5942 if (ISD::isEXTLoad(N0.getNode()) && 5943 ISD::isUNINDEXEDLoad(N0.getNode()) && 5944 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 5945 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 5946 TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) { 5947 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5948 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 5949 LN0->getChain(), 5950 LN0->getBasePtr(), EVT, 5951 LN0->getMemOperand()); 5952 CombineTo(N, ExtLoad); 5953 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 5954 AddToWorklist(ExtLoad.getNode()); 5955 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5956 } 5957 // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use 5958 if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 5959 N0.hasOneUse() && 5960 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 5961 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 5962 TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) { 5963 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5964 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 5965 LN0->getChain(), 5966 LN0->getBasePtr(), EVT, 5967 LN0->getMemOperand()); 5968 CombineTo(N, ExtLoad); 5969 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 5970 return SDValue(N, 0); // Return N so it doesn't get rechecked! 5971 } 5972 5973 // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16)) 5974 if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) { 5975 SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 5976 N0.getOperand(1), false); 5977 if (BSwap.getNode()) 5978 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 5979 BSwap, N1); 5980 } 5981 5982 // Fold a sext_inreg of a build_vector of ConstantSDNodes or undefs 5983 // into a build_vector. 
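  // For example, (sext_in_reg (build_vector <i32 255, i32 1>), v2i8) becomes
  // (build_vector <i32 -1, i32 1>): each constant is shifted up by
  // VTBits - EVTBits and arithmetic-shifted back down, replicating the sign
  // bit of the narrow type.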
5984   if (ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
5985     SmallVector<SDValue, 8> Elts;
5986     unsigned NumElts = N0->getNumOperands();
5987     unsigned ShAmt = VTBits - EVTBits;
5988
5989     for (unsigned i = 0; i != NumElts; ++i) {
5990       SDValue Op = N0->getOperand(i);
5991       if (Op->getOpcode() == ISD::UNDEF) {
5992         Elts.push_back(Op);
5993         continue;
5994       }
5995
5996       ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
5997       const APInt &C = APInt(VTBits, CurrentND->getAPIntValue().getZExtValue());
5998       Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
5999                                      Op.getValueType()));
6000     }
6001
6002     return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Elts);
6003   }
6004
6005   return SDValue();
6006 }
6007
6008 SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
6009   SDValue N0 = N->getOperand(0);
6010   EVT VT = N->getValueType(0);
6011   bool isLE = TLI.isLittleEndian();
6012
6013   // noop truncate
6014   if (N0.getValueType() == N->getValueType(0))
6015     return N0;
6016   // fold (truncate c1) -> c1
6017   if (isa<ConstantSDNode>(N0))
6018     return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0);
6019   // fold (truncate (truncate x)) -> (truncate x)
6020   if (N0.getOpcode() == ISD::TRUNCATE)
6021     return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
6022   // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
6023   if (N0.getOpcode() == ISD::ZERO_EXTEND ||
6024       N0.getOpcode() == ISD::SIGN_EXTEND ||
6025       N0.getOpcode() == ISD::ANY_EXTEND) {
6026     if (N0.getOperand(0).getValueType().bitsLT(VT))
6027       // if the source is smaller than the dest, we still need an extend
6028       return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
6029                          N0.getOperand(0));
6030     if (N0.getOperand(0).getValueType().bitsGT(VT))
6031       // if the source is larger than the dest, then we just need the truncate
6032       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
6033     // if the source and dest are the same type, we can drop both the extend
6034     // and the truncate.
6035     return N0.getOperand(0);
6036   }
6037
6038   // Fold extract-and-trunc into a narrow extract. For example:
6039   //   i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
6040   //   i32 y = TRUNCATE(i64 x)
6041   //        -- becomes --
6042   //   v16i8 b = BITCAST (v2i64 val)
6043   //   i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
6044   //
6045   // Note: We only run this optimization after type legalization (which often
6046   // creates this pattern) and before operation legalization, after which
6047   // we need to be more careful about the vector instructions that we generate.
6048   if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6049       LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
6050
6051     EVT VecTy = N0.getOperand(0).getValueType();
6052     EVT ExTy = N0.getValueType();
6053     EVT TrTy = N->getValueType(0);
6054
6055     unsigned NumElem = VecTy.getVectorNumElements();
6056     unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
6057
6058     EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
6059     assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
6060
6061     SDValue EltNo = N0->getOperand(1);
6062     if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
6063       int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
6064       EVT IndexTy = TLI.getVectorIdxTy();
6065       int Index = isLE ?
(Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1)); 6066 6067 SDValue V = DAG.getNode(ISD::BITCAST, SDLoc(N), 6068 NVT, N0.getOperand(0)); 6069 6070 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, 6071 SDLoc(N), TrTy, V, 6072 DAG.getConstant(Index, IndexTy)); 6073 } 6074 } 6075 6076 // trunc (select c, a, b) -> select c, (trunc a), (trunc b) 6077 if (N0.getOpcode() == ISD::SELECT) { 6078 EVT SrcVT = N0.getValueType(); 6079 if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) && 6080 TLI.isTruncateFree(SrcVT, VT)) { 6081 SDLoc SL(N0); 6082 SDValue Cond = N0.getOperand(0); 6083 SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1)); 6084 SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2)); 6085 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1); 6086 } 6087 } 6088 6089 // Fold a series of buildvector, bitcast, and truncate if possible. 6090 // For example fold 6091 // (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to 6092 // (2xi32 (buildvector x, y)). 6093 if (Level == AfterLegalizeVectorOps && VT.isVector() && 6094 N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() && 6095 N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR && 6096 N0.getOperand(0).hasOneUse()) { 6097 6098 SDValue BuildVect = N0.getOperand(0); 6099 EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType(); 6100 EVT TruncVecEltTy = VT.getVectorElementType(); 6101 6102 // Check that the element types match. 6103 if (BuildVectEltTy == TruncVecEltTy) { 6104 // Now we only need to compute the offset of the truncated elements. 6105 unsigned BuildVecNumElts = BuildVect.getNumOperands(); 6106 unsigned TruncVecNumElts = VT.getVectorNumElements(); 6107 unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts; 6108 6109 assert((BuildVecNumElts % TruncVecNumElts) == 0 && 6110 "Invalid number of elements"); 6111 6112 SmallVector<SDValue, 8> Opnds; 6113 for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset) 6114 Opnds.push_back(BuildVect.getOperand(i)); 6115 6116 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds); 6117 } 6118 } 6119 6120 // See if we can simplify the input to this truncate through knowledge that 6121 // only the low bits are being used. 6122 // For example "trunc (or (shl x, 8), y)" // -> trunc y 6123 // Currently we only perform this optimization on scalars because vectors 6124 // may have different active low bits. 6125 if (!VT.isVector()) { 6126 SDValue Shorter = 6127 GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(), 6128 VT.getSizeInBits())); 6129 if (Shorter.getNode()) 6130 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter); 6131 } 6132 // fold (truncate (load x)) -> (smaller load x) 6133 // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits)) 6134 if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) { 6135 SDValue Reduced = ReduceLoadWidth(N); 6136 if (Reduced.getNode()) 6137 return Reduced; 6138 // Handle the case where the load remains an extending load even 6139 // after truncation. 
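    // For example, (i32 (trunc (i64 (zextload i16 x)))) becomes
    // (i32 (zextload i16 x)): the 16 loaded bits still fit in the narrower
    // result, so only the destination type of the extension changes.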
    if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
      if (!LN0->isVolatile() &&
          LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
        SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
                                         VT, LN0->getChain(), LN0->getBasePtr(),
                                         LN0->getMemoryVT(),
                                         LN0->getMemOperand());
        DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
        return NewLoad;
      }
    }
  }
  // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...),
  // where ... are all 'undef'.
  if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
    SmallVector<EVT, 8> VTs;
    SDValue V;
    unsigned Idx = 0;
    unsigned NumDefs = 0;

    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
      SDValue X = N0.getOperand(i);
      if (X.getOpcode() != ISD::UNDEF) {
        V = X;
        Idx = i;
        NumDefs++;
      }
      // Stop if more than one member is non-undef.
      if (NumDefs > 1)
        break;
      VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
                                     VT.getVectorElementType(),
                                     X.getValueType().getVectorNumElements()));
    }

    if (NumDefs == 0)
      return DAG.getUNDEF(VT);

    if (NumDefs == 1) {
      assert(V.getNode() && "The single defined operand is empty!");
      SmallVector<SDValue, 8> Opnds;
      for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
        if (i != Idx) {
          Opnds.push_back(DAG.getUNDEF(VTs[i]));
          continue;
        }
        SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
        AddToWorklist(NV.getNode());
        Opnds.push_back(NV);
      }
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
    }
  }

  // Simplify the operands using demanded-bits information.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
  SDValue Elt = N->getOperand(i);
  if (Elt.getOpcode() != ISD::MERGE_VALUES)
    return Elt.getNode();
  return Elt.getOperand(Elt.getResNo()).getNode();
}

/// CombineConsecutiveLoads - build_pair (load, load) -> load
/// if load locations are consecutive.
SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
  assert(N->getOpcode() == ISD::BUILD_PAIR);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
  LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
      LD1->getAddressSpace() != LD2->getAddressSpace())
    return SDValue();
  EVT LD1VT = LD1->getValueType(0);

  if (ISD::isNON_EXTLoad(LD2) &&
      LD2->hasOneUse() &&
      // If both are volatile this would reduce the number of volatile loads.
      // If one is volatile it might be ok, but play conservative and bail out.
      !LD1->isVolatile() &&
      !LD2->isVolatile() &&
      DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
    unsigned Align = LD1->getAlignment();
    unsigned NewAlign = TLI.getDataLayout()->
      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));

    if (NewAlign <= Align &&
        (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
      return DAG.getLoad(VT, SDLoc(N), LD1->getChain(),
                         LD1->getBasePtr(), LD1->getPointerInfo(),
                         false, false, false, Align);
  }

  return SDValue();
}

SDValue DAGCombiner::visitBITCAST(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // If the input is a BUILD_VECTOR with all constant elements, fold this now.
  // Only do this before legalize, since afterward the target may be depending
  // on the bitconvert.
  // First check to see if this is all constant.
  if (!LegalTypes &&
      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
      VT.isVector()) {
    bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();

    EVT DestEltVT = N->getValueType(0).getVectorElementType();
    assert(!DestEltVT.isVector() &&
           "Element type of vector ValueType must not be vector!");
    if (isSimple)
      return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
  }

  // If the input is a constant, let getNode fold it.
  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
    SDValue Res = DAG.getNode(ISD::BITCAST, SDLoc(N), VT, N0);
    if (Res.getNode() != N) {
      if (!LegalOperations ||
          TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
        return Res;

      // Folding it resulted in an illegal node, and it's too late to
      // do that. Clean up the old node and forego the transformation.
      // Ideally this won't happen very often, because instcombine
      // and the earlier dagcombine runs (where illegal nodes are
      // permitted) should have folded most of them already.
      deleteAndRecombine(Res.getNode());
    }
  }

  // (conv (conv x, t1), t2) -> (conv x, t2)
  if (N0.getOpcode() == ISD::BITCAST)
    return DAG.getNode(ISD::BITCAST, SDLoc(N), VT,
                       N0.getOperand(0));

  // fold (conv (load x)) -> (load (conv*)x)
  // If the resultant load doesn't need a higher alignment than the original!
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      // Do not change the width of a volatile load.
      !cast<LoadSDNode>(N0)->isVolatile() &&
      // Do not remove the cast if the types differ in endian layout.
      TLI.hasBigEndianPartOrdering(N0.getValueType()) ==
      TLI.hasBigEndianPartOrdering(VT) &&
      (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
      TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    unsigned Align = TLI.getDataLayout()->
      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
    unsigned OrigAlign = LN0->getAlignment();

    if (Align <= OrigAlign) {
      SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(),
                                 LN0->getBasePtr(), LN0->getPointerInfo(),
                                 LN0->isVolatile(), LN0->isNonTemporal(),
                                 LN0->isInvariant(), OrigAlign,
                                 LN0->getAAInfo());
      AddToWorklist(N);
      CombineTo(N0.getNode(),
                DAG.getNode(ISD::BITCAST, SDLoc(N0),
                            N0.getValueType(), Load),
                Load.getValue(1));
      return Load;
    }
  }

  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
  // This often reduces constant pool loads.
  if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
       (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
      N0.getNode()->hasOneUse() && VT.isInteger() &&
      !VT.isVector() && !N0.getValueType().isVector()) {
    SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
                                  N0.getOperand(0));
    AddToWorklist(NewConv.getNode());

    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
    if (N0.getOpcode() == ISD::FNEG)
      return DAG.getNode(ISD::XOR, SDLoc(N), VT,
                         NewConv, DAG.getConstant(SignBit, VT));
    assert(N0.getOpcode() == ISD::FABS);
    return DAG.getNode(ISD::AND, SDLoc(N), VT,
                       NewConv, DAG.getConstant(~SignBit, VT));
  }

  // fold (bitconvert (fcopysign cst, x)) ->
  //         (or (and (bitconvert x), sign), (and cst, (not sign)))
  // Note that we don't handle (copysign x, cst) because this can always be
  // folded to an fneg or fabs.
  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
      isa<ConstantFPSDNode>(N0.getOperand(0)) &&
      VT.isInteger() && !VT.isVector()) {
    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
    EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
    if (isTypeLegal(IntXVT)) {
      SDValue X = DAG.getNode(ISD::BITCAST, SDLoc(N0),
                              IntXVT, N0.getOperand(1));
      AddToWorklist(X.getNode());

      // If X has a different width than the result/lhs, sext it or truncate it.
      unsigned VTWidth = VT.getSizeInBits();
      if (OrigXWidth < VTWidth) {
        X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
        AddToWorklist(X.getNode());
      } else if (OrigXWidth > VTWidth) {
        // To get the sign bit in the right place, we have to shift it right
        // before truncating.
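        // For instance (illustrative widths), with X : i64 and VT : i32,
        // (srl X, 32) moves X's sign bit down into bit 31 so that the
        // truncate below preserves it.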
        X = DAG.getNode(ISD::SRL, SDLoc(X),
                        X.getValueType(), X,
                        DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
        AddToWorklist(X.getNode());
        X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
        AddToWorklist(X.getNode());
      }

      APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
      X = DAG.getNode(ISD::AND, SDLoc(X), VT,
                      X, DAG.getConstant(SignBit, VT));
      AddToWorklist(X.getNode());

      SDValue Cst = DAG.getNode(ISD::BITCAST, SDLoc(N0),
                                VT, N0.getOperand(0));
      Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
                        Cst, DAG.getConstant(~SignBit, VT));
      AddToWorklist(Cst.getNode());

      return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
    }
  }

  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
  if (N0.getOpcode() == ISD::BUILD_PAIR) {
    SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
    if (CombineLD.getNode())
      return CombineLD;
  }

  return SDValue();
}

SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
  EVT VT = N->getValueType(0);
  return CombineConsecutiveLoads(N, VT);
}

/// ConstantFoldBITCASTofBUILD_VECTOR - We know that BV is a build_vector
/// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the
/// destination element value type.
SDValue DAGCombiner::
ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
  EVT SrcEltVT = BV->getValueType(0).getVectorElementType();

  // If this is already the right type, we're done.
  if (SrcEltVT == DstEltVT) return SDValue(BV, 0);

  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
  unsigned DstBitSize = DstEltVT.getSizeInBits();

  // If this is a conversion of N elements of one type to N elements of another
  // type, convert each element. This handles FP<->INT cases.
  if (SrcBitSize == DstBitSize) {
    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                              BV->getValueType(0).getVectorNumElements());

    // Due to the FP element handling below calling this routine recursively,
    // we can end up with a scalar-to-vector node here.
    if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
      return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
                         DAG.getNode(ISD::BITCAST, SDLoc(BV),
                                     DstEltVT, BV->getOperand(0)));

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
      SDValue Op = BV->getOperand(i);
      // If the vector element type is not legal, the BUILD_VECTOR operands
      // are promoted and implicitly truncated. Make that explicit here.
      if (Op.getValueType() != SrcEltVT)
        Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
      Ops.push_back(DAG.getNode(ISD::BITCAST, SDLoc(BV),
                                DstEltVT, Op));
      AddToWorklist(Ops.back().getNode());
    }
    return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
  }

  // Otherwise, we're growing or shrinking the elements. To avoid having to
  // handle annoying details of growing/shrinking FP values, we convert them to
  // int first.
  if (SrcEltVT.isFloatingPoint()) {
    // Convert the input float vector to an int vector where the elements are
    // the same size.
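    // E.g. a v2f32 build_vector is first rewritten element-wise as v2i32;
    // the integer-only paths below then handle any growing or shrinking.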
    assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
    BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
    SrcEltVT = IntVT;
  }

  // Now we know the input is an integer vector. If the output is a FP type,
  // convert to integer first, then to FP of the right size.
  if (DstEltVT.isFloatingPoint()) {
    assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
    EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
    SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();

    // Next, convert to FP elements of the same size.
    return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
  }

  // Okay, we know the src/dst types are both integers of differing types.
  // Handling growing first.
  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
  if (SrcBitSize < DstBitSize) {
    unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e;
         i += NumInputsPerOutput) {
      bool isLE = TLI.isLittleEndian();
      APInt NewBits = APInt(DstBitSize, 0);
      bool EltIsUndef = true;
      for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
        // Shift the previously computed bits over.
        NewBits <<= SrcBitSize;
        SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
        if (Op.getOpcode() == ISD::UNDEF) continue;
        EltIsUndef = false;

        NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
                   zextOrTrunc(SrcBitSize).zext(DstBitSize);
      }

      if (EltIsUndef)
        Ops.push_back(DAG.getUNDEF(DstEltVT));
      else
        Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
    }

    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
    return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
  }

  // Finally, this must be the case where we are shrinking elements: each input
  // turns into multiple outputs.
  bool isS2V = ISD::isScalarToVector(BV);
  unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
  EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                            NumOutputsPerInput*BV->getNumOperands());
  SmallVector<SDValue, 8> Ops;

  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
      for (unsigned j = 0; j != NumOutputsPerInput; ++j)
        Ops.push_back(DAG.getUNDEF(DstEltVT));
      continue;
    }

    APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->
                  getAPIntValue().zextOrTrunc(SrcBitSize);

    for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
      APInt ThisVal = OpVal.trunc(DstBitSize);
      Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
      if (isS2V && i == 0 && j == 0 && ThisVal.zext(SrcBitSize) == OpVal)
        // Simply turn this into a SCALAR_TO_VECTOR of the new type.
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
                           Ops[0]);
      OpVal = OpVal.lshr(DstBitSize);
    }

    // For big endian targets, swap the order of the pieces of each element.
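    // E.g. (illustrative) splitting the i32 constant 0x00010203 into i8
    // pieces yields 03,02,01,00 in the loop above (low bits first); a
    // big-endian element order needs 00,01,02,03, hence the reverse.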
    if (TLI.isBigEndian())
      std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
  }

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
}

SDValue DAGCombiner::visitFADD(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fadd c1, c2) -> c1 + c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N1);
  // canonicalize constant to RHS
  if (N0CFP && !N1CFP)
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N0);
  // fold (fadd A, 0) -> A
  if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
      N1CFP->getValueAPF().isZero())
    return N0;
  // fold (fadd A, (fneg B)) -> (fsub A, B)
  if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
      isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
    return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N0,
                       GetNegatedExpression(N1, DAG, LegalOperations));
  // fold (fadd (fneg A), B) -> (fsub B, A)
  if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
      isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
    return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N1,
                       GetNegatedExpression(N0, DAG, LegalOperations));

  // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
  if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
      N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() &&
      isa<ConstantFPSDNode>(N0.getOperand(1)))
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                   N0.getOperand(1), N1));

  // No FP constant should be created after legalization, as the instruction
  // selection pass has a hard time dealing with FP constants.
  //
  // We don't need to test this condition for transformations like the
  // following, as the DAG being transformed implies it is already legal to
  // take an FP constant as an operand:
  //
  //  (fadd (fmul c, x), x) -> (fmul c+1, x)
  //
  bool AllowNewFpConst = (Level < AfterLegalizeDAG);

  // If allowed, fold (fadd (fneg x), x) -> 0.0
  if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath &&
      N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
    return DAG.getConstantFP(0.0, VT);

  // If allowed, fold (fadd x, (fneg x)) -> 0.0
  if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath &&
      N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
    return DAG.getConstantFP(0.0, VT);

  // In unsafe math mode, we can fold chains of FADD's of the same value
  // into multiplications. This transform is not safe in general because
  // we are reducing the number of rounding steps.
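  // E.g. (fadd (fadd x, x), x) becomes (fmul x, 3.0) below. Folds of this
  // kind can change how intermediate results are rounded, which is why they
  // are gated on unsafe-fp-math.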
  if (DAG.getTarget().Options.UnsafeFPMath &&
      TLI.isOperationLegalOrCustom(ISD::FMUL, VT) &&
      !N0CFP && !N1CFP) {
    if (N0.getOpcode() == ISD::FMUL) {
      ConstantFPSDNode *CFP00 = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
      ConstantFPSDNode *CFP01 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));

      // (fadd (fmul c, x), x) -> (fmul x, c+1)
      if (CFP00 && !CFP01 && N0.getOperand(1) == N1) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP00, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N1, NewCFP);
      }

      // (fadd (fmul x, c), x) -> (fmul x, c+1)
      if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP01, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N1, NewCFP);
      }

      // (fadd (fmul c, x), (fadd x, x)) -> (fmul x, c+2)
      if (CFP00 && !CFP01 && N1.getOpcode() == ISD::FADD &&
          N1.getOperand(0) == N1.getOperand(1) &&
          N0.getOperand(1) == N1.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP00, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N0.getOperand(1), NewCFP);
      }

      // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
      if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
          N1.getOperand(0) == N1.getOperand(1) &&
          N0.getOperand(0) == N1.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP01, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N0.getOperand(0), NewCFP);
      }
    }

    if (N1.getOpcode() == ISD::FMUL) {
      ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
      ConstantFPSDNode *CFP11 = dyn_cast<ConstantFPSDNode>(N1.getOperand(1));

      // (fadd x, (fmul c, x)) -> (fmul x, c+1)
      if (CFP10 && !CFP11 && N1.getOperand(1) == N0) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP10, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N0, NewCFP);
      }

      // (fadd x, (fmul x, c)) -> (fmul x, c+1)
      if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP11, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N0, NewCFP);
      }

      // (fadd (fadd x, x), (fmul c, x)) -> (fmul x, c+2)
      if (CFP10 && !CFP11 && N0.getOpcode() == ISD::FADD &&
          N0.getOperand(0) == N0.getOperand(1) &&
          N1.getOperand(1) == N0.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP10, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N1.getOperand(1), NewCFP);
      }

      // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
      if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
          N0.getOperand(0) == N0.getOperand(1) &&
          N1.getOperand(0) == N0.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
                                     SDValue(CFP11, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N1.getOperand(0), NewCFP);
      }
    }

    if (N0.getOpcode() == ISD::FADD && AllowNewFpConst) {
      ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
      // (fadd (fadd x, x), x) -> (fmul x, 3.0)
      if (!CFP && N0.getOperand(0) == N0.getOperand(1) &&
          (N0.getOperand(0) == N1))
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N1, DAG.getConstantFP(3.0, VT));
    }

    if (N1.getOpcode() == ISD::FADD && AllowNewFpConst) {
      ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
      // (fadd x, (fadd x, x)) -> (fmul x, 3.0)
      if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
          N1.getOperand(0) == N0)
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           N0, DAG.getConstantFP(3.0, VT));
    }

    // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
    if (AllowNewFpConst &&
        N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
        N0.getOperand(0) == N0.getOperand(1) &&
        N1.getOperand(0) == N1.getOperand(1) &&
        N0.getOperand(0) == N1.getOperand(0))
      return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                         N0.getOperand(0),
                         DAG.getConstantFP(4.0, VT));
  }

  // FADD -> FMA combines:
  if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
       DAG.getTarget().Options.UnsafeFPMath) &&
      DAG.getTarget()
          .getSubtargetImpl()
          ->getTargetLowering()
          ->isFMAFasterThanFMulAndFAdd(VT) &&
      (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {

    // fold (fadd (fmul x, y), z) -> (fma x, y, z)
    if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse())
      return DAG.getNode(ISD::FMA, SDLoc(N), VT,
                         N0.getOperand(0), N0.getOperand(1), N1);

    // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
    // Note: Commutes FADD operands.
    if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse())
      return DAG.getNode(ISD::FMA, SDLoc(N), VT,
                         N1.getOperand(0), N1.getOperand(1), N0);
  }

  return SDValue();
}

SDValue DAGCombiner::visitFSUB(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fsub c1, c2) -> c1-c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N0, N1);
  // fold (fsub A, 0) -> A
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N1CFP && N1CFP->getValueAPF().isZero())
    return N0;
  // fold (fsub 0, B) -> -B
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N0CFP && N0CFP->getValueAPF().isZero()) {
    if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
      return GetNegatedExpression(N1, DAG, LegalOperations);
    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
      return DAG.getNode(ISD::FNEG, dl, VT, N1);
  }
  // fold (fsub A, (fneg B)) -> (fadd A, B)
  if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
    return DAG.getNode(ISD::FADD, dl, VT, N0,
                       GetNegatedExpression(N1, DAG, LegalOperations));

  // If 'unsafe math' is enabled, fold
  //   (fsub x, x) -> 0.0 &
  //   (fsub x, (fadd x, y)) -> (fneg y) &
  //   (fsub x, (fadd y, x)) -> (fneg y)
  if (DAG.getTarget().Options.UnsafeFPMath) {
    if (N0 == N1)
      return DAG.getConstantFP(0.0f, VT);

    if (N1.getOpcode() == ISD::FADD) {
      SDValue N10 = N1->getOperand(0);
      SDValue N11 = N1->getOperand(1);
      if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI,
                                          &DAG.getTarget().Options))
        return GetNegatedExpression(N11, DAG, LegalOperations);

      if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI,
                                          &DAG.getTarget().Options))
        return GetNegatedExpression(N10, DAG, LegalOperations);
    }
  }

  // FSUB -> FMA combines:
  if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
       DAG.getTarget().Options.UnsafeFPMath) &&
      DAG.getTarget()
          .getSubtargetImpl()
          ->getTargetLowering()
          ->isFMAFasterThanFMulAndFAdd(VT) &&
      (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {

    // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
    if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse())
      return DAG.getNode(ISD::FMA, dl, VT,
                         N0.getOperand(0), N0.getOperand(1),
                         DAG.getNode(ISD::FNEG, dl, VT, N1));

    // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
    // Note: Commutes FSUB operands.
    if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse())
      return DAG.getNode(ISD::FMA, dl, VT,
                         DAG.getNode(ISD::FNEG, dl, VT,
                                     N1.getOperand(0)),
                         N1.getOperand(1), N0);

    // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
    if (N0.getOpcode() == ISD::FNEG &&
        N0.getOperand(0).getOpcode() == ISD::FMUL &&
        N0->hasOneUse() && N0.getOperand(0).hasOneUse()) {
      SDValue N00 = N0.getOperand(0).getOperand(0);
      SDValue N01 = N0.getOperand(0).getOperand(1);
      return DAG.getNode(ISD::FMA, dl, VT,
                         DAG.getNode(ISD::FNEG, dl, VT, N00), N01,
                         DAG.getNode(ISD::FNEG, dl, VT, N1));
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitFMUL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fmul c1, c2) -> c1*c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0, N1);
  // canonicalize constant to RHS
  if (N0CFP && !N1CFP)
    return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N1, N0);
  // fold (fmul A, 0) -> 0
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N1CFP && N1CFP->getValueAPF().isZero())
    return N1;
  // fold (fmul A, 0) -> 0, vector edition.
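  // E.g. (fmul v4f32:A, zeroinitializer) folds to the zero vector. This is
  // unsafe under strict FP semantics because A could contain NaN or
  // infinity, where the product would be NaN rather than 0.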
  if (DAG.getTarget().Options.UnsafeFPMath &&
      ISD::isBuildVectorAllZeros(N1.getNode()))
    return N1;
  // fold (fmul A, 1.0) -> A
  if (N1CFP && N1CFP->isExactlyValue(1.0))
    return N0;
  // fold (fmul X, 2.0) -> (fadd X, X)
  if (N1CFP && N1CFP->isExactlyValue(+2.0))
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N0);
  // fold (fmul X, -1.0) -> (fneg X)
  if (N1CFP && N1CFP->isExactlyValue(-1.0))
    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
      return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);

  // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI,
                                       &DAG.getTarget().Options)) {
    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
                                         &DAG.getTarget().Options)) {
      // Both can be negated for free, check to see if at least one is cheaper
      // negated.
      if (LHSNeg == 2 || RHSNeg == 2)
        return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                           GetNegatedExpression(N0, DAG, LegalOperations),
                           GetNegatedExpression(N1, DAG, LegalOperations));
    }
  }

  // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N1CFP && N0.getOpcode() == ISD::FMUL &&
      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
    return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getNode(ISD::FMUL, SDLoc(N), VT,
                                   N0.getOperand(1), N1));

  return SDValue();
}

SDValue DAGCombiner::visitFMA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // Constant fold FMA.
  if (isa<ConstantFPSDNode>(N0) &&
      isa<ConstantFPSDNode>(N1) &&
      isa<ConstantFPSDNode>(N2)) {
    return DAG.getNode(ISD::FMA, dl, VT, N0, N1, N2);
  }

  if (DAG.getTarget().Options.UnsafeFPMath) {
    if (N0CFP && N0CFP->isZero())
      return N2;
    if (N1CFP && N1CFP->isZero())
      return N2;
  }
  if (N0CFP && N0CFP->isExactlyValue(1.0))
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
  if (N1CFP && N1CFP->isExactlyValue(1.0))
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);

  // Canonicalize (fma c, x, y) -> (fma x, c, y)
  if (N0CFP && !N1CFP)
    return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);

  // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
  if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
      N2.getOpcode() == ISD::FMUL &&
      N0 == N2.getOperand(0) &&
      N2.getOperand(1).getOpcode() == ISD::ConstantFP) {
    return DAG.getNode(ISD::FMUL, dl, VT, N0,
                       DAG.getNode(ISD::FADD, dl, VT, N1, N2.getOperand(1)));
  }

  // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N0.getOpcode() == ISD::FMUL && N1CFP &&
      N0.getOperand(1).getOpcode() == ISD::ConstantFP) {
    return DAG.getNode(ISD::FMA, dl, VT,
                       N0.getOperand(0),
                       DAG.getNode(ISD::FMUL, dl, VT, N1, N0.getOperand(1)),
                       N2);
  }

  // (fma x, 1, y) -> (fadd x, y)
  // (fma x, -1, y) -> (fadd (fneg x), y)
  if (N1CFP) {
    if (N1CFP->isExactlyValue(1.0))
      return DAG.getNode(ISD::FADD, dl, VT, N0, N2);

    if (N1CFP->isExactlyValue(-1.0) &&
        (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
      SDValue RHSNeg = DAG.getNode(ISD::FNEG, dl, VT, N0);
      AddToWorklist(RHSNeg.getNode());
      return DAG.getNode(ISD::FADD, dl, VT, N2, RHSNeg);
    }
  }

  // (fma x, c, x) -> (fmul x, (c+1))
  if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && N0 == N2)
    return DAG.getNode(ISD::FMUL, dl, VT, N0,
                       DAG.getNode(ISD::FADD, dl, VT,
                                   N1, DAG.getConstantFP(1.0, VT)));

  // (fma x, c, (fneg x)) -> (fmul x, (c-1))
  if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
      N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0)
    return DAG.getNode(ISD::FMUL, dl, VT, N0,
                       DAG.getNode(ISD::FADD, dl, VT,
                                   N1, DAG.getConstantFP(-1.0, VT)));

  return SDValue();
}

SDValue DAGCombiner::visitFDIV(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fdiv c1, c2) -> c1/c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1);

  // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
  if (N1CFP && DAG.getTarget().Options.UnsafeFPMath) {
    // Compute the reciprocal 1.0 / c2.
    APFloat N1APF = N1CFP->getValueAPF();
    APFloat Recip(N1APF.getSemantics(), 1);  // 1.0
    APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
    // Only do the transform if the reciprocal is a legal fp immediate that
    // isn't too nasty (e.g. NaN, denormal, ...).
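    // E.g. (fdiv X, 2.0) becomes (fmul X, 0.5) and the reciprocal is exact
    // (opOK), while (fdiv X, 3.0) would use an inexact reciprocal
    // (opInexact), which is only acceptable here because unsafe-fp-math
    // is enabled.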
    if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
        (!LegalOperations ||
         // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
         // backend)... we should handle this gracefully after Legalize.
         // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) ||
         TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) ||
         TLI.isFPImmLegal(Recip, VT)))
      return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0,
                         DAG.getConstantFP(Recip, VT));
  }

  // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI,
                                       &DAG.getTarget().Options)) {
    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
                                         &DAG.getTarget().Options)) {
      // Both can be negated for free, check to see if at least one is cheaper
      // negated.
      if (LHSNeg == 2 || RHSNeg == 2)
        return DAG.getNode(ISD::FDIV, SDLoc(N), VT,
                           GetNegatedExpression(N0, DAG, LegalOperations),
                           GetNegatedExpression(N1, DAG, LegalOperations));
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitFREM(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold (frem c1, c2) -> fmod(c1,c2)
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1);

  return SDValue();
}

SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  if (N0CFP && N1CFP)  // Constant fold
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1);

  if (N1CFP) {
    const APFloat& V = N1CFP->getValueAPF();
    // copysign(x, c1) -> fabs(x)       iff ispos(c1)
    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
    if (!V.isNegative()) {
      if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
        return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
    } else {
      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
        return DAG.getNode(ISD::FNEG, SDLoc(N), VT,
                           DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
    }
  }

  // copysign(fabs(x), y) -> copysign(x, y)
  // copysign(fneg(x), y) -> copysign(x, y)
  // copysign(copysign(x,z), y) -> copysign(x, y)
  if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
      N0.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
                       N0.getOperand(0), N1);

  // copysign(x, abs(y)) -> abs(x)
  if (N1.getOpcode() == ISD::FABS)
    return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);

  // copysign(x, copysign(y,z)) -> copysign(x, z)
  if (N1.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
                       N0, N1.getOperand(1));

  // copysign(x, fp_extend(y)) -> copysign(x, y)
  // copysign(x, fp_round(y)) -> copysign(x, y)
  if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
                       N0, N1.getOperand(0));

  return SDValue();
}

SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  // fold (sint_to_fp c1) -> c1fp
  if (N0C &&
      // ...but only if the target supports immediate floating-point values
      (!LegalOperations ||
       TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);

  // If the input is a legal type, and SINT_TO_FP is not legal on this target,
  // but UINT_TO_FP is legal on this target, try to convert.
  if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
      TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
    // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
    if (DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
  }

  // The next optimizations are desirable only if SELECT_CC can be lowered.
  if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
    // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
    if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
        !VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
      SDValue Ops[] =
        { N0.getOperand(0), N0.getOperand(1),
          DAG.getConstantFP(-1.0, VT), DAG.getConstantFP(0.0, VT),
          N0.getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
    }

    // fold (sint_to_fp (zext (setcc x, y, cc))) ->
    //      (select_cc x, y, 1.0, 0.0, cc)
    if (N0.getOpcode() == ISD::ZERO_EXTEND &&
        N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
      SDValue Ops[] =
        { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
          DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT),
          N0.getOperand(0).getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  // fold (uint_to_fp c1) -> c1fp
  if (N0C &&
      // ...but only if the target supports immediate floating-point values
      (!LegalOperations ||
       TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
    return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);

  // If the input is a legal type, and UINT_TO_FP is not legal on this target,
  // but SINT_TO_FP is legal on this target, try to convert.
  if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
      TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
    // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
    if (DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
  }

  // The next optimizations are desirable only if SELECT_CC can be lowered.
  if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
    // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)

    if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
      SDValue Ops[] =
        { N0.getOperand(0), N0.getOperand(1),
          DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT),
          N0.getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fp_to_sint c1fp) -> c1
  if (N0CFP)
    return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);

  return SDValue();
}

SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fp_to_uint c1fp) -> c1
  if (N0CFP)
    return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);

  return SDValue();
}

SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fp_round c1fp) -> c1fp
  if (N0CFP)
    return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);

  // fold (fp_round (fp_extend x)) -> x
  if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
    return N0.getOperand(0);

  // fold (fp_round (fp_round x)) -> (fp_round x)
  if (N0.getOpcode() == ISD::FP_ROUND) {
    // This is a value preserving truncation if both rounds are.
    bool IsTrunc = N->getConstantOperandVal(1) == 1 &&
                   N0.getNode()->getConstantOperandVal(1) == 1;
    return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getIntPtrConstant(IsTrunc));
  }

  // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
    SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
                              N0.getOperand(0), N1);
    AddToWorklist(Tmp.getNode());
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
                       Tmp, N0.getOperand(1));
  }

  return SDValue();
}

SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);

  // fold (fp_round_inreg c1fp) -> c1fp
  if (N0CFP && isTypeLegal(EVT)) {
    SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
    return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, Round);
  }

  return SDValue();
}

SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
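  // E.g. for (f32 (fp_round (f64 (fp_extend f32:x)))), visitFP_ROUND can
  // fold the whole pair back to x, which is better than folding just the
  // fp_extend here.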
  if (N->hasOneUse() &&
      N->use_begin()->getOpcode() == ISD::FP_ROUND)
    return SDValue();

  // fold (fp_extend c1fp) -> c1fp
  if (N0CFP)
    return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);

  // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
  // value of X.
  if (N0.getOpcode() == ISD::FP_ROUND
      && N0.getNode()->getConstantOperandVal(1) == 1) {
    SDValue In = N0.getOperand(0);
    if (In.getValueType() == VT) return In;
    if (VT.bitsLT(In.getValueType()))
      return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
                         In, N0.getOperand(1));
    return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
  }

  // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType())) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), N0.getValueType(),
                                     LN0->getMemOperand());
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(),
              DAG.getNode(ISD::FP_ROUND, SDLoc(N0),
                          N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)),
              ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }

  return SDValue();
}

SDValue DAGCombiner::visitFNEG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // Constant fold FNEG.
  if (isa<ConstantFPSDNode>(N0))
    return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N->getOperand(0));

  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVUnaryOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
                         &DAG.getTarget().Options))
    return GetNegatedExpression(N0, DAG, LegalOperations);

  // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
  // constant pool values.
  // TODO: We can also optimize for vectors here, but we need to make sure
  // that the sign mask is created properly for each vector element.
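  // E.g. (fneg (f32 (bitcast i32:x))) becomes
  // (f32 (bitcast (xor i32:x, 0x80000000))), flipping only the sign bit.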
  if (!TLI.isFNegFree(VT) && N0.getOpcode() == ISD::BITCAST &&
      !VT.isVector() &&
      N0.getNode()->hasOneUse() &&
      N0.getOperand(0).getValueType().isInteger()) {
    SDValue Int = N0.getOperand(0);
    EVT IntVT = Int.getValueType();
    if (IntVT.isInteger() && !IntVT.isVector()) {
      Int = DAG.getNode(ISD::XOR, SDLoc(N0), IntVT, Int,
              DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
      AddToWorklist(Int.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N),
                         VT, Int);
    }
  }

  // (fneg (fmul c, x)) -> (fmul -c, x)
  if (N0.getOpcode() == ISD::FMUL) {
    ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
    if (CFP1) {
      APFloat CVal = CFP1->getValueAPF();
      CVal.changeSign();
      if (Level >= AfterLegalizeDAG &&
          (TLI.isFPImmLegal(CVal, N->getValueType(0)) ||
           TLI.isOperationLegal(ISD::ConstantFP, N->getValueType(0))))
        return DAG.getNode(
            ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
            DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)));
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitFCEIL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fceil c1) -> fceil(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);

  return SDValue();
}

SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (ftrunc c1) -> ftrunc(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);

  return SDValue();
}

SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (ffloor c1) -> ffloor(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);

  return SDValue();
}

SDValue DAGCombiner::visitFABS(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVUnaryOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fabs c1) -> fabs(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
  // fold (fabs (fabs x)) -> (fabs x)
  if (N0.getOpcode() == ISD::FABS)
    return N->getOperand(0);
  // fold (fabs (fneg x)) -> (fabs x)
  // fold (fabs (fcopysign x, y)) -> (fabs x)
  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));

  // Transform fabs(bitconvert(x)) -> bitconvert(x & ~sign) to avoid loading
  // constant pool values.
  if (!TLI.isFAbsFree(VT) &&
      N0.getOpcode() == ISD::BITCAST &&
      N0.getNode()->hasOneUse()) {
    SDValue Int = N0.getOperand(0);
    EVT IntVT = Int.getValueType();
    if (IntVT.isInteger() && !IntVT.isVector()) {
      APInt SignMask;
      if (N0.getValueType().isVector()) {
        // For a vector, get a mask such as 0x7f... per scalar element
        // and splat it.
        SignMask = ~APInt::getSignBit(N0.getValueType().getScalarSizeInBits());
        SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
      } else {
        // For a scalar, just generate 0x7f...
        SignMask = ~APInt::getSignBit(IntVT.getSizeInBits());
      }
      Int = DAG.getNode(ISD::AND, SDLoc(N0), IntVT, Int,
                        DAG.getConstant(SignMask, IntVT));
      AddToWorklist(Int.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Int);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitBRCOND(SDNode *N) {
  SDValue Chain = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
  // on the target.
  if (N1.getOpcode() == ISD::SETCC &&
      TLI.isOperationLegalOrCustom(ISD::BR_CC,
                                   N1.getOperand(0).getValueType())) {
    return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
                       Chain, N1.getOperand(2),
                       N1.getOperand(0), N1.getOperand(1), N2);
  }

  if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
      ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
       (N1.getOperand(0).hasOneUse() &&
        N1.getOperand(0).getOpcode() == ISD::SRL))) {
    SDNode *Trunc = nullptr;
    if (N1.getOpcode() == ISD::TRUNCATE) {
      // Look past the truncate.
      Trunc = N1.getNode();
      N1 = N1.getOperand(0);
    }

    // Match this pattern so that we can generate simpler code:
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = srl i32 %b, 1
    //   brcond i32 %c ...
    //
    // into
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = setcc eq %b, 0
    //   brcond %c ...
    //
    // This applies only when the AND constant value has one bit set and the
    // SRL constant is equal to the log2 of the AND constant. The back-end is
    // smart enough to convert the result into a TEST/JMP sequence.
    SDValue Op0 = N1.getOperand(0);
    SDValue Op1 = N1.getOperand(1);

    if (Op0.getOpcode() == ISD::AND &&
        Op1.getOpcode() == ISD::Constant) {
      SDValue AndOp1 = Op0.getOperand(1);

      if (AndOp1.getOpcode() == ISD::Constant) {
        const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();

        if (AndConst.isPowerOf2() &&
            cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
          SDValue SetCC =
            DAG.getSetCC(SDLoc(N),
                         getSetCCResultType(Op0.getValueType()),
                         Op0, DAG.getConstant(0, Op0.getValueType()),
                         ISD::SETNE);

          SDValue NewBRCond = DAG.getNode(ISD::BRCOND, SDLoc(N),
                                          MVT::Other, Chain, SetCC, N2);
          // Don't add the new BRCond into the worklist or else SimplifySelectCC
          // will convert it back to (X & C1) >> C2.
          CombineTo(N, NewBRCond, false);
          // Truncate is dead.
          if (Trunc)
            deleteAndRecombine(Trunc);
          // Replace the uses of SRL with SETCC
          WorklistRemover DeadNodes(*this);
          DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
          deleteAndRecombine(N1.getNode());
          return SDValue(N, 0);   // Return N so it doesn't get rechecked!
        }
      }
    }

    if (Trunc)
      // Restore N1 if the above transformation doesn't match.
      N1 = N->getOperand(1);
  }

  // Transform br(xor(x, y)) -> br(x != y)
  // Transform br(xor(xor(x,y), 1)) -> br (x == y)
  if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
    SDNode *TheXor = N1.getNode();
    SDValue Op0 = TheXor->getOperand(0);
    SDValue Op1 = TheXor->getOperand(1);
    if (Op0.getOpcode() == Op1.getOpcode()) {
      // Avoid missing important xor optimizations.
      SDValue Tmp = visitXOR(TheXor);
      if (Tmp.getNode()) {
        if (Tmp.getNode() != TheXor) {
          DEBUG(dbgs() << "\nReplacing.8 ";
                TheXor->dump(&DAG);
                dbgs() << "\nWith: ";
                Tmp.getNode()->dump(&DAG);
                dbgs() << '\n');
          WorklistRemover DeadNodes(*this);
          DAG.ReplaceAllUsesOfValueWith(N1, Tmp);
          deleteAndRecombine(TheXor);
          return DAG.getNode(ISD::BRCOND, SDLoc(N),
                             MVT::Other, Chain, Tmp, N2);
        }

        // visitXOR has changed XOR's operands or replaced the XOR completely,
        // bail out.
        return SDValue(N, 0);
      }
    }

    if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
      bool Equal = false;
      if (ConstantSDNode *RHSCI = dyn_cast<ConstantSDNode>(Op0))
        if (RHSCI->getAPIntValue() == 1 && Op0.hasOneUse() &&
            Op0.getOpcode() == ISD::XOR) {
          TheXor = Op0.getNode();
          Equal = true;
        }

      EVT SetCCVT = N1.getValueType();
      if (LegalTypes)
        SetCCVT = getSetCCResultType(SetCCVT);
      SDValue SetCC = DAG.getSetCC(SDLoc(TheXor),
                                   SetCCVT,
                                   Op0, Op1,
                                   Equal ? ISD::SETEQ : ISD::SETNE);
      // Replace the uses of XOR with SETCC
      WorklistRemover DeadNodes(*this);
      DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
      deleteAndRecombine(N1.getNode());
      return DAG.getNode(ISD::BRCOND, SDLoc(N),
                         MVT::Other, Chain, SetCC, N2);
    }
  }

  return SDValue();
}

// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
//
SDValue DAGCombiner::visitBR_CC(SDNode *N) {
  CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
  SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // Use SimplifySetCC to simplify SETCC's.
  SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
                               CondLHS, CondRHS, CC->get(), SDLoc(N),
                               false);
  if (Simp.getNode()) AddToWorklist(Simp.getNode());

  // fold to a simpler setcc
  if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
    return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
                       N->getOperand(0), Simp.getOperand(2),
                       Simp.getOperand(0), Simp.getOperand(1),
                       N->getOperand(4));

  return SDValue();
}

/// canFoldInAddressingMode - Return true if 'Use' is a load or a store that
/// uses N as its base pointer and that N may be folded in the load / store
/// addressing mode.
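/// For example (illustrative), if N is (add x, 8) and Use is
/// (load (add x, 8)), the add can usually be folded as a [reg + imm]
/// addressing mode, target permitting.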
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
                                    SelectionDAG &DAG,
                                    const TargetLowering &TLI) {
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
    if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
      return false;
    VT = Use->getValueType(0);
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
    if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
      return false;
    VT = ST->getValue().getValueType();
  } else
    return false;

  TargetLowering::AddrMode AM;
  if (N->getOpcode() == ISD::ADD) {
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else if (N->getOpcode() == ISD::SUB) {
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = -Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else
    return false;

  return TLI.isLegalAddressingMode(AM, VT.getTypeForEVT(*DAG.getContext()));
}

/// CombineToPreIndexedLoadStore - Try turning a load / store into a
/// pre-indexed load / store when the base pointer is an add or subtract
/// and it has other uses besides the load / store. After the
/// transformation, the new indexed load / store has effectively folded
/// the add / subtract in and all of its other uses are redirected to the
/// new load / store.
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    if (LD->isIndexed())
      return false;
    VT = LD->getMemoryVT();
    if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    if (ST->isIndexed())
      return false;
    VT = ST->getMemoryVT();
    if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = ST->getBasePtr();
    isLoad = false;
  } else {
    return false;
  }

  // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
  // out. There is no reason to make this a preinc/predec.
  if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
      Ptr.getNode()->hasOneUse())
    return false;

  // Ask the target to do addressing mode selection.
  SDValue BasePtr;
  SDValue Offset;
  ISD::MemIndexedMode AM = ISD::UNINDEXED;
  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
    return false;

  // Backends without true r+i pre-indexed forms may need to pass a
  // constant base with a variable offset so that constant coercion
  // will work with the patterns in canonical form.
  bool Swapped = false;
  if (isa<ConstantSDNode>(BasePtr)) {
    std::swap(BasePtr, Offset);
    Swapped = true;
  }

  // Don't create an indexed load / store with zero offset.
  if (isa<ConstantSDNode>(Offset) &&
      cast<ConstantSDNode>(Offset)->isNullValue())
    return false;

  // Try turning it into a pre-indexed load / store except when:
  // 1) The new base ptr is a frame index.
7728 // 2) If N is a store and the new base ptr is either the same as or is a 7729 // predecessor of the value being stored. 7730 // 3) Another use of old base ptr is a predecessor of N. If ptr is folded 7731 // that would create a cycle. 7732 // 4) All uses are load / store ops that use it as old base ptr. 7733 7734 // Check #1. Preinc'ing a frame index would require copying the stack pointer 7735 // (plus the implicit offset) to a register to preinc anyway. 7736 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr)) 7737 return false; 7738 7739 // Check #2. 7740 if (!isLoad) { 7741 SDValue Val = cast<StoreSDNode>(N)->getValue(); 7742 if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode())) 7743 return false; 7744 } 7745 7746 // If the offset is a constant, there may be other adds of constants that 7747 // can be folded with this one. We should do this to avoid having to keep 7748 // a copy of the original base pointer. 7749 SmallVector<SDNode *, 16> OtherUses; 7750 if (isa<ConstantSDNode>(Offset)) 7751 for (SDNode *Use : BasePtr.getNode()->uses()) { 7752 if (Use == Ptr.getNode()) 7753 continue; 7754 7755 if (Use->isPredecessorOf(N)) 7756 continue; 7757 7758 if (Use->getOpcode() != ISD::ADD && Use->getOpcode() != ISD::SUB) { 7759 OtherUses.clear(); 7760 break; 7761 } 7762 7763 SDValue Op0 = Use->getOperand(0), Op1 = Use->getOperand(1); 7764 if (Op1.getNode() == BasePtr.getNode()) 7765 std::swap(Op0, Op1); 7766 assert(Op0.getNode() == BasePtr.getNode() && 7767 "Use of ADD/SUB but not an operand"); 7768 7769 if (!isa<ConstantSDNode>(Op1)) { 7770 OtherUses.clear(); 7771 break; 7772 } 7773 7774 // FIXME: In some cases, we can be smarter about this. 7775 if (Op1.getValueType() != Offset.getValueType()) { 7776 OtherUses.clear(); 7777 break; 7778 } 7779 7780 OtherUses.push_back(Use); 7781 } 7782 7783 if (Swapped) 7784 std::swap(BasePtr, Offset); 7785 7786 // Now check for #3 and #4. 7787 bool RealUse = false; 7788 7789 // Caches for hasPredecessorHelper 7790 SmallPtrSet<const SDNode *, 32> Visited; 7791 SmallVector<const SDNode *, 16> Worklist; 7792 7793 for (SDNode *Use : Ptr.getNode()->uses()) { 7794 if (Use == N) 7795 continue; 7796 if (N->hasPredecessorHelper(Use, Visited, Worklist)) 7797 return false; 7798 7799 // If Ptr may be folded in addressing mode of other use, then it's 7800 // not profitable to do this transformation. 7801 if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI)) 7802 RealUse = true; 7803 } 7804 7805 if (!RealUse) 7806 return false; 7807 7808 SDValue Result; 7809 if (isLoad) 7810 Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N), 7811 BasePtr, Offset, AM); 7812 else 7813 Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N), 7814 BasePtr, Offset, AM); 7815 ++PreIndexedNodes; 7816 ++NodesCombined; 7817 DEBUG(dbgs() << "\nReplacing.4 "; 7818 N->dump(&DAG); 7819 dbgs() << "\nWith: "; 7820 Result.getNode()->dump(&DAG); 7821 dbgs() << '\n'); 7822 WorklistRemover DeadNodes(*this); 7823 if (isLoad) { 7824 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0)); 7825 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2)); 7826 } else { 7827 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1)); 7828 } 7829 7830 // Finally, since the node is now dead, remove it from the graph. 
7831   deleteAndRecombine(N);
7832
7833   if (Swapped)
7834     std::swap(BasePtr, Offset);
7835
7836   // Replace other uses of BasePtr that can be updated to use Ptr.
7837   for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
7838     unsigned OffsetIdx = 1;
7839     if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
7840       OffsetIdx = 0;
7841     assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
7842            BasePtr.getNode() && "Expected BasePtr operand");
7843
7844     // We need to replace ptr0 in the following expression:
7845     //   x0 * offset0 + y0 * ptr0 = t0
7846     // knowing that
7847     //   x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
7848     //
7849     // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
7850     // indexed load/store and the expression that needs to be rewritten.
7851     //
7852     // Therefore, we have:
7853     //   t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
7854
7855     ConstantSDNode *CN =
7856       cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
7857     int X0, X1, Y0, Y1;
7858     APInt Offset0 = CN->getAPIntValue();
7859     APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
7860
7861     X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
7862     Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
7863     X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
7864     Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
7865
7866     unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
7867
7868     APInt CNV = Offset0;
7869     if (X0 < 0) CNV = -CNV;
7870     if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
7871     else CNV = CNV - Offset1;
7872
7873     // We can now generate the new expression.
7874     SDValue NewOp1 = DAG.getConstant(CNV, CN->getValueType(0));
7875     SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);
7876
7877     SDValue NewUse = DAG.getNode(Opcode,
7878                                  SDLoc(OtherUses[i]),
7879                                  OtherUses[i]->getValueType(0), NewOp1, NewOp2);
7880     DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
7881     deleteAndRecombine(OtherUses[i]);
7882   }
7883
7884   // Replace the uses of Ptr with uses of the updated base value.
7885   DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
7886   deleteAndRecombine(Ptr.getNode());
7887
7888   return true;
7889 }
7890
7891 /// CombineToPostIndexedLoadStore - Try to combine a load / store with an
7892 /// add / sub of the base pointer node into a post-indexed load / store.
7893 /// The transformation effectively folds the add / subtract into the new
7894 /// indexed load / store, and all of its other uses are redirected to the
7895 /// new load / store.
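/// For example (illustrative, when the target supports it):
///   x = load p
///   p' = add p, 4
/// can become a single post-incremented load that produces both x and the
/// updated pointer p'.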
7896 bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
7897   if (Level < AfterLegalizeDAG)
7898     return false;
7899
7900   bool isLoad = true;
7901   SDValue Ptr;
7902   EVT VT;
7903   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
7904     if (LD->isIndexed())
7905       return false;
7906     VT = LD->getMemoryVT();
7907     if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
7908         !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
7909       return false;
7910     Ptr = LD->getBasePtr();
7911   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
7912     if (ST->isIndexed())
7913       return false;
7914     VT = ST->getMemoryVT();
7915     if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
7916         !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
7917       return false;
7918     Ptr = ST->getBasePtr();
7919     isLoad = false;
7920   } else {
7921     return false;
7922   }
7923
7924   if (Ptr.getNode()->hasOneUse())
7925     return false;
7926
7927   for (SDNode *Op : Ptr.getNode()->uses()) {
7928     if (Op == N ||
7929         (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
7930       continue;
7931
7932     SDValue BasePtr;
7933     SDValue Offset;
7934     ISD::MemIndexedMode AM = ISD::UNINDEXED;
7935     if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
7936       // Don't create an indexed load / store with zero offset.
7937       if (isa<ConstantSDNode>(Offset) &&
7938           cast<ConstantSDNode>(Offset)->isNullValue())
7939         continue;
7940
7941       // Try turning it into a post-indexed load / store except when:
7942       // 1) All uses are load / store ops that use it as base ptr (and
7943       //    it may be folded in the addressing mode).
7944       // 2) Op must be independent of N, i.e. Op is neither a predecessor
7945       //    nor a successor of N. Otherwise, if Op is folded that would
7946       //    create a cycle.
7947
7948       if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
7949         continue;
7950
7951       // Check for #1.
7952       bool TryNext = false;
7953       for (SDNode *Use : BasePtr.getNode()->uses()) {
7954         if (Use == Ptr.getNode())
7955           continue;
7956
7957         // If all the uses are load / store addresses, then don't do the
7958         // transformation.
7959         if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) {
7960           bool RealUse = false;
7961           for (SDNode *UseUse : Use->uses()) {
7962             if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
7963               RealUse = true;
7964           }
7965
7966           if (!RealUse) {
7967             TryNext = true;
7968             break;
7969           }
7970         }
7971       }
7972
7973       if (TryNext)
7974         continue;
7975
7976       // Check for #2.
7977       if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
7978         SDValue Result = isLoad
7979           ? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
7980                                BasePtr, Offset, AM)
7981           : DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
7982                                 BasePtr, Offset, AM);
7983         ++PostIndexedNodes;
7984         ++NodesCombined;
7985         DEBUG(dbgs() << "\nReplacing.5 ";
7986               N->dump(&DAG);
7987               dbgs() << "\nWith: ";
7988               Result.getNode()->dump(&DAG);
7989               dbgs() << '\n');
7990         WorklistRemover DeadNodes(*this);
7991         if (isLoad) {
7992           DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
7993           DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
7994         } else {
7995           DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
7996         }
7997
7998         // Finally, since the node is now dead, remove it from the graph.
7999         deleteAndRecombine(N);
8000
8001         // Replace the uses of Op with uses of the updated base value.
8002         DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
8003                                       Result.getValue(isLoad ?
1 : 0)); 8004 deleteAndRecombine(Op); 8005 return true; 8006 } 8007 } 8008 } 8009 8010 return false; 8011 } 8012 8013 SDValue DAGCombiner::visitLOAD(SDNode *N) { 8014 LoadSDNode *LD = cast<LoadSDNode>(N); 8015 SDValue Chain = LD->getChain(); 8016 SDValue Ptr = LD->getBasePtr(); 8017 8018 // If load is not volatile and there are no uses of the loaded value (and 8019 // the updated indexed value in case of indexed loads), change uses of the 8020 // chain value into uses of the chain input (i.e. delete the dead load). 8021 if (!LD->isVolatile()) { 8022 if (N->getValueType(1) == MVT::Other) { 8023 // Unindexed loads. 8024 if (!N->hasAnyUseOfValue(0)) { 8025 // It's not safe to use the two value CombineTo variant here. e.g. 8026 // v1, chain2 = load chain1, loc 8027 // v2, chain3 = load chain2, loc 8028 // v3 = add v2, c 8029 // Now we replace use of chain2 with chain1. This makes the second load 8030 // isomorphic to the one we are deleting, and thus makes this load live. 8031 DEBUG(dbgs() << "\nReplacing.6 "; 8032 N->dump(&DAG); 8033 dbgs() << "\nWith chain: "; 8034 Chain.getNode()->dump(&DAG); 8035 dbgs() << "\n"); 8036 WorklistRemover DeadNodes(*this); 8037 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain); 8038 8039 if (N->use_empty()) 8040 deleteAndRecombine(N); 8041 8042 return SDValue(N, 0); // Return N so it doesn't get rechecked! 8043 } 8044 } else { 8045 // Indexed loads. 8046 assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?"); 8047 if (!N->hasAnyUseOfValue(0) && !N->hasAnyUseOfValue(1)) { 8048 SDValue Undef = DAG.getUNDEF(N->getValueType(0)); 8049 DEBUG(dbgs() << "\nReplacing.7 "; 8050 N->dump(&DAG); 8051 dbgs() << "\nWith: "; 8052 Undef.getNode()->dump(&DAG); 8053 dbgs() << " and 2 other values\n"); 8054 WorklistRemover DeadNodes(*this); 8055 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef); 8056 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), 8057 DAG.getUNDEF(N->getValueType(1))); 8058 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain); 8059 deleteAndRecombine(N); 8060 return SDValue(N, 0); // Return N so it doesn't get rechecked! 8061 } 8062 } 8063 } 8064 8065 // If this load is directly stored, replace the load value with the stored 8066 // value. 8067 // TODO: Handle store large -> read small portion. 8068 // TODO: Handle TRUNCSTORE/LOADEXT 8069 if (ISD::isNormalLoad(N) && !LD->isVolatile()) { 8070 if (ISD::isNON_TRUNCStore(Chain.getNode())) { 8071 StoreSDNode *PrevST = cast<StoreSDNode>(Chain); 8072 if (PrevST->getBasePtr() == Ptr && 8073 PrevST->getValue().getValueType() == N->getValueType(0)) 8074 return CombineTo(N, Chain.getOperand(1), Chain); 8075 } 8076 } 8077 8078 // Try to infer better alignment information than the load already has. 8079 if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) { 8080 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) { 8081 if (Align > LD->getMemOperand()->getBaseAlignment()) { 8082 SDValue NewLoad = 8083 DAG.getExtLoad(LD->getExtensionType(), SDLoc(N), 8084 LD->getValueType(0), 8085 Chain, Ptr, LD->getPointerInfo(), 8086 LD->getMemoryVT(), 8087 LD->isVolatile(), LD->isNonTemporal(), 8088 LD->isInvariant(), Align, LD->getAAInfo()); 8089 return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true); 8090 } 8091 } 8092 } 8093 8094 bool UseAA = CombinerAA.getNumOccurrences() > 0 ? 
    CombinerAA :
8095     TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
8096 #ifndef NDEBUG
8097   if (CombinerAAOnlyFunc.getNumOccurrences() &&
8098       CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
8099     UseAA = false;
8100 #endif
8101   if (UseAA && LD->isUnindexed()) {
8102     // Walk up chain skipping non-aliasing memory nodes.
8103     SDValue BetterChain = FindBetterChain(N, Chain);
8104
8105     // If there is a better chain.
8106     if (Chain != BetterChain) {
8107       SDValue ReplLoad;
8108
8109       // Replace the chain to avoid dependency.
8110       if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
8111         ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
8112                                BetterChain, Ptr, LD->getMemOperand());
8113       } else {
8114         ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
8115                                   LD->getValueType(0),
8116                                   BetterChain, Ptr, LD->getMemoryVT(),
8117                                   LD->getMemOperand());
8118       }
8119
8120       // Create token factor to keep old chain connected.
8121       SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
8122                                   MVT::Other, Chain, ReplLoad.getValue(1));
8123
8124       // Make sure the new and old chains are cleaned up.
8125       AddToWorklist(Token.getNode());
8126
8127       // Replace uses with load result and token factor. Don't add users
8128       // to work list.
8129       return CombineTo(N, ReplLoad.getValue(0), Token, false);
8130     }
8131   }
8132
8133   // Try transforming N to an indexed load.
8134   if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
8135     return SDValue(N, 0);
8136
8137   // Try to slice up N to more direct loads if the slices are mapped to
8138   // different register banks or pairing can take place.
8139   if (SliceUpLoad(N))
8140     return SDValue(N, 0);
8141
8142   return SDValue();
8143 }
8144
8145 namespace {
8146 /// \brief Helper structure used to slice a load into smaller loads.
8147 /// Basically a slice is obtained from the following sequence:
8148 /// Origin = load Ty1, Base
8149 /// Shift = srl Ty1 Origin, CstTy Amount
8150 /// Inst = trunc Shift to Ty2
8151 ///
8152 /// Then, it will be rewritten into:
8153 /// Slice = load SliceTy, Base + SliceOffset
8154 /// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
8155 ///
8156 /// SliceTy is deduced from the number of bits that are actually used to
8157 /// build Inst.
8158 struct LoadedSlice {
8159   /// \brief Helper structure used to compute the cost of a slice.
8160   struct Cost {
8161     /// Are we optimizing for code size.
8162     bool ForCodeSize;
8163     /// Various costs.
8164     unsigned Loads;
8165     unsigned Truncates;
8166     unsigned CrossRegisterBanksCopies;
8167     unsigned ZExts;
8168     unsigned Shift;
8169
8170     Cost(bool ForCodeSize = false)
8171       : ForCodeSize(ForCodeSize), Loads(0), Truncates(0),
8172         CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {}
8173
8174     /// \brief Get the cost of one isolated slice.
8175     Cost(const LoadedSlice &LS, bool ForCodeSize = false)
8176       : ForCodeSize(ForCodeSize), Loads(1), Truncates(0),
8177         CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {
8178       EVT TruncType = LS.Inst->getValueType(0);
8179       EVT LoadedType = LS.getLoadedType();
8180       if (TruncType != LoadedType &&
8181           !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
8182         ZExts = 1;
8183     }
8184
8185     /// \brief Account for slicing gain in the current cost.
8186     /// Slicing provides a few gains, like removing a shift or a
8187     /// truncate. This method allows growing the cost of the original
8188     /// load with the gains from this slice.
8189     void addSliceGain(const LoadedSlice &LS) {
8190       // Each slice saves a truncate.
8191       const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
8192       if (!TLI.isTruncateFree(LS.Inst->getValueType(0),
8193                               LS.Inst->getOperand(0).getValueType()))
8194         ++Truncates;
8195       // If there is a shift amount, this slice gets rid of it.
8196       if (LS.Shift)
8197         ++Shift;
8198       // If this slice can merge a cross register bank copy, account for it.
8199       if (LS.canMergeExpensiveCrossRegisterBankCopy())
8200         ++CrossRegisterBanksCopies;
8201     }
8202
8203     Cost &operator+=(const Cost &RHS) {
8204       Loads += RHS.Loads;
8205       Truncates += RHS.Truncates;
8206       CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
8207       ZExts += RHS.ZExts;
8208       Shift += RHS.Shift;
8209       return *this;
8210     }
8211
8212     bool operator==(const Cost &RHS) const {
8213       return Loads == RHS.Loads && Truncates == RHS.Truncates &&
8214              CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
8215              ZExts == RHS.ZExts && Shift == RHS.Shift;
8216     }
8217
8218     bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
8219
8220     bool operator<(const Cost &RHS) const {
8221       // Assume cross register bank copies are as expensive as loads.
8222       // FIXME: Do we want some more target hooks?
8223       unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
8224       unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
8225       // Unless we are optimizing for code size, consider the
8226       // expensive operation first.
8227       if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
8228         return ExpensiveOpsLHS < ExpensiveOpsRHS;
8229       return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
8230              (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
8231     }
8232
8233     bool operator>(const Cost &RHS) const { return RHS < *this; }
8234
8235     bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
8236
8237     bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
8238   };
8239   // The last instruction that represents the slice. This should be a
8240   // truncate instruction.
8241   SDNode *Inst;
8242   // The original load instruction.
8243   LoadSDNode *Origin;
8244   // The right shift amount in bits from the original load.
8245   unsigned Shift;
8246   // The DAG from which Origin came.
8247   // This is used to get some contextual information about legal types, etc.
8248   SelectionDAG *DAG;
8249
8250   LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
8251               unsigned Shift = 0, SelectionDAG *DAG = nullptr)
8252     : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
8253
8254   LoadedSlice(const LoadedSlice &LS)
8255     : Inst(LS.Inst), Origin(LS.Origin), Shift(LS.Shift), DAG(LS.DAG) {}
8256
8257   /// \brief Get the bits used in a chunk of bits \p BitWidth large.
8258   /// \return Result is \p BitWidth bits wide, with used bits set to 1 and
8259   /// unused bits set to 0.
8260   APInt getUsedBits() const {
8261     // Reproduce the trunc(lshr) sequence:
8262     // - Start from the truncated value.
8263     // - Zero extend to the desired bit width.
8264     // - Shift left.
8265     assert(Origin && "No original load to compare against.");
8266     unsigned BitWidth = Origin->getValueSizeInBits(0);
8267     assert(Inst && "This slice is not bound to an instruction");
8268     assert(Inst->getValueSizeInBits(0) <= BitWidth &&
8269            "Extracted slice is bigger than the whole type!");
8270     APInt UsedBits(Inst->getValueSizeInBits(0), 0);
8271     UsedBits.setAllBits();
8272     UsedBits = UsedBits.zext(BitWidth);
8273     UsedBits <<= Shift;
8274     return UsedBits;
8275   }
8276
8277   /// \brief Get the size of the slice to be loaded in bytes.
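  /// E.g. (illustrative), a slice covering bits 16..31 of the original load
  /// is loaded as 2 bytes.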
8278 unsigned getLoadedSize() const { 8279 unsigned SliceSize = getUsedBits().countPopulation(); 8280 assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte."); 8281 return SliceSize / 8; 8282 } 8283 8284 /// \brief Get the type that will be loaded for this slice. 8285 /// Note: This may not be the final type for the slice. 8286 EVT getLoadedType() const { 8287 assert(DAG && "Missing context"); 8288 LLVMContext &Ctxt = *DAG->getContext(); 8289 return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8); 8290 } 8291 8292 /// \brief Get the alignment of the load used for this slice. 8293 unsigned getAlignment() const { 8294 unsigned Alignment = Origin->getAlignment(); 8295 unsigned Offset = getOffsetFromBase(); 8296 if (Offset != 0) 8297 Alignment = MinAlign(Alignment, Alignment + Offset); 8298 return Alignment; 8299 } 8300 8301 /// \brief Check if this slice can be rewritten with legal operations. 8302 bool isLegal() const { 8303 // An invalid slice is not legal. 8304 if (!Origin || !Inst || !DAG) 8305 return false; 8306 8307 // Offsets are for indexed load only, we do not handle that. 8308 if (Origin->getOffset().getOpcode() != ISD::UNDEF) 8309 return false; 8310 8311 const TargetLowering &TLI = DAG->getTargetLoweringInfo(); 8312 8313 // Check that the type is legal. 8314 EVT SliceType = getLoadedType(); 8315 if (!TLI.isTypeLegal(SliceType)) 8316 return false; 8317 8318 // Check that the load is legal for this type. 8319 if (!TLI.isOperationLegal(ISD::LOAD, SliceType)) 8320 return false; 8321 8322 // Check that the offset can be computed. 8323 // 1. Check its type. 8324 EVT PtrType = Origin->getBasePtr().getValueType(); 8325 if (PtrType == MVT::Untyped || PtrType.isExtended()) 8326 return false; 8327 8328 // 2. Check that it fits in the immediate. 8329 if (!TLI.isLegalAddImmediate(getOffsetFromBase())) 8330 return false; 8331 8332 // 3. Check that the computation is legal. 8333 if (!TLI.isOperationLegal(ISD::ADD, PtrType)) 8334 return false; 8335 8336 // Check that the zext is legal if it needs one. 8337 EVT TruncateType = Inst->getValueType(0); 8338 if (TruncateType != SliceType && 8339 !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType)) 8340 return false; 8341 8342 return true; 8343 } 8344 8345 /// \brief Get the offset in bytes of this slice in the original chunk of 8346 /// bits. 8347 /// \pre DAG != nullptr. 8348 uint64_t getOffsetFromBase() const { 8349 assert(DAG && "Missing context."); 8350 bool IsBigEndian = 8351 DAG->getTargetLoweringInfo().getDataLayout()->isBigEndian(); 8352 assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported."); 8353 uint64_t Offset = Shift / 8; 8354 unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8; 8355 assert(!(Origin->getValueSizeInBits(0) & 0x7) && 8356 "The size of the original loaded type is not a multiple of a" 8357 " byte."); 8358 // If Offset is bigger than TySizeInBytes, it means we are loading all 8359 // zeros. This should have been optimized before in the process. 8360 assert(TySizeInBytes > Offset && 8361 "Invalid shift amount for given loaded size"); 8362 if (IsBigEndian) 8363 Offset = TySizeInBytes - Offset - getLoadedSize(); 8364 return Offset; 8365 } 8366 8367 /// \brief Generate the sequence of instructions to load the slice 8368 /// represented by this object and redirect the uses of this slice to 8369 /// this new sequence of instructions. 8370 /// \pre this->Inst && this->Origin are valid Instructions and this 8371 /// object passed the legal check: LoadedSlice::isLegal returned true. 
8372 /// \return The last instruction of the sequence used to load the slice. 8373 SDValue loadSlice() const { 8374 assert(Inst && Origin && "Unable to replace a non-existing slice."); 8375 const SDValue &OldBaseAddr = Origin->getBasePtr(); 8376 SDValue BaseAddr = OldBaseAddr; 8377 // Get the offset in that chunk of bytes w.r.t. the endianess. 8378 int64_t Offset = static_cast<int64_t>(getOffsetFromBase()); 8379 assert(Offset >= 0 && "Offset too big to fit in int64_t!"); 8380 if (Offset) { 8381 // BaseAddr = BaseAddr + Offset. 8382 EVT ArithType = BaseAddr.getValueType(); 8383 BaseAddr = DAG->getNode(ISD::ADD, SDLoc(Origin), ArithType, BaseAddr, 8384 DAG->getConstant(Offset, ArithType)); 8385 } 8386 8387 // Create the type of the loaded slice according to its size. 8388 EVT SliceType = getLoadedType(); 8389 8390 // Create the load for the slice. 8391 SDValue LastInst = DAG->getLoad( 8392 SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr, 8393 Origin->getPointerInfo().getWithOffset(Offset), Origin->isVolatile(), 8394 Origin->isNonTemporal(), Origin->isInvariant(), getAlignment()); 8395 // If the final type is not the same as the loaded type, this means that 8396 // we have to pad with zero. Create a zero extend for that. 8397 EVT FinalType = Inst->getValueType(0); 8398 if (SliceType != FinalType) 8399 LastInst = 8400 DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst); 8401 return LastInst; 8402 } 8403 8404 /// \brief Check if this slice can be merged with an expensive cross register 8405 /// bank copy. E.g., 8406 /// i = load i32 8407 /// f = bitcast i32 i to float 8408 bool canMergeExpensiveCrossRegisterBankCopy() const { 8409 if (!Inst || !Inst->hasOneUse()) 8410 return false; 8411 SDNode *Use = *Inst->use_begin(); 8412 if (Use->getOpcode() != ISD::BITCAST) 8413 return false; 8414 assert(DAG && "Missing context"); 8415 const TargetLowering &TLI = DAG->getTargetLoweringInfo(); 8416 EVT ResVT = Use->getValueType(0); 8417 const TargetRegisterClass *ResRC = TLI.getRegClassFor(ResVT.getSimpleVT()); 8418 const TargetRegisterClass *ArgRC = 8419 TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT()); 8420 if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT)) 8421 return false; 8422 8423 // At this point, we know that we perform a cross-register-bank copy. 8424 // Check if it is expensive. 8425 const TargetRegisterInfo *TRI = 8426 TLI.getTargetMachine().getSubtargetImpl()->getRegisterInfo(); 8427 // Assume bitcasts are cheap, unless both register classes do not 8428 // explicitly share a common sub class. 8429 if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC)) 8430 return false; 8431 8432 // Check if it will be merged with the load. 8433 // 1. Check the alignment constraint. 8434 unsigned RequiredAlignment = TLI.getDataLayout()->getABITypeAlignment( 8435 ResVT.getTypeForEVT(*DAG->getContext())); 8436 8437 if (RequiredAlignment > getAlignment()) 8438 return false; 8439 8440 // 2. Check that the load is a legal operation for that type. 8441 if (!TLI.isOperationLegal(ISD::LOAD, ResVT)) 8442 return false; 8443 8444 // 3. Check that we do not have a zext in the way. 8445 if (Inst->getValueType(0) != getLoadedType()) 8446 return false; 8447 8448 return true; 8449 } 8450 }; 8451 } 8452 8453 /// \brief Check that all bits set in \p UsedBits form a dense region, i.e., 8454 /// \p UsedBits looks like 0..0 1..1 0..0. 8455 static bool areUsedBitsDense(const APInt &UsedBits) { 8456 // If all the bits are one, this is dense! 
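  // E.g. (illustrative): 0x0FF0 (00001111'11110000) is dense, while
  // 0x0F0F (00001111'00001111) is not, because of the hole in the middle.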
8457   if (UsedBits.isAllOnesValue())
8458     return true;
8459
8460   // Get rid of the unused bits on the right.
8461   APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
8462   // Get rid of the unused bits on the left.
8463   if (NarrowedUsedBits.countLeadingZeros())
8464     NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
8465   // Check that the chunk of bits is completely used.
8466   return NarrowedUsedBits.isAllOnesValue();
8467 }
8468
8469 /// \brief Check whether or not \p First and \p Second are next to each other
8470 /// in memory. This means that there is no hole between the bits loaded
8471 /// by \p First and the bits loaded by \p Second.
8472 static bool areSlicesNextToEachOther(const LoadedSlice &First,
8473                                      const LoadedSlice &Second) {
8474   assert(First.Origin == Second.Origin && First.Origin &&
8475          "Unable to match different memory origins.");
8476   APInt UsedBits = First.getUsedBits();
8477   assert((UsedBits & Second.getUsedBits()) == 0 &&
8478          "Slices are not supposed to overlap.");
8479   UsedBits |= Second.getUsedBits();
8480   return areUsedBitsDense(UsedBits);
8481 }
8482
8483 /// \brief Adjust the \p GlobalLSCost according to the target
8484 /// pairing capabilities and the layout of the slices.
8485 /// \pre \p GlobalLSCost should account for at least as many loads as
8486 /// there are in the slices in \p LoadedSlices.
8487 static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
8488                                  LoadedSlice::Cost &GlobalLSCost) {
8489   unsigned NumberOfSlices = LoadedSlices.size();
8490   // If there are fewer than 2 elements, no pairing is possible.
8491   if (NumberOfSlices < 2)
8492     return;
8493
8494   // Sort the slices so that elements that are likely to be next to each
8495   // other in memory are next to each other in the list.
8496   std::sort(LoadedSlices.begin(), LoadedSlices.end(),
8497             [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
8498     assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
8499     return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
8500   });
8501   const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
8502   // First (resp. Second) is the first (resp. second) potential candidate
8503   // to be placed in a paired load.
8504   const LoadedSlice *First = nullptr;
8505   const LoadedSlice *Second = nullptr;
8506   for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
8507        // Set the beginning of the pair.
8508        First = Second) {
8509
8510     Second = &LoadedSlices[CurrSlice];
8511
8512     // If First is NULL, it means we start a new pair.
8513     // Get to the next slice.
8514     if (!First)
8515       continue;
8516
8517     EVT LoadedType = First->getLoadedType();
8518
8519     // If the types of the slices are different, we cannot pair them.
8520     if (LoadedType != Second->getLoadedType())
8521       continue;
8522
8523     // Check if the target supplies paired loads for this type.
8524     unsigned RequiredAlignment = 0;
8525     if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
8526       // Move to the next pair; this type is hopeless.
8527       Second = nullptr;
8528       continue;
8529     }
8530     // Check if we meet the alignment requirement.
8531     if (RequiredAlignment > First->getAlignment())
8532       continue;
8533
8534     // Check that both loads are next to each other in memory.
8535     if (!areSlicesNextToEachOther(*First, *Second))
8536       continue;
8537
8538     assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
8539     --GlobalLSCost.Loads;
8540     // Move to the next pair.
8541     Second = nullptr;
8542   }
8543 }
8544
8545 /// \brief Check the profitability of all involved LoadedSlice.
8546 /// Currently, it is considered profitable if there are exactly two
8547 /// involved slices (1) which are (2) next to each other in memory, and
8548 /// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
8549 ///
8550 /// Note: The order of the elements in \p LoadedSlices may be modified, but not
8551 /// the elements themselves.
8552 ///
8553 /// FIXME: When the cost model is mature enough, we can relax
8554 /// constraints (1) and (2).
8555 static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
8556                                 const APInt &UsedBits, bool ForCodeSize) {
8557   unsigned NumberOfSlices = LoadedSlices.size();
8558   if (StressLoadSlicing)
8559     return NumberOfSlices > 1;
8560
8561   // Check (1).
8562   if (NumberOfSlices != 2)
8563     return false;
8564
8565   // Check (2).
8566   if (!areUsedBitsDense(UsedBits))
8567     return false;
8568
8569   // Check (3).
8570   LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
8571   // The original code has one big load.
8572   OrigCost.Loads = 1;
8573   for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
8574     const LoadedSlice &LS = LoadedSlices[CurrSlice];
8575     // Accumulate the cost of all the slices.
8576     LoadedSlice::Cost SliceCost(LS, ForCodeSize);
8577     GlobalSlicingCost += SliceCost;
8578
8579     // Account as cost in the original configuration the gain obtained
8580     // with the current slices.
8581     OrigCost.addSliceGain(LS);
8582   }
8583
8584   // If the target supports paired loads, adjust the cost accordingly.
8585   adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
8586   return OrigCost > GlobalSlicingCost;
8587 }
8588
8589 /// \brief If the given load, \p N, is used only by trunc or trunc(lshr)
8590 /// operations, split it in the various pieces being extracted.
8591 ///
8592 /// This sort of thing is introduced by SROA.
8593 /// This slicing takes care not to insert overlapping loads.
8594 /// \pre \p N is a simple load (i.e., not an atomic or volatile load).
8595 bool DAGCombiner::SliceUpLoad(SDNode *N) {
8596   if (Level < AfterLegalizeDAG)
8597     return false;
8598
8599   LoadSDNode *LD = cast<LoadSDNode>(N);
8600   if (LD->isVolatile() || !ISD::isNormalLoad(LD) ||
8601       !LD->getValueType(0).isInteger())
8602     return false;
8603
8604   // Keep track of already used bits to detect overlapping values.
8605   // In that case, we will just abort the transformation.
8606   APInt UsedBits(LD->getValueSizeInBits(0), 0);
8607
8608   SmallVector<LoadedSlice, 4> LoadedSlices;
8609
8610   // Check if this load is used as several smaller chunks of bits.
8611   // Basically, look for uses in trunc or trunc(lshr) and record a new chain
8612   // of computation for each trunc.
8613   for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
8614        UI != UIEnd; ++UI) {
8615     // Skip the uses of the chain.
8616     if (UI.getUse().getResNo() != 0)
8617       continue;
8618
8619     SDNode *User = *UI;
8620     unsigned Shift = 0;
8621
8622     // Check if this is a trunc(lshr).
8623     if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
8624         isa<ConstantSDNode>(User->getOperand(1))) {
8625       Shift = cast<ConstantSDNode>(User->getOperand(1))->getZExtValue();
8626       User = *User->use_begin();
8627     }
8628
8629     // At this point, User is a truncate iff we encountered trunc or
8630     // trunc(lshr).
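    // E.g. (illustrative): (i8 (trunc (srl (i32 load %p), 16))) selects
    // bits 16..23 of the loaded value, giving Shift == 16.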
8631     if (User->getOpcode() != ISD::TRUNCATE)
8632       return false;
8633
8634     // The width of the type must be a power of 2 and at least 8 bits.
8635     // Otherwise the load cannot be represented in LLVM IR.
8636     // Moreover, if the shift amount is not a multiple of 8 bits, the slice
8637     // will span several bytes. We do not support that.
8638     unsigned Width = User->getValueSizeInBits(0);
8639     if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
8640       return false;
8641
8642     // Build the slice for this chain of computations.
8643     LoadedSlice LS(User, LD, Shift, &DAG);
8644     APInt CurrentUsedBits = LS.getUsedBits();
8645
8646     // Check if this slice overlaps with another.
8647     if ((CurrentUsedBits & UsedBits) != 0)
8648       return false;
8649     // Update the bits used globally.
8650     UsedBits |= CurrentUsedBits;
8651
8652     // Check if the new slice would be legal.
8653     if (!LS.isLegal())
8654       return false;
8655
8656     // Record the slice.
8657     LoadedSlices.push_back(LS);
8658   }
8659
8660   // Abort slicing if it does not seem to be profitable.
8661   if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
8662     return false;
8663
8664   ++SlicedLoads;
8665
8666   // Rewrite each chain to use an independent load.
8667   // By construction, each chain can be represented by a unique load.
8668
8669   // Prepare the argument for the new token factor for all the slices.
8670   SmallVector<SDValue, 8> ArgChains;
8671   for (SmallVectorImpl<LoadedSlice>::const_iterator
8672            LSIt = LoadedSlices.begin(),
8673            LSItEnd = LoadedSlices.end();
8674        LSIt != LSItEnd; ++LSIt) {
8675     SDValue SliceInst = LSIt->loadSlice();
8676     CombineTo(LSIt->Inst, SliceInst, true);
8677     if (SliceInst.getNode()->getOpcode() != ISD::LOAD)
8678       SliceInst = SliceInst.getOperand(0);
8679     assert(SliceInst->getOpcode() == ISD::LOAD &&
8680            "It takes more than a zext to get to the loaded slice!!");
8681     ArgChains.push_back(SliceInst.getValue(1));
8682   }
8683
8684   SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
8685                               ArgChains);
8686   DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
8687   return true;
8688 }
8689
8690 /// CheckForMaskedLoad - Check to see if V is (and load (ptr), imm), where the
8691 /// load has specific bytes cleared out. If so, return the byte size
8692 /// being masked out and the shift amount.
8693 static std::pair<unsigned, unsigned>
8694 CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
8695   std::pair<unsigned, unsigned> Result(0, 0);
8696
8697   // Check for the structure we're looking for.
8698   if (V->getOpcode() != ISD::AND ||
8699       !isa<ConstantSDNode>(V->getOperand(1)) ||
8700       !ISD::isNormalLoad(V->getOperand(0).getNode()))
8701     return Result;
8702
8703   // Check the chain and pointer.
8704   LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
8705   if (LD->getBasePtr() != Ptr) return Result;  // Not from same pointer.
8706
8707   // The store should be chained directly to the load or be an operand of a
8708   // tokenfactor.
8709   if (LD == Chain.getNode())
8710     ; // ok.
8711   else if (Chain->getOpcode() != ISD::TokenFactor)
8712     return Result; // Fail.
8713   else {
8714     bool isOk = false;
8715     for (unsigned i = 0, e = Chain->getNumOperands(); i != e; ++i)
8716       if (Chain->getOperand(i).getNode() == LD) {
8717         isOk = true;
8718         break;
8719       }
8720     if (!isOk) return Result;
8721   }
8722
8723   // This only handles simple types.
8724   if (V.getValueType() != MVT::i16 &&
8725       V.getValueType() != MVT::i32 &&
8726       V.getValueType() != MVT::i64)
8727     return Result;
8728
8729   // Check the constant mask.
Invert it so that the bits being masked out are 8730 // 0 and the bits being kept are 1. Use getSExtValue so that leading bits 8731 // follow the sign bit for uniformity. 8732 uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue(); 8733 unsigned NotMaskLZ = countLeadingZeros(NotMask); 8734 if (NotMaskLZ & 7) return Result; // Must be multiple of a byte. 8735 unsigned NotMaskTZ = countTrailingZeros(NotMask); 8736 if (NotMaskTZ & 7) return Result; // Must be multiple of a byte. 8737 if (NotMaskLZ == 64) return Result; // All zero mask. 8738 8739 // See if we have a continuous run of bits. If so, we have 0*1+0* 8740 if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64) 8741 return Result; 8742 8743 // Adjust NotMaskLZ down to be from the actual size of the int instead of i64. 8744 if (V.getValueType() != MVT::i64 && NotMaskLZ) 8745 NotMaskLZ -= 64-V.getValueSizeInBits(); 8746 8747 unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8; 8748 switch (MaskedBytes) { 8749 case 1: 8750 case 2: 8751 case 4: break; 8752 default: return Result; // All one mask, or 5-byte mask. 8753 } 8754 8755 // Verify that the first bit starts at a multiple of mask so that the access 8756 // is aligned the same as the access width. 8757 if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result; 8758 8759 Result.first = MaskedBytes; 8760 Result.second = NotMaskTZ/8; 8761 return Result; 8762 } 8763 8764 8765 /// ShrinkLoadReplaceStoreWithStore - Check to see if IVal is something that 8766 /// provides a value as specified by MaskInfo. If so, replace the specified 8767 /// store with a narrower store of truncated IVal. 8768 static SDNode * 8769 ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo, 8770 SDValue IVal, StoreSDNode *St, 8771 DAGCombiner *DC) { 8772 unsigned NumBytes = MaskInfo.first; 8773 unsigned ByteShift = MaskInfo.second; 8774 SelectionDAG &DAG = DC->getDAG(); 8775 8776 // Check to see if IVal is all zeros in the part being masked in by the 'or' 8777 // that uses this. If not, this is not a replacement. 8778 APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(), 8779 ByteShift*8, (ByteShift+NumBytes)*8); 8780 if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr; 8781 8782 // Check that it is legal on the target to do this. It is legal if the new 8783 // VT we're shrinking to (i8/i16/i32) is legal or we're still before type 8784 // legalization. 8785 MVT VT = MVT::getIntegerVT(NumBytes*8); 8786 if (!DC->isTypeLegal(VT)) 8787 return nullptr; 8788 8789 // Okay, we can do this! Replace the 'St' store with a store of IVal that is 8790 // shifted by ByteShift and truncated down to NumBytes. 8791 if (ByteShift) 8792 IVal = DAG.getNode(ISD::SRL, SDLoc(IVal), IVal.getValueType(), IVal, 8793 DAG.getConstant(ByteShift*8, 8794 DC->getShiftAmountTy(IVal.getValueType()))); 8795 8796 // Figure out the offset for the store and the alignment of the access. 8797 unsigned StOffset; 8798 unsigned NewAlign = St->getAlignment(); 8799 8800 if (DAG.getTargetLoweringInfo().isLittleEndian()) 8801 StOffset = ByteShift; 8802 else 8803 StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes; 8804 8805 SDValue Ptr = St->getBasePtr(); 8806 if (StOffset) { 8807 Ptr = DAG.getNode(ISD::ADD, SDLoc(IVal), Ptr.getValueType(), 8808 Ptr, DAG.getConstant(StOffset, Ptr.getValueType())); 8809 NewAlign = MinAlign(NewAlign, StOffset); 8810 } 8811 8812 // Truncate down to the new size. 
8813 IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal); 8814 8815 ++OpsNarrowed; 8816 return DAG.getStore(St->getChain(), SDLoc(St), IVal, Ptr, 8817 St->getPointerInfo().getWithOffset(StOffset), 8818 false, false, NewAlign).getNode(); 8819 } 8820 8821 8822 /// ReduceLoadOpStoreWidth - Look for sequence of load / op / store where op is 8823 /// one of 'or', 'xor', and 'and' of immediates. If 'op' is only touching some 8824 /// of the loaded bits, try narrowing the load and store if it would end up 8825 /// being a win for performance or code size. 8826 SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) { 8827 StoreSDNode *ST = cast<StoreSDNode>(N); 8828 if (ST->isVolatile()) 8829 return SDValue(); 8830 8831 SDValue Chain = ST->getChain(); 8832 SDValue Value = ST->getValue(); 8833 SDValue Ptr = ST->getBasePtr(); 8834 EVT VT = Value.getValueType(); 8835 8836 if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse()) 8837 return SDValue(); 8838 8839 unsigned Opc = Value.getOpcode(); 8840 8841 // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst 8842 // is a byte mask indicating a consecutive number of bytes, check to see if 8843 // Y is known to provide just those bytes. If so, we try to replace the 8844 // load + replace + store sequence with a single (narrower) store, which makes 8845 // the load dead. 8846 if (Opc == ISD::OR) { 8847 std::pair<unsigned, unsigned> MaskedLoad; 8848 MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain); 8849 if (MaskedLoad.first) 8850 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad, 8851 Value.getOperand(1), ST,this)) 8852 return SDValue(NewST, 0); 8853 8854 // Or is commutative, so try swapping X and Y. 8855 MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain); 8856 if (MaskedLoad.first) 8857 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad, 8858 Value.getOperand(0), ST,this)) 8859 return SDValue(NewST, 0); 8860 } 8861 8862 if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) || 8863 Value.getOperand(1).getOpcode() != ISD::Constant) 8864 return SDValue(); 8865 8866 SDValue N0 = Value.getOperand(0); 8867 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && 8868 Chain == SDValue(N0.getNode(), 1)) { 8869 LoadSDNode *LD = cast<LoadSDNode>(N0); 8870 if (LD->getBasePtr() != Ptr || 8871 LD->getPointerInfo().getAddrSpace() != 8872 ST->getPointerInfo().getAddrSpace()) 8873 return SDValue(); 8874 8875 // Find the type to narrow it the load / op / store to. 8876 SDValue N1 = Value.getOperand(1); 8877 unsigned BitWidth = N1.getValueSizeInBits(); 8878 APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue(); 8879 if (Opc == ISD::AND) 8880 Imm ^= APInt::getAllOnesValue(BitWidth); 8881 if (Imm == 0 || Imm.isAllOnesValue()) 8882 return SDValue(); 8883 unsigned ShAmt = Imm.countTrailingZeros(); 8884 unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1; 8885 unsigned NewBW = NextPowerOf2(MSB - ShAmt); 8886 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW); 8887 while (NewBW < BitWidth && 8888 !(TLI.isOperationLegalOrCustom(Opc, NewVT) && 8889 TLI.isNarrowingProfitable(VT, NewVT))) { 8890 NewBW = NextPowerOf2(NewBW); 8891 NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW); 8892 } 8893 if (NewBW >= BitWidth) 8894 return SDValue(); 8895 8896 // If the lsb changed does not start at the type bitwidth boundary, 8897 // start at the previous one. 
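    // E.g. (illustrative): BitWidth == 32 and Imm == 0x0FF00000 give
    // ShAmt == 20 and NewBW == 8; 20 is not a multiple of 8, so ShAmt is
    // rounded down to 16 below.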
8898 if (ShAmt % NewBW) 8899 ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW; 8900 APInt Mask = APInt::getBitsSet(BitWidth, ShAmt, 8901 std::min(BitWidth, ShAmt + NewBW)); 8902 if ((Imm & Mask) == Imm) { 8903 APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW); 8904 if (Opc == ISD::AND) 8905 NewImm ^= APInt::getAllOnesValue(NewBW); 8906 uint64_t PtrOff = ShAmt / 8; 8907 // For big endian targets, we need to adjust the offset to the pointer to 8908 // load the correct bytes. 8909 if (TLI.isBigEndian()) 8910 PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff; 8911 8912 unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff); 8913 Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext()); 8914 if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy)) 8915 return SDValue(); 8916 8917 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD), 8918 Ptr.getValueType(), Ptr, 8919 DAG.getConstant(PtrOff, Ptr.getValueType())); 8920 SDValue NewLD = DAG.getLoad(NewVT, SDLoc(N0), 8921 LD->getChain(), NewPtr, 8922 LD->getPointerInfo().getWithOffset(PtrOff), 8923 LD->isVolatile(), LD->isNonTemporal(), 8924 LD->isInvariant(), NewAlign, 8925 LD->getAAInfo()); 8926 SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD, 8927 DAG.getConstant(NewImm, NewVT)); 8928 SDValue NewST = DAG.getStore(Chain, SDLoc(N), 8929 NewVal, NewPtr, 8930 ST->getPointerInfo().getWithOffset(PtrOff), 8931 false, false, NewAlign); 8932 8933 AddToWorklist(NewPtr.getNode()); 8934 AddToWorklist(NewLD.getNode()); 8935 AddToWorklist(NewVal.getNode()); 8936 WorklistRemover DeadNodes(*this); 8937 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1)); 8938 ++OpsNarrowed; 8939 return NewST; 8940 } 8941 } 8942 8943 return SDValue(); 8944 } 8945 8946 /// TransformFPLoadStorePair - For a given floating point load / store pair, 8947 /// if the load value isn't used by any other operations, then consider 8948 /// transforming the pair to integer load / store operations if the target 8949 /// deems the transformation profitable. 
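/// For example (illustrative, target permitting):
///   f = load float, p ; store float f, q
/// where f has no other users may be rewritten as an i32 load / store pair.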
8950 SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { 8951 StoreSDNode *ST = cast<StoreSDNode>(N); 8952 SDValue Chain = ST->getChain(); 8953 SDValue Value = ST->getValue(); 8954 if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) && 8955 Value.hasOneUse() && 8956 Chain == SDValue(Value.getNode(), 1)) { 8957 LoadSDNode *LD = cast<LoadSDNode>(Value); 8958 EVT VT = LD->getMemoryVT(); 8959 if (!VT.isFloatingPoint() || 8960 VT != ST->getMemoryVT() || 8961 LD->isNonTemporal() || 8962 ST->isNonTemporal() || 8963 LD->getPointerInfo().getAddrSpace() != 0 || 8964 ST->getPointerInfo().getAddrSpace() != 0) 8965 return SDValue(); 8966 8967 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 8968 if (!TLI.isOperationLegal(ISD::LOAD, IntVT) || 8969 !TLI.isOperationLegal(ISD::STORE, IntVT) || 8970 !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) || 8971 !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT)) 8972 return SDValue(); 8973 8974 unsigned LDAlign = LD->getAlignment(); 8975 unsigned STAlign = ST->getAlignment(); 8976 Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext()); 8977 unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy); 8978 if (LDAlign < ABIAlign || STAlign < ABIAlign) 8979 return SDValue(); 8980 8981 SDValue NewLD = DAG.getLoad(IntVT, SDLoc(Value), 8982 LD->getChain(), LD->getBasePtr(), 8983 LD->getPointerInfo(), 8984 false, false, false, LDAlign); 8985 8986 SDValue NewST = DAG.getStore(NewLD.getValue(1), SDLoc(N), 8987 NewLD, ST->getBasePtr(), 8988 ST->getPointerInfo(), 8989 false, false, STAlign); 8990 8991 AddToWorklist(NewLD.getNode()); 8992 AddToWorklist(NewST.getNode()); 8993 WorklistRemover DeadNodes(*this); 8994 DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1)); 8995 ++LdStFP2Int; 8996 return NewST; 8997 } 8998 8999 return SDValue(); 9000 } 9001 9002 /// Helper struct to parse and store a memory address as base + index + offset. 9003 /// We ignore sign extensions when it is safe to do so. 9004 /// The following two expressions are not equivalent. To differentiate we need 9005 /// to store whether there was a sign extension involved in the index 9006 /// computation. 9007 /// (load (i64 add (i64 copyfromreg %c) 9008 /// (i64 signextend (add (i8 load %index) 9009 /// (i8 1)))) 9010 /// vs 9011 /// 9012 /// (load (i64 add (i64 copyfromreg %c) 9013 /// (i64 signextend (i32 add (i32 signextend (i8 load %index)) 9014 /// (i32 1))))) 9015 struct BaseIndexOffset { 9016 SDValue Base; 9017 SDValue Index; 9018 int64_t Offset; 9019 bool IsIndexSignExt; 9020 9021 BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {} 9022 9023 BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset, 9024 bool IsIndexSignExt) : 9025 Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {} 9026 9027 bool equalBaseIndex(const BaseIndexOffset &Other) { 9028 return Other.Base == Base && Other.Index == Index && 9029 Other.IsIndexSignExt == IsIndexSignExt; 9030 } 9031 9032 /// Parses tree in Ptr for base, index, offset addresses. 9033 static BaseIndexOffset match(SDValue Ptr) { 9034 bool IsIndexSignExt = false; 9035 9036 // We only can pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD 9037 // instruction, then it could be just the BASE or everything else we don't 9038 // know how to handle. Just use Ptr as BASE and give up. 9039 if (Ptr->getOpcode() != ISD::ADD) 9040 return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt); 9041 9042 // We know that we have at least an ADD instruction. 
Try to pattern match 9043 // the simple case of BASE + OFFSET. 9044 if (isa<ConstantSDNode>(Ptr->getOperand(1))) { 9045 int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue(); 9046 return BaseIndexOffset(Ptr->getOperand(0), SDValue(), Offset, 9047 IsIndexSignExt); 9048 } 9049 9050 // Inside a loop the current BASE pointer is calculated using an ADD and a 9051 // MUL instruction. In this case Ptr is the actual BASE pointer. 9052 // (i64 add (i64 %array_ptr) 9053 // (i64 mul (i64 %induction_var) 9054 // (i64 %element_size))) 9055 if (Ptr->getOperand(1)->getOpcode() == ISD::MUL) 9056 return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt); 9057 9058 // Look at Base + Index + Offset cases. 9059 SDValue Base = Ptr->getOperand(0); 9060 SDValue IndexOffset = Ptr->getOperand(1); 9061 9062 // Skip signextends. 9063 if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) { 9064 IndexOffset = IndexOffset->getOperand(0); 9065 IsIndexSignExt = true; 9066 } 9067 9068 // Either the case of Base + Index (no offset) or something else. 9069 if (IndexOffset->getOpcode() != ISD::ADD) 9070 return BaseIndexOffset(Base, IndexOffset, 0, IsIndexSignExt); 9071 9072 // Now we have the case of Base + Index + offset. 9073 SDValue Index = IndexOffset->getOperand(0); 9074 SDValue Offset = IndexOffset->getOperand(1); 9075 9076 if (!isa<ConstantSDNode>(Offset)) 9077 return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt); 9078 9079 // Ignore signextends. 9080 if (Index->getOpcode() == ISD::SIGN_EXTEND) { 9081 Index = Index->getOperand(0); 9082 IsIndexSignExt = true; 9083 } else IsIndexSignExt = false; 9084 9085 int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue(); 9086 return BaseIndexOffset(Base, Index, Off, IsIndexSignExt); 9087 } 9088 }; 9089 9090 /// Holds a pointer to an LSBaseSDNode as well as information on where it 9091 /// is located in a sequence of memory operations connected by a chain. 9092 struct MemOpLink { 9093 MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq): 9094 MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { } 9095 // Ptr to the mem node. 9096 LSBaseSDNode *MemNode; 9097 // Offset from the base ptr. 9098 int64_t OffsetFromBase; 9099 // What is the sequence number of this mem node. 9100 // Lowest mem operand in the DAG starts at zero. 9101 unsigned SequenceNum; 9102 }; 9103 9104 bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { 9105 EVT MemVT = St->getMemoryVT(); 9106 int64_t ElementSizeBytes = MemVT.getSizeInBits()/8; 9107 bool NoVectors = DAG.getMachineFunction().getFunction()->getAttributes(). 9108 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 9109 9110 // Don't merge vectors into wider inputs. 9111 if (MemVT.isVector() || !MemVT.isSimple()) 9112 return false; 9113 9114 // Perform an early exit check. Do not bother looking at stored values that 9115 // are not constants or loads. 9116 SDValue StoredVal = St->getValue(); 9117 bool IsLoadSrc = isa<LoadSDNode>(StoredVal); 9118 if (!isa<ConstantSDNode>(StoredVal) && !isa<ConstantFPSDNode>(StoredVal) && 9119 !IsLoadSrc) 9120 return false; 9121 9122 // Only look at ends of store sequences. 9123 SDValue Chain = SDValue(St, 0); 9124 if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE) 9125 return false; 9126 9127 // This holds the base pointer, index, and the offset in bytes from the base 9128 // pointer. 9129 BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr()); 9130 9131 // We must have a base and an offset. 
9132 if (!BasePtr.Base.getNode()) 9133 return false; 9134 9135 // Do not handle stores to undef base pointers. 9136 if (BasePtr.Base.getOpcode() == ISD::UNDEF) 9137 return false; 9138 9139 // Save the LoadSDNodes that we find in the chain. 9140 // We need to make sure that these nodes do not interfere with 9141 // any of the store nodes. 9142 SmallVector<LSBaseSDNode*, 8> AliasLoadNodes; 9143 9144 // Save the StoreSDNodes that we find in the chain. 9145 SmallVector<MemOpLink, 8> StoreNodes; 9146 9147 // Walk up the chain and look for nodes with offsets from the same 9148 // base pointer. Stop when reaching an instruction with a different kind 9149 // or instruction which has a different base pointer. 9150 unsigned Seq = 0; 9151 StoreSDNode *Index = St; 9152 while (Index) { 9153 // If the chain has more than one use, then we can't reorder the mem ops. 9154 if (Index != St && !SDValue(Index, 0)->hasOneUse()) 9155 break; 9156 9157 // Find the base pointer and offset for this memory node. 9158 BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr()); 9159 9160 // Check that the base pointer is the same as the original one. 9161 if (!Ptr.equalBaseIndex(BasePtr)) 9162 break; 9163 9164 // Check that the alignment is the same. 9165 if (Index->getAlignment() != St->getAlignment()) 9166 break; 9167 9168 // The memory operands must not be volatile. 9169 if (Index->isVolatile() || Index->isIndexed()) 9170 break; 9171 9172 // No truncation. 9173 if (StoreSDNode *St = dyn_cast<StoreSDNode>(Index)) 9174 if (St->isTruncatingStore()) 9175 break; 9176 9177 // The stored memory type must be the same. 9178 if (Index->getMemoryVT() != MemVT) 9179 break; 9180 9181 // We do not allow unaligned stores because we want to prevent overriding 9182 // stores. 9183 if (Index->getAlignment()*8 != MemVT.getSizeInBits()) 9184 break; 9185 9186 // We found a potential memory operand to merge. 9187 StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++)); 9188 9189 // Find the next memory operand in the chain. If the next operand in the 9190 // chain is a store then move up and continue the scan with the next 9191 // memory operand. If the next operand is a load save it and use alias 9192 // information to check if it interferes with anything. 9193 SDNode *NextInChain = Index->getChain().getNode(); 9194 while (1) { 9195 if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) { 9196 // We found a store node. Use it for the next iteration. 9197 Index = STn; 9198 break; 9199 } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { 9200 if (Ldn->isVolatile()) { 9201 Index = nullptr; 9202 break; 9203 } 9204 9205 // Save the load node for later. Continue the scan. 9206 AliasLoadNodes.push_back(Ldn); 9207 NextInChain = Ldn->getChain().getNode(); 9208 continue; 9209 } else { 9210 Index = nullptr; 9211 break; 9212 } 9213 } 9214 } 9215 9216 // Check if there is anything to merge. 9217 if (StoreNodes.size() < 2) 9218 return false; 9219 9220 // Sort the memory operands according to their distance from the base pointer. 9221 std::sort(StoreNodes.begin(), StoreNodes.end(), 9222 [](MemOpLink LHS, MemOpLink RHS) { 9223 return LHS.OffsetFromBase < RHS.OffsetFromBase || 9224 (LHS.OffsetFromBase == RHS.OffsetFromBase && 9225 LHS.SequenceNum > RHS.SequenceNum); 9226 }); 9227 9228 // Scan the memory operations on the chain and find the first non-consecutive 9229 // store memory address. 
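  // E.g. (illustrative): with 4-byte elements at offsets 0, 4, 8 and 20,
  // the scan keeps the first three stores and stops at the gap before 20.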
9230 unsigned LastConsecutiveStore = 0; 9231 int64_t StartAddress = StoreNodes[0].OffsetFromBase; 9232 for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) { 9233 9234 // Check that the addresses are consecutive starting from the second 9235 // element in the list of stores. 9236 if (i > 0) { 9237 int64_t CurrAddress = StoreNodes[i].OffsetFromBase; 9238 if (CurrAddress - StartAddress != (ElementSizeBytes * i)) 9239 break; 9240 } 9241 9242 bool Alias = false; 9243 // Check if this store interferes with any of the loads that we found. 9244 for (unsigned ld = 0, lde = AliasLoadNodes.size(); ld < lde; ++ld) 9245 if (isAlias(AliasLoadNodes[ld], StoreNodes[i].MemNode)) { 9246 Alias = true; 9247 break; 9248 } 9249 // We found a load that alias with this store. Stop the sequence. 9250 if (Alias) 9251 break; 9252 9253 // Mark this node as useful. 9254 LastConsecutiveStore = i; 9255 } 9256 9257 // The node with the lowest store address. 9258 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; 9259 9260 // Store the constants into memory as one consecutive store. 9261 if (!IsLoadSrc) { 9262 unsigned LastLegalType = 0; 9263 unsigned LastLegalVectorType = 0; 9264 bool NonZero = false; 9265 for (unsigned i=0; i<LastConsecutiveStore+1; ++i) { 9266 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); 9267 SDValue StoredVal = St->getValue(); 9268 9269 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) { 9270 NonZero |= !C->isNullValue(); 9271 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) { 9272 NonZero |= !C->getConstantFPValue()->isNullValue(); 9273 } else { 9274 // Non-constant. 9275 break; 9276 } 9277 9278 // Find a legal type for the constant store. 9279 unsigned StoreBW = (i+1) * ElementSizeBytes * 8; 9280 EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW); 9281 if (TLI.isTypeLegal(StoreTy)) 9282 LastLegalType = i+1; 9283 // Or check whether a truncstore is legal. 9284 else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) == 9285 TargetLowering::TypePromoteInteger) { 9286 EVT LegalizedStoredValueTy = 9287 TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType()); 9288 if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy)) 9289 LastLegalType = i+1; 9290 } 9291 9292 // Find a legal type for the vector store. 9293 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1); 9294 if (TLI.isTypeLegal(Ty)) 9295 LastLegalVectorType = i + 1; 9296 } 9297 9298 // We only use vectors if the constant is known to be zero and the 9299 // function is not marked with the noimplicitfloat attribute. 9300 if (NonZero || NoVectors) 9301 LastLegalVectorType = 0; 9302 9303 // Check if we found a legal integer type to store. 9304 if (LastLegalType == 0 && LastLegalVectorType == 0) 9305 return false; 9306 9307 bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors; 9308 unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType; 9309 9310 // Make sure we have something to merge. 9311 if (NumElem < 2) 9312 return false; 9313 9314 unsigned EarliestNodeUsed = 0; 9315 for (unsigned i=0; i < NumElem; ++i) { 9316 // Find a chain for the new wide-store operand. Notice that some 9317 // of the store nodes that we found may not be selected for inclusion 9318 // in the wide store. The chain we use needs to be the chain of the 9319 // earliest store node which is *used* and replaced by the wide store. 
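      // (The merged store inherits that node's chain, so it stays ordered
      // after everything the earliest replaced store depended on.)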
      if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
        EarliestNodeUsed = i;
    }

    // The earliest Node in the DAG.
    LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
    SDLoc DL(StoreNodes[0].MemNode);

    SDValue StoredVal;
    if (UseVector) {
      // Find a legal type for the vector store.
      EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
      assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
      StoredVal = DAG.getConstant(0, Ty);
    } else {
      unsigned StoreBW = NumElem * ElementSizeBytes * 8;
      APInt StoreInt(StoreBW, 0);

      // Construct a single integer constant which is made of the smaller
      // constant inputs.
      bool IsLE = TLI.isLittleEndian();
      for (unsigned i = 0; i < NumElem; ++i) {
        unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
        StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
        SDValue Val = St->getValue();
        StoreInt <<= ElementSizeBytes * 8;
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
          StoreInt |= C->getAPIntValue().zext(StoreBW);
        } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
          StoreInt |= C->getValueAPF().bitcastToAPInt().zext(StoreBW);
        } else {
          llvm_unreachable("Invalid constant element type");
        }
      }

      // Create the new Load and Store operations.
      EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
      StoredVal = DAG.getConstant(StoreInt, StoreTy);
    }

    SDValue NewStore = DAG.getStore(EarliestOp->getChain(), DL, StoredVal,
                                    FirstInChain->getBasePtr(),
                                    FirstInChain->getPointerInfo(),
                                    false, false,
                                    FirstInChain->getAlignment());

    // Replace the first store with the new store.
    CombineTo(EarliestOp, NewStore);
    // Erase all other stores.
    for (unsigned i = 0; i < NumElem; ++i) {
      if (StoreNodes[i].MemNode == EarliestOp)
        continue;
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
      // ReplaceAllUsesWith will replace all uses that existed when it was
      // called, but graph optimizations may cause new ones to appear. For
      // example, the case in pr14333 looks like
      //
      //  St's chain -> St -> another store -> X
      //
      // And the only difference from St to the other store is the chain.
      // When we change its chain to be St's chain they become identical,
      // get CSEed and the net result is that X is now a use of St.
      // Since we know that St is redundant, just iterate.
      while (!St->use_empty())
        DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
      deleteAndRecombine(St);
    }

    return true;
  }

  // Below we handle the case of multiple consecutive stores that
  // come from multiple consecutive loads. We merge them into a single
  // wide load and a single wide store.

  // Look for load nodes which are used by the stored values.
  SmallVector<MemOpLink, 8> LoadNodes;

  // Find acceptable loads. Loads need to have the same chain (token factor),
  // must not be extending loads, volatile, or indexed, and they must be
  // consecutive.
  BaseIndexOffset LdBasePtr;
  for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) {
    StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
    if (!Ld) break;

    // Loads must only have one use.
    if (!Ld->hasNUsesOfValue(1, 0))
      break;

    // Check that the alignment is the same as the stores.
    if (Ld->getAlignment() != St->getAlignment())
      break;

    // The memory operands must not be volatile.
    if (Ld->isVolatile() || Ld->isIndexed())
      break;

    // We do not accept ext loads.
    if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
      break;

    // The loaded memory type must be the same.
    if (Ld->getMemoryVT() != MemVT)
      break;

    BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr());
    // If this is not the first ptr that we check.
    if (LdBasePtr.Base.getNode()) {
      // The base ptr must be the same.
      if (!LdPtr.equalBaseIndex(LdBasePtr))
        break;
    } else {
      // Check that all other base pointers are the same as this one.
      LdBasePtr = LdPtr;
    }

    // We found a potential memory operand to merge.
    LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0));
  }

  if (LoadNodes.size() < 2)
    return false;

  // If we have load/store pair instructions and we only have two values,
  // don't bother.
  unsigned RequiredAlignment;
  if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
      St->getAlignment() >= RequiredAlignment)
    return false;

  // Scan the memory operations on the chain and find the first non-consecutive
  // load memory address. This variable holds the index into the load node
  // array.
  unsigned LastConsecutiveLoad = 0;
  // These variables refer to sizes, not indices in the array.
  unsigned LastLegalVectorType = 0;
  unsigned LastLegalIntegerType = 0;
  StartAddress = LoadNodes[0].OffsetFromBase;
  SDValue FirstChain = LoadNodes[0].MemNode->getChain();
  for (unsigned i = 1; i < LoadNodes.size(); ++i) {
    // All loads must share the same chain.
    if (LoadNodes[i].MemNode->getChain() != FirstChain)
      break;

    int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
    if (CurrAddress - StartAddress != (ElementSizeBytes * i))
      break;
    LastConsecutiveLoad = i;

    // Find a legal type for the vector store.
    EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
    if (TLI.isTypeLegal(StoreTy))
      LastLegalVectorType = i + 1;

    // Find a legal type for the integer store.
    unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
    StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
    if (TLI.isTypeLegal(StoreTy))
      LastLegalIntegerType = i + 1;
    // Or check whether a truncstore and extload is legal.
    else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
             TargetLowering::TypePromoteInteger) {
      EVT LegalizedStoredValueTy =
        TLI.getTypeToTransformTo(*DAG.getContext(), StoreTy);
      if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
          TLI.isLoadExtLegal(ISD::ZEXTLOAD, StoreTy) &&
          TLI.isLoadExtLegal(ISD::SEXTLOAD, StoreTy) &&
          TLI.isLoadExtLegal(ISD::EXTLOAD, StoreTy))
        LastLegalIntegerType = i+1;
    }
  }

  // Only use vector types if the vector type is larger than the integer type.
  // If they are the same, use integers.
  bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
  unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);

  // We add +1 here because LastConsecutiveStore and LastConsecutiveLoad are
  // indices into the store/load arrays, while NumElem is a count.
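  // For instance (a sketch with made-up values): if stores 0..3 are
  // consecutive but only loads 0..2 are, and the widest legal type covers
  // two elements, then NumElem = min(min(3, 2) + 1, 2) = 2.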
  unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1;
  NumElem = std::min(LastLegalType, NumElem);

  if (NumElem < 2)
    return false;

  // The earliest Node in the DAG.
  unsigned EarliestNodeUsed = 0;
  LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
  for (unsigned i = 1; i < NumElem; ++i) {
    // Find a chain for the new wide-store operand. Notice that some
    // of the store nodes that we found may not be selected for inclusion
    // in the wide store. The chain we use needs to be the chain of the
    // earliest store node which is *used* and replaced by the wide store.
    if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
      EarliestNodeUsed = i;
  }

  // Decide whether it is better to use vectors or integers to load and store
  // to memory.
  EVT JointMemOpVT;
  if (UseVectorTy) {
    JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
  } else {
    unsigned StoreBW = NumElem * ElementSizeBytes * 8;
    JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
  }

  SDLoc LoadDL(LoadNodes[0].MemNode);
  SDLoc StoreDL(StoreNodes[0].MemNode);

  LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
  SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL,
                                FirstLoad->getChain(),
                                FirstLoad->getBasePtr(),
                                FirstLoad->getPointerInfo(),
                                false, false, false,
                                FirstLoad->getAlignment());

  SDValue NewStore = DAG.getStore(EarliestOp->getChain(), StoreDL, NewLoad,
                                  FirstInChain->getBasePtr(),
                                  FirstInChain->getPointerInfo(), false, false,
                                  FirstInChain->getAlignment());

  // Replace one of the loads with the new load.
  LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[0].MemNode);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
                                SDValue(NewLoad.getNode(), 1));

  // Remove the rest of the load chains.
  for (unsigned i = 1; i < NumElem; ++i) {
    // Replace all chain users of the old load nodes with the chain of the new
    // load node.
    LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain());
  }

  // Replace the first store with the new store.
  CombineTo(EarliestOp, NewStore);
  // Erase all other stores.
  for (unsigned i = 0; i < NumElem; ++i) {
    // Remove all Store nodes.
    if (StoreNodes[i].MemNode == EarliestOp)
      continue;
    StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
    DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
    deleteAndRecombine(St);
  }

  return true;
}

SDValue DAGCombiner::visitSTORE(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr = ST->getBasePtr();

  // If this is a store of a bit convert, store the input value if the
  // resultant store does not need a higher alignment than the original.
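  // E.g., (store (i32 (bitcast f32:x)), p) becomes (store f32:x, p), assuming
  // the f32 store needs no higher ABI alignment than the original i32 store.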
9580 if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() && 9581 ST->isUnindexed()) { 9582 unsigned OrigAlign = ST->getAlignment(); 9583 EVT SVT = Value.getOperand(0).getValueType(); 9584 unsigned Align = TLI.getDataLayout()-> 9585 getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext())); 9586 if (Align <= OrigAlign && 9587 ((!LegalOperations && !ST->isVolatile()) || 9588 TLI.isOperationLegalOrCustom(ISD::STORE, SVT))) 9589 return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), 9590 Ptr, ST->getPointerInfo(), ST->isVolatile(), 9591 ST->isNonTemporal(), OrigAlign, 9592 ST->getAAInfo()); 9593 } 9594 9595 // Turn 'store undef, Ptr' -> nothing. 9596 if (Value.getOpcode() == ISD::UNDEF && ST->isUnindexed()) 9597 return Chain; 9598 9599 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr' 9600 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) { 9601 // NOTE: If the original store is volatile, this transform must not increase 9602 // the number of stores. For example, on x86-32 an f64 can be stored in one 9603 // processor operation but an i64 (which is not legal) requires two. So the 9604 // transform should not be done in this case. 9605 if (Value.getOpcode() != ISD::TargetConstantFP) { 9606 SDValue Tmp; 9607 switch (CFP->getSimpleValueType(0).SimpleTy) { 9608 default: llvm_unreachable("Unknown FP type"); 9609 case MVT::f16: // We don't do this for these yet. 9610 case MVT::f80: 9611 case MVT::f128: 9612 case MVT::ppcf128: 9613 break; 9614 case MVT::f32: 9615 if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) || 9616 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) { 9617 Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF(). 9618 bitcastToAPInt().getZExtValue(), MVT::i32); 9619 return DAG.getStore(Chain, SDLoc(N), Tmp, 9620 Ptr, ST->getMemOperand()); 9621 } 9622 break; 9623 case MVT::f64: 9624 if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations && 9625 !ST->isVolatile()) || 9626 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) { 9627 Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). 9628 getZExtValue(), MVT::i64); 9629 return DAG.getStore(Chain, SDLoc(N), Tmp, 9630 Ptr, ST->getMemOperand()); 9631 } 9632 9633 if (!ST->isVolatile() && 9634 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) { 9635 // Many FP stores are not made apparent until after legalize, e.g. for 9636 // argument passing. Since this is so common, custom legalize the 9637 // 64-bit integer store into two 32-bit stores. 
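          // For example, 'store double 1.0, %p' (a sketch in IR terms) has
          // the f64 bit pattern 0x3FF0000000000000, so on a little-endian
          // target Lo = 0x00000000 is stored to %p and Hi = 0x3FF00000 to
          // %p + 4.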
          uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
          SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
          SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
          if (TLI.isBigEndian()) std::swap(Lo, Hi);

          unsigned Alignment = ST->getAlignment();
          bool isVolatile = ST->isVolatile();
          bool isNonTemporal = ST->isNonTemporal();
          AAMDNodes AAInfo = ST->getAAInfo();

          SDValue St0 = DAG.getStore(Chain, SDLoc(ST), Lo,
                                     Ptr, ST->getPointerInfo(),
                                     isVolatile, isNonTemporal,
                                     ST->getAlignment(), AAInfo);
          Ptr = DAG.getNode(ISD::ADD, SDLoc(N), Ptr.getValueType(), Ptr,
                            DAG.getConstant(4, Ptr.getValueType()));
          Alignment = MinAlign(Alignment, 4U);
          SDValue St1 = DAG.getStore(Chain, SDLoc(ST), Hi,
                                     Ptr, ST->getPointerInfo().getWithOffset(4),
                                     isVolatile, isNonTemporal,
                                     Alignment, AAInfo);
          return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other,
                             St0, St1);
        }

        break;
      }
    }
  }

  // Try to infer better alignment information than the store already has.
  if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > ST->getAlignment())
        return DAG.getTruncStore(Chain, SDLoc(N), Value,
                                 Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                                 ST->isVolatile(), ST->isNonTemporal(), Align,
                                 ST->getAAInfo());
    }
  }

  // Try transforming a pair of floating-point load / store ops to integer
  // load / store ops.
  SDValue NewST = TransformFPLoadStorePair(N);
  if (NewST.getNode())
    return NewST;

  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA :
    TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
#ifndef NDEBUG
  if (CombinerAAOnlyFunc.getNumOccurrences() &&
      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
    UseAA = false;
#endif
  if (UseAA && ST->isUnindexed()) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDValue BetterChain = FindBetterChain(N, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      SDValue ReplStore;

      // Replace the chain to avoid dependency.
      if (ST->isTruncatingStore()) {
        ReplStore = DAG.getTruncStore(BetterChain, SDLoc(N), Value, Ptr,
                                      ST->getMemoryVT(), ST->getMemOperand());
      } else {
        ReplStore = DAG.getStore(BetterChain, SDLoc(N), Value, Ptr,
                                 ST->getMemOperand());
      }

      // Create token to keep both nodes around.
      SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
                                  MVT::Other, Chain, ReplStore);

      // Make sure the new and old chains are cleaned up.
      AddToWorklist(Token.getNode());

      // Don't add users to work list.
      return CombineTo(N, Token, false);
    }
  }

  // Try transforming N to an indexed store.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  // FIXME: is there such a thing as a truncating indexed store?
  if (ST->isTruncatingStore() && ST->isUnindexed() &&
      Value.getValueType().isInteger()) {
    // See if we can simplify the input to this truncstore with knowledge that
    // only the low bits are being used.
    // For example:
    // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
    SDValue Shorter =
      GetDemandedBits(Value,
                      APInt::getLowBitsSet(
                        Value.getValueType().getScalarType().getSizeInBits(),
                        ST->getMemoryVT().getScalarType().getSizeInBits()));
    AddToWorklist(Value.getNode());
    if (Shorter.getNode())
      return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
                               Ptr, ST->getMemoryVT(), ST->getMemOperand());

    // Otherwise, see if we can simplify the operation with
    // SimplifyDemandedBits, which only works if the value has a single use.
    if (SimplifyDemandedBits(Value,
                             APInt::getLowBitsSet(
                               Value.getValueType().getScalarType().getSizeInBits(),
                               ST->getMemoryVT().getScalarType().getSizeInBits())))
      return SDValue(N, 0);
  }

  // If this is a load followed by a store to the same location, then the store
  // is dead/noop.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
        ST->isUnindexed() && !ST->isVolatile() &&
        // There can't be any side effects between the load and store, such as
        // a call or store.
        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
      // The store is dead, remove it.
      return Chain;
    }
  }

  // If this is an FP_ROUND or TRUNCATE followed by a store, fold this into a
  // truncating store. We can do this even if this is already a truncstore.
  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            ST->getMemoryVT())) {
    return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
                             Ptr, ST->getMemoryVT(), ST->getMemOperand());
  }

  // Only perform this optimization before the types are legal, because we
  // don't want to perform this optimization on every DAGCombine invocation.
  if (!LegalTypes) {
    bool EverChanged = false;

    do {
      // There can be multiple store sequences on the same chain.
      // Keep trying to merge store sequences until we are unable to do so
      // or until we merge the last store on the chain.
      bool Changed = MergeConsecutiveStores(ST);
      EverChanged |= Changed;
      if (!Changed) break;
    } while (ST->getOpcode() != ISD::DELETED_NODE);

    if (EverChanged)
      return SDValue(N, 0);
  }

  return ReduceLoadOpStoreWidth(N);
}

SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
  SDValue InVec = N->getOperand(0);
  SDValue InVal = N->getOperand(1);
  SDValue EltNo = N->getOperand(2);
  SDLoc dl(N);

  // If the inserted element is an UNDEF, just use the input vector.
  if (InVal.getOpcode() == ISD::UNDEF)
    return InVec;

  EVT VT = InVec.getValueType();

  // If we can't generate a legal BUILD_VECTOR, exit
  if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
    return SDValue();

  // Check that we know which element is being inserted
  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();
  unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();

  // Canonicalize insert_vector_elt dag nodes.
9816 // Example: 9817 // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1) 9818 // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0) 9819 // 9820 // Do this only if the child insert_vector node has one use; also 9821 // do this only if indices are both constants and Idx1 < Idx0. 9822 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse() 9823 && isa<ConstantSDNode>(InVec.getOperand(2))) { 9824 unsigned OtherElt = 9825 cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue(); 9826 if (Elt < OtherElt) { 9827 // Swap nodes. 9828 SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VT, 9829 InVec.getOperand(0), InVal, EltNo); 9830 AddToWorklist(NewOp.getNode()); 9831 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()), 9832 VT, NewOp, InVec.getOperand(1), InVec.getOperand(2)); 9833 } 9834 } 9835 9836 // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially 9837 // be converted to a BUILD_VECTOR). Fill in the Ops vector with the 9838 // vector elements. 9839 SmallVector<SDValue, 8> Ops; 9840 // Do not combine these two vectors if the output vector will not replace 9841 // the input vector. 9842 if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) { 9843 Ops.append(InVec.getNode()->op_begin(), 9844 InVec.getNode()->op_end()); 9845 } else if (InVec.getOpcode() == ISD::UNDEF) { 9846 unsigned NElts = VT.getVectorNumElements(); 9847 Ops.append(NElts, DAG.getUNDEF(InVal.getValueType())); 9848 } else { 9849 return SDValue(); 9850 } 9851 9852 // Insert the element 9853 if (Elt < Ops.size()) { 9854 // All the operands of BUILD_VECTOR must have the same type; 9855 // we enforce that here. 9856 EVT OpVT = Ops[0].getValueType(); 9857 if (InVal.getValueType() != OpVT) 9858 InVal = OpVT.bitsGT(InVal.getValueType()) ? 
DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
      DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
    Ops[Elt] = InVal;
  }

  // Return the new vector
  return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}

SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
    SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad) {
  EVT ResultVT = EVE->getValueType(0);
  EVT VecEltVT = InVecVT.getVectorElementType();
  unsigned Align = OriginalLoad->getAlignment();
  unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
      VecEltVT.getTypeForEVT(*DAG.getContext()));

  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
    return SDValue();

  Align = NewAlign;

  SDValue NewPtr = OriginalLoad->getBasePtr();
  SDValue Offset;
  EVT PtrType = NewPtr.getValueType();
  MachinePointerInfo MPI;
  if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
    int Elt = ConstEltNo->getZExtValue();
    unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
    if (TLI.isBigEndian())
      PtrOff = InVecVT.getSizeInBits() / 8 - PtrOff;
    Offset = DAG.getConstant(PtrOff, PtrType);
    MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
  } else {
    Offset = DAG.getNode(
        ISD::MUL, SDLoc(EVE), EltNo.getValueType(), EltNo,
        DAG.getConstant(VecEltVT.getStoreSize(), EltNo.getValueType()));
    if (TLI.isBigEndian())
      Offset = DAG.getNode(
          ISD::SUB, SDLoc(EVE), EltNo.getValueType(),
          DAG.getConstant(InVecVT.getStoreSize(), EltNo.getValueType()),
          Offset);
    MPI = OriginalLoad->getPointerInfo();
  }
  NewPtr = DAG.getNode(ISD::ADD, SDLoc(EVE), PtrType, NewPtr, Offset);

  // The replacement we need to do here is a little tricky: we need to
  // replace an extractelement of a load with a load.
  // Use ReplaceAllUsesOfValuesWith to do the replacement.
  // Note that this replacement assumes that the extractelement is the only
  // use of the load; that's okay because we don't want to perform this
  // transformation in other cases anyway.
  SDValue Load;
  SDValue Chain;
  if (ResultVT.bitsGT(VecEltVT)) {
    // If the result type of vextract is wider than the load, then issue an
    // extending load instead.
    ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, VecEltVT)
                                   ? ISD::ZEXTLOAD
                                   : ISD::EXTLOAD;
    Load = DAG.getExtLoad(
        ExtType, SDLoc(EVE), ResultVT, OriginalLoad->getChain(), NewPtr, MPI,
        VecEltVT, OriginalLoad->isVolatile(), OriginalLoad->isNonTemporal(),
        OriginalLoad->isInvariant(), Align, OriginalLoad->getAAInfo());
    Chain = Load.getValue(1);
  } else {
    Load = DAG.getLoad(
        VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr, MPI,
        OriginalLoad->isVolatile(), OriginalLoad->isNonTemporal(),
        OriginalLoad->isInvariant(), Align, OriginalLoad->getAAInfo());
    Chain = Load.getValue(1);
    if (ResultVT.bitsLT(VecEltVT))
      Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
    else
      Load = DAG.getNode(ISD::BITCAST, SDLoc(EVE), ResultVT, Load);
  }
  WorklistRemover DeadNodes(*this);
  SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
  SDValue To[] = { Load, Chain };
  DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  // Since we're explicitly calling ReplaceAllUses, add the new node to the
  // worklist explicitly as well.
  AddToWorklist(Load.getNode());
  AddUsersToWorklist(Load.getNode()); // Add users too
  // Make sure to revisit this node to clean it up; it will usually be dead.
  AddToWorklist(EVE);
  ++OpsNarrowed;
  return SDValue(EVE, 0);
}

SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
  // (vextract (scalar_to_vector val, 0)) -> val
  SDValue InVec = N->getOperand(0);
  EVT VT = InVec.getValueType();
  EVT NVT = N->getValueType(0);

  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    // Check if the result type doesn't match the inserted element type. A
    // SCALAR_TO_VECTOR may truncate the inserted element and the
    // EXTRACT_VECTOR_ELT may widen the extracted vector.
    SDValue InOp = InVec.getOperand(0);
    if (InOp.getValueType() != NVT) {
      assert(InOp.getValueType().isInteger() && NVT.isInteger());
      return DAG.getSExtOrTrunc(InOp, SDLoc(InVec), NVT);
    }
    return InOp;
  }

  SDValue EltNo = N->getOperand(1);
  bool ConstEltNo = isa<ConstantSDNode>(EltNo);

  // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
  // We only perform this optimization before the op legalization phase because
  // we may introduce new vector instructions which are not backed by TD
  // patterns; for example, on AVX, extracting elements from a wide vector
  // without using extract_subvector. However, if we can find an underlying
  // scalar value, then we can always use that.
  if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
      && ConstEltNo) {
    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
    int NumElem = VT.getVectorNumElements();
    ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
    // Find the new index to extract from.
    int OrigElt = SVOp->getMaskElt(Elt);

    // Extracting an undef index is undef.
    if (OrigElt == -1)
      return DAG.getUNDEF(NVT);

    // Select the right vector half to extract from.
    SDValue SVInVec;
    if (OrigElt < NumElem) {
      SVInVec = InVec->getOperand(0);
    } else {
      SVInVec = InVec->getOperand(1);
      OrigElt -= NumElem;
    }

    if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue InOp = SVInVec.getOperand(OrigElt);
      if (InOp.getValueType() != NVT) {
        assert(InOp.getValueType().isInteger() && NVT.isInteger());
        InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT);
      }

      return InOp;
    }

    // FIXME: We should handle recursing on other vector shuffles and
    // scalar_to_vector here as well.

    if (!LegalOperations) {
      EVT IndexTy = TLI.getVectorIdxTy();
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT,
                         SVInVec, DAG.getConstant(OrigElt, IndexTy));
    }
  }

  bool BCNumEltsChanged = false;
  EVT ExtVT = VT.getVectorElementType();
  EVT LVT = ExtVT;

  // If the result of load has to be truncated, then it's not necessarily
  // profitable.
  if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
    return SDValue();

  if (InVec.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
10027 if (!InVec.hasOneUse()) 10028 return SDValue(); 10029 10030 EVT BCVT = InVec.getOperand(0).getValueType(); 10031 if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType())) 10032 return SDValue(); 10033 if (VT.getVectorNumElements() != BCVT.getVectorNumElements()) 10034 BCNumEltsChanged = true; 10035 InVec = InVec.getOperand(0); 10036 ExtVT = BCVT.getVectorElementType(); 10037 } 10038 10039 // (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size) 10040 if (!LegalOperations && !ConstEltNo && InVec.hasOneUse() && 10041 ISD::isNormalLoad(InVec.getNode()) && 10042 !N->getOperand(1)->hasPredecessor(InVec.getNode())) { 10043 SDValue Index = N->getOperand(1); 10044 if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec)) 10045 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index, 10046 OrigLoad); 10047 } 10048 10049 // Perform only after legalization to ensure build_vector / vector_shuffle 10050 // optimizations have already been done. 10051 if (!LegalOperations) return SDValue(); 10052 10053 // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size) 10054 // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size) 10055 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr) 10056 10057 if (ConstEltNo) { 10058 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 10059 10060 LoadSDNode *LN0 = nullptr; 10061 const ShuffleVectorSDNode *SVN = nullptr; 10062 if (ISD::isNormalLoad(InVec.getNode())) { 10063 LN0 = cast<LoadSDNode>(InVec); 10064 } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR && 10065 InVec.getOperand(0).getValueType() == ExtVT && 10066 ISD::isNormalLoad(InVec.getOperand(0).getNode())) { 10067 // Don't duplicate a load with other uses. 10068 if (!InVec.hasOneUse()) 10069 return SDValue(); 10070 10071 LN0 = cast<LoadSDNode>(InVec.getOperand(0)); 10072 } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) { 10073 // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1) 10074 // => 10075 // (load $addr+1*size) 10076 10077 // Don't duplicate a load with other uses. 10078 if (!InVec.hasOneUse()) 10079 return SDValue(); 10080 10081 // If the bit convert changed the number of elements, it is unsafe 10082 // to examine the mask. 10083 if (BCNumEltsChanged) 10084 return SDValue(); 10085 10086 // Select the input vector, guarding against out of range extract vector. 10087 unsigned NumElems = VT.getVectorNumElements(); 10088 int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt); 10089 InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1); 10090 10091 if (InVec.getOpcode() == ISD::BITCAST) { 10092 // Don't duplicate a load with other uses. 10093 if (!InVec.hasOneUse()) 10094 return SDValue(); 10095 10096 InVec = InVec.getOperand(0); 10097 } 10098 if (ISD::isNormalLoad(InVec.getNode())) { 10099 LN0 = cast<LoadSDNode>(InVec); 10100 Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems; 10101 EltNo = DAG.getConstant(Elt, EltNo.getValueType()); 10102 } 10103 } 10104 10105 // Make sure we found a non-volatile load and the extractelement is 10106 // the only use. 10107 if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile()) 10108 return SDValue(); 10109 10110 // If Idx was -1 above, Elt is going to be -1, so just return undef. 
    if (Elt == -1)
      return DAG.getUNDEF(LVT);

    return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, EltNo, LN0);
  }

  return SDValue();
}

// Simplify (build_vec (ext )) to (bitcast (build_vec ))
SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
  // We perform this optimization post type-legalization because
  // the type-legalizer often scalarizes integer-promoted vectors.
  // Performing this optimization earlier may create bit-casts which
  // will be type-legalized into complex code sequences.
  // We perform this optimization only before the operation legalizer because
  // we may introduce illegal operations.
  if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
    return SDValue();

  unsigned NumInScalars = N->getNumOperands();
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // Check to see if this is a BUILD_VECTOR of a bunch of values
  // which come from any_extend or zero_extend nodes. If so, we can create
  // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
  // optimizations. We do not handle sign-extend because we can't fill the sign
  // using shuffles.
  EVT SourceType = MVT::Other;
  bool AllAnyExt = true;

  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);
    // Ignore undef inputs.
    if (In.getOpcode() == ISD::UNDEF) continue;

    bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
    bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;

    // Abort if the element is not an extension.
    if (!ZeroExt && !AnyExt) {
      SourceType = MVT::Other;
      break;
    }

    // The input is a ZeroExt or AnyExt. Check the original type.
    EVT InTy = In.getOperand(0).getValueType();

    // Check that all of the widened source types are the same.
    if (SourceType == MVT::Other)
      // First time.
      SourceType = InTy;
    else if (InTy != SourceType) {
      // Multiple source types. Abort.
      SourceType = MVT::Other;
      break;
    }

    // Check if all of the extends are ANY_EXTENDs.
    AllAnyExt &= AnyExt;
  }

  // In order to have valid types, all of the inputs must be extended from the
  // same source type and all of the inputs must be any or zero extend.
  // Scalar sizes must be a power of two.
  EVT OutScalarTy = VT.getScalarType();
  bool ValidTypes = SourceType != MVT::Other &&
                    isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
                    isPowerOf2_32(SourceType.getSizeInBits());

  // Create a new simpler BUILD_VECTOR sequence which other optimizations can
  // turn into a single shuffle instruction.
  if (!ValidTypes)
    return SDValue();

  bool isLE = TLI.isLittleEndian();
  unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
  assert(ElemRatio > 1 && "Invalid element size ratio");
  SDValue Filler = AllAnyExt ?
DAG.getUNDEF(SourceType) :
                               DAG.getConstant(0, SourceType);

  unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
  SmallVector<SDValue, 8> Ops(NewBVElems, Filler);

  // Populate the new build_vector
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Cast = N->getOperand(i);
    assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
            Cast.getOpcode() == ISD::ZERO_EXTEND ||
            Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode");
    SDValue In;
    if (Cast.getOpcode() == ISD::UNDEF)
      In = DAG.getUNDEF(SourceType);
    else
      In = Cast->getOperand(0);
    unsigned Index = isLE ? (i * ElemRatio) :
                            (i * ElemRatio + (ElemRatio - 1));

    assert(Index < Ops.size() && "Invalid index");
    Ops[Index] = In;
  }

  // The type of the new BUILD_VECTOR node.
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
  assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
         "Invalid vector size");
  // Check if the new vector type is legal.
  if (!isTypeLegal(VecVT)) return SDValue();

  // Make the new BUILD_VECTOR.
  SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);

  // The new BUILD_VECTOR node has the potential to be further optimized.
  AddToWorklist(BV.getNode());
  // Bitcast to the desired type.
  return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}

SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
  EVT VT = N->getValueType(0);

  unsigned NumInScalars = N->getNumOperands();
  SDLoc dl(N);

  EVT SrcVT = MVT::Other;
  unsigned Opcode = ISD::DELETED_NODE;
  unsigned NumDefs = 0;

  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);
    unsigned Opc = In.getOpcode();

    if (Opc == ISD::UNDEF)
      continue;

    // Check whether all defined scalar values are floats converted from
    // integers.
    if (Opcode == ISD::DELETED_NODE &&
        (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) {
      Opcode = Opc;
    }

    if (Opc != Opcode)
      return SDValue();

    EVT InVT = In.getOperand(0).getValueType();

    // If the scalar values do not all share the same source type, bail out.
    // Requiring a single source type simplifies the resulting integer
    // BUILD_VECTOR.
    if (SrcVT == MVT::Other)
      SrcVT = InVT;
    if (SrcVT != InVT)
      return SDValue();
    NumDefs++;
  }

  // If the vector has just one element defined, it's not worth folding it
  // into a vectorized one.
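  // (The overall fold this function performs is, e.g.,
  //  (build_vector (sint_to_fp a), (sint_to_fp b), ...)
  //    -> (sint_to_fp (build_vector a, b, ...)),
  //  provided the vectorized conversion is legal or custom for the target.)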
10269 if (NumDefs < 2) 10270 return SDValue(); 10271 10272 assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP) 10273 && "Should only handle conversion from integer to float."); 10274 assert(SrcVT != MVT::Other && "Cannot determine source type!"); 10275 10276 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars); 10277 10278 if (!TLI.isOperationLegalOrCustom(Opcode, NVT)) 10279 return SDValue(); 10280 10281 SmallVector<SDValue, 8> Opnds; 10282 for (unsigned i = 0; i != NumInScalars; ++i) { 10283 SDValue In = N->getOperand(i); 10284 10285 if (In.getOpcode() == ISD::UNDEF) 10286 Opnds.push_back(DAG.getUNDEF(SrcVT)); 10287 else 10288 Opnds.push_back(In.getOperand(0)); 10289 } 10290 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Opnds); 10291 AddToWorklist(BV.getNode()); 10292 10293 return DAG.getNode(Opcode, dl, VT, BV); 10294 } 10295 10296 SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { 10297 unsigned NumInScalars = N->getNumOperands(); 10298 SDLoc dl(N); 10299 EVT VT = N->getValueType(0); 10300 10301 // A vector built entirely of undefs is undef. 10302 if (ISD::allOperandsUndef(N)) 10303 return DAG.getUNDEF(VT); 10304 10305 SDValue V = reduceBuildVecExtToExtBuildVec(N); 10306 if (V.getNode()) 10307 return V; 10308 10309 V = reduceBuildVecConvertToConvertBuildVec(N); 10310 if (V.getNode()) 10311 return V; 10312 10313 // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT 10314 // operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from 10315 // at most two distinct vectors, turn this into a shuffle node. 10316 10317 // May only combine to shuffle after legalize if shuffle is legal. 10318 if (LegalOperations && 10319 !TLI.isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT)) 10320 return SDValue(); 10321 10322 SDValue VecIn1, VecIn2; 10323 for (unsigned i = 0; i != NumInScalars; ++i) { 10324 // Ignore undef inputs. 10325 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 10326 10327 // If this input is something other than a EXTRACT_VECTOR_ELT with a 10328 // constant index, bail out. 10329 if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT || 10330 !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) { 10331 VecIn1 = VecIn2 = SDValue(nullptr, 0); 10332 break; 10333 } 10334 10335 // We allow up to two distinct input vectors. 10336 SDValue ExtractedFromVec = N->getOperand(i).getOperand(0); 10337 if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2) 10338 continue; 10339 10340 if (!VecIn1.getNode()) { 10341 VecIn1 = ExtractedFromVec; 10342 } else if (!VecIn2.getNode()) { 10343 VecIn2 = ExtractedFromVec; 10344 } else { 10345 // Too many inputs. 10346 VecIn1 = VecIn2 = SDValue(nullptr, 0); 10347 break; 10348 } 10349 } 10350 10351 // If everything is good, we can make a shuffle operation. 10352 if (VecIn1.getNode()) { 10353 SmallVector<int, 8> Mask; 10354 for (unsigned i = 0; i != NumInScalars; ++i) { 10355 if (N->getOperand(i).getOpcode() == ISD::UNDEF) { 10356 Mask.push_back(-1); 10357 continue; 10358 } 10359 10360 // If extracting from the first vector, just use the index directly. 
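      // (A sketch of the mask being built: (build_vector
      //  (extract_elt V1, 0), (extract_elt V2, 1)) yields the mask
      //  <0, 1 + NumInScalars>.)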
      SDValue Extract = N->getOperand(i);
      SDValue ExtVal = Extract.getOperand(1);
      if (Extract.getOperand(0) == VecIn1) {
        unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
        if (ExtIndex > VT.getVectorNumElements())
          return SDValue();

        Mask.push_back(ExtIndex);
        continue;
      }

      // Otherwise, use InIdx + VecSize
      unsigned Idx = cast<ConstantSDNode>(ExtVal)->getZExtValue();
      Mask.push_back(Idx+NumInScalars);
    }

    // We can't generate a shuffle node with mismatched input and output types.
    // Attempt to transform a single input vector to the correct type.
    if ((VT != VecIn1.getValueType())) {
      // We don't support shuffling between TWO values of different types.
      if (VecIn2.getNode())
        return SDValue();

      // We only support widening of vectors which are half the size of the
      // output registers. For example XMM->YMM widening on X86 with AVX.
      if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits())
        return SDValue();

      // If the input vector type has a different base type to the output
      // vector type, bail out.
      if (VecIn1.getValueType().getVectorElementType() !=
          VT.getVectorElementType())
        return SDValue();

      // Widen the input vector by adding undef values.
      VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                           VecIn1, DAG.getUNDEF(VecIn1.getValueType()));
    }

    // If VecIn2 is unused then change it to undef.
    VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);

    // Check that we were able to transform all incoming values to the same
    // type.
    if (VecIn2.getValueType() != VecIn1.getValueType() ||
        VecIn1.getValueType() != VT)
      return SDValue();

    // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
    if (!isTypeLegal(VT))
      return SDValue();

    // Return the new VECTOR_SHUFFLE node.
    SDValue Ops[2];
    Ops[0] = VecIn1;
    Ops[1] = VecIn2;
    return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], &Mask[0]);
  }

  return SDValue();
}

SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
  // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
  // EXTRACT_SUBVECTOR operations. If so, and if the EXTRACT_SUBVECTOR vector
  // inputs come from at most two distinct vectors, turn this into a shuffle
  // node.

  // If we only have one input vector, we don't need to do any concatenation.
  if (N->getNumOperands() == 1)
    return N->getOperand(0);

  // Check if all of the operands are undefs.
  EVT VT = N->getValueType(0);
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  // Optimize concat_vectors where one of the vectors is undef.
  if (N->getNumOperands() == 2 &&
      N->getOperand(1)->getOpcode() == ISD::UNDEF) {
    SDValue In = N->getOperand(0);
    assert(In.getValueType().isVector() && "Must concat vectors");

    // Transform: concat_vectors(scalar, undef) -> scalar_to_vector(scalar).
    if (In->getOpcode() == ISD::BITCAST &&
        !In->getOperand(0)->getValueType(0).isVector()) {
      SDValue Scalar = In->getOperand(0);
      EVT SclTy = Scalar->getValueType(0);

      if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
        return SDValue();

      EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy,
                                 VT.getSizeInBits() / SclTy.getSizeInBits());
      if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
        return SDValue();

      SDLoc dl = SDLoc(N);
      SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NVT, Scalar);
      return DAG.getNode(ISD::BITCAST, dl, VT, Res);
    }
  }

  // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
  // -> (BUILD_VECTOR A, B, ..., C, D, ...)
  if (N->getNumOperands() == 2 &&
      N->getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
      N->getOperand(1).getOpcode() == ISD::BUILD_VECTOR) {
    EVT VT = N->getValueType(0);
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SmallVector<SDValue, 8> Opnds;
    unsigned BuildVecNumElts = N0.getNumOperands();

    EVT SclTy0 = N0.getOperand(0)->getValueType(0);
    EVT SclTy1 = N1.getOperand(0)->getValueType(0);
    if (SclTy0.isFloatingPoint()) {
      for (unsigned i = 0; i != BuildVecNumElts; ++i)
        Opnds.push_back(N0.getOperand(i));
      for (unsigned i = 0; i != BuildVecNumElts; ++i)
        Opnds.push_back(N1.getOperand(i));
    } else {
      // If the BUILD_VECTORs are built from integers, they may have different
      // operand types. Get the smaller type and truncate all operands to it.
      EVT MinTy = SclTy0.bitsLE(SclTy1) ? SclTy0 : SclTy1;
      for (unsigned i = 0; i != BuildVecNumElts; ++i)
        Opnds.push_back(DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinTy,
                        N0.getOperand(i)));
      for (unsigned i = 0; i != BuildVecNumElts; ++i)
        Opnds.push_back(DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinTy,
                        N1.getOperand(i)));
    }

    return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
  }

  // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
  // nodes often generate nop CONCAT_VECTOR nodes.
  // Scan the CONCAT_VECTOR operands and look for CONCAT operations that
  // place the incoming vectors at the exact same location.
  SDValue SingleSource = SDValue();
  unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.getOpcode() == ISD::UNDEF)
      continue;

    // Check if this is the identity extract:
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // Find the single incoming vector for the extract_subvector.
    if (SingleSource.getNode()) {
      if (Op.getOperand(0) != SingleSource)
        return SDValue();
    } else {
      SingleSource = Op.getOperand(0);

      // Check the source type is the same as the type of the result.
      // If not, this concat may extend the vector, so we cannot
      // optimize it away.
      if (SingleSource.getValueType() != N->getValueType(0))
        return SDValue();
    }

    unsigned IdentityIndex = i * PartNumElem;
    ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    // The extract index must be constant.
    if (!CS)
      return SDValue();

    // Check that we are reading from the identity index.
    if (CS->getZExtValue() != IdentityIndex)
      return SDValue();
  }

  if (SingleSource.getNode())
    return SingleSource;

  return SDValue();
}

SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
  EVT NVT = N->getValueType(0);
  SDValue V = N->getOperand(0);

  if (V->getOpcode() == ISD::CONCAT_VECTORS) {
    // Combine:
    //    (extract_subvec (concat V1, V2, ...), i)
    // Into:
    //    Vi if possible
    // Only operand 0 is checked as 'concat' assumes all inputs of the same
    // type.
    if (V->getOperand(0).getValueType() != NVT)
      return SDValue();
    unsigned Idx = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    unsigned NumElems = NVT.getVectorNumElements();
    assert((Idx % NumElems) == 0 &&
           "IDX in concat is not a multiple of the result vector length.");
    return V->getOperand(Idx / NumElems);
  }

  // Skip bitcasting
  if (V->getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
    SDLoc dl(N);
    // Handle only the simple case where the vector being inserted and the
    // vector being extracted are of the same type, and are half the size of
    // the larger vector.
    EVT BigVT = V->getOperand(0).getValueType();
    EVT SmallVT = V->getOperand(1).getValueType();
    if (!NVT.bitsEq(SmallVT) || NVT.getSizeInBits()*2 != BigVT.getSizeInBits())
      return SDValue();

    // Only handle cases where both indexes are constants with the same type.
    ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
    ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));

    if (InsIdx && ExtIdx &&
        InsIdx->getValueType(0).getSizeInBits() <= 64 &&
        ExtIdx->getValueType(0).getSizeInBits() <= 64) {
      // Combine:
      //    (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
      // Into:
      //    indices are equal or bit offsets are equal => V1
      //    otherwise => (extract_subvec V1, ExtIdx)
      if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() ==
          ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits())
        return DAG.getNode(ISD::BITCAST, dl, NVT, V->getOperand(1));
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT,
                         DAG.getNode(ISD::BITCAST, dl,
                                     N->getOperand(0).getValueType(),
                                     V->getOperand(0)), N->getOperand(1));
    }
  }

  return SDValue();
}

// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  SmallVector<SDValue, 4> Ops;
  EVT ConcatVT = N0.getOperand(0).getValueType();
  unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
  unsigned NumConcats = NumElts / NumElemsPerConcat;

  // Look at every vector that's inserted. We're looking for exact
  // subvector-sized copies from a concatenated vector.
  for (unsigned I = 0; I != NumConcats; ++I) {
    // Make sure we're dealing with a copy.
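    // (For example, with 4-element parts: shuffle(concat(A, B), concat(C, D),
    //  <4,5,6,7,0,1,2,3>) partitions into concat(B, A), because each group
    //  of four mask elements is a contiguous, aligned run into one operand.)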
    unsigned Begin = I * NumElemsPerConcat;
    bool AllUndef = true, NoUndef = true;
    for (unsigned J = Begin; J != Begin + NumElemsPerConcat; ++J) {
      if (SVN->getMaskElt(J) >= 0)
        AllUndef = false;
      else
        NoUndef = false;
    }

    if (NoUndef) {
      if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0)
        return SDValue();

      for (unsigned J = 1; J != NumElemsPerConcat; ++J)
        if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J))
          return SDValue();

      unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat;
      if (FirstElt < N0.getNumOperands())
        Ops.push_back(N0.getOperand(FirstElt));
      else
        Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands()));

    } else if (AllUndef) {
      Ops.push_back(DAG.getUNDEF(N0.getOperand(0).getValueType()));
    } else { // Mixed with general masks and undefs, can't do optimization.
      return SDValue();
    }
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}

SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");

  // Canonicalize shuffle undef, undef -> undef
  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(VT);

  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  // Canonicalize shuffle v, v -> v, undef
  if (N0 == N1) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) Idx -= NumElts;
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT),
                                &NewMask[0]);
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N0.getOpcode() == ISD::UNDEF) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= 0) {
        if (Idx >= (int)NumElts)
          Idx -= NumElts;
        else
          Idx = -1; // remove reference to lhs
      }
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, SDLoc(N), N1, DAG.getUNDEF(VT),
                                &NewMask[0]);
  }

  // Remove references to rhs if it is undef
  if (N1.getOpcode() == ISD::UNDEF) {
    bool Changed = false;
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) {
        Idx = -1;
        Changed = true;
      }
      NewMask.push_back(Idx);
    }
    if (Changed)
      return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, &NewMask[0]);
  }

  // If it is a splat, check if the argument vector is another splat or a
  // build_vector with all scalar elements the same.
  if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
    SDNode *V = N0.getNode();

    // If this is a bit convert that changes the element type of the vector but
    // not the number of vector elements, look through it. Be careful not to
    // look through conversions that change things like v4f32 to v2f64.
    if (V->getOpcode() == ISD::BITCAST) {
      SDValue ConvInput = V->getOperand(0);
      if (ConvInput.getValueType().isVector() &&
          ConvInput.getValueType().getVectorNumElements() == NumElts)
        V = ConvInput.getNode();
    }

    if (V->getOpcode() == ISD::BUILD_VECTOR) {
      assert(V->getNumOperands() == NumElts &&
             "BUILD_VECTOR has wrong number of operands");
      SDValue Base;
      bool AllSame = true;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
          Base = V->getOperand(i);
          break;
        }
      }
      // Splat of <u, u, u, u>, return <u, u, u, u>
      if (!Base.getNode())
        return N0;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i) != Base) {
          AllSame = false;
          break;
        }
      }
      // Splat of <x, x, x, x>, return <x, x, x, x>
      if (AllSame)
        return N0;
    }
  }

  if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
      Level < AfterLegalizeVectorOps &&
      (N1.getOpcode() == ISD::UNDEF ||
      (N1.getOpcode() == ISD::CONCAT_VECTORS &&
       N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
    SDValue V = partitionShuffleOfConcats(N, DAG);

    if (V.getNode())
      return V;
  }

  // If this shuffle node is simply a swizzle of another shuffle node,
  // then try to simplify it.
  if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
      N1.getOpcode() == ISD::UNDEF) {

    ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);

    // The incoming shuffle must be of the same type as the result of the
    // current shuffle.
    assert(OtherSV->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    SmallVector<int, 4> Mask;
    // Compute the combined shuffle mask.
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      assert(Idx < (int)NumElts && "Index references undef operand");
      // Next, this index comes from the first value, which is the incoming
      // shuffle. Adopt the incoming index.
      if (Idx >= 0)
        Idx = OtherSV->getMaskElt(Idx);
      Mask.push_back(Idx);
    }

    bool CommuteOperands = false;
    if (N0.getOperand(1).getOpcode() != ISD::UNDEF) {
      // To be valid, the combined shuffle mask should only reference elements
      // from one of the two vectors in input to the inner shufflevector.
      bool IsValidMask = true;
      for (unsigned i = 0; i != NumElts && IsValidMask; ++i)
        // See if the combined mask only references undefs or elements coming
        // from the first shufflevector operand.
        IsValidMask = Mask[i] < 0 || (unsigned)Mask[i] < NumElts;

      if (!IsValidMask) {
        IsValidMask = true;
        for (unsigned i = 0; i != NumElts && IsValidMask; ++i)
          // Check that all the elements come from the second shuffle operand.
          IsValidMask = Mask[i] < 0 || (unsigned)Mask[i] >= NumElts;
        CommuteOperands = IsValidMask;
      }

      // Early exit if the combined shuffle mask is not valid.
10809 if (!IsValidMask) 10810 return SDValue(); 10811 } 10812 10813 // See if this pair of shuffles can be safely folded according to either 10814 // of the following rules: 10815 // shuffle(shuffle(x, y), undef) -> x 10816 // shuffle(shuffle(x, undef), undef) -> x 10817 // shuffle(shuffle(x, y), undef) -> y 10818 bool IsIdentityMask = true; 10819 unsigned BaseMaskIndex = CommuteOperands ? NumElts : 0; 10820 for (unsigned i = 0; i != NumElts && IsIdentityMask; ++i) { 10821 // Skip Undefs. 10822 if (Mask[i] < 0) 10823 continue; 10824 10825 // The combined shuffle must map each index to itself. 10826 IsIdentityMask = (unsigned)Mask[i] == i + BaseMaskIndex; 10827 } 10828 10829 if (IsIdentityMask) { 10830 if (CommuteOperands) 10831 // optimize shuffle(shuffle(x, y), undef) -> y. 10832 return OtherSV->getOperand(1); 10833 10834 // optimize shuffle(shuffle(x, undef), undef) -> x 10835 // optimize shuffle(shuffle(x, y), undef) -> x 10836 return OtherSV->getOperand(0); 10837 } 10838 10839 // It may still be beneficial to combine the two shuffles if the 10840 // resulting shuffle is legal. 10841 if (TLI.isTypeLegal(VT) && TLI.isShuffleMaskLegal(Mask, VT)) { 10842 if (!CommuteOperands) 10843 // shuffle(shuffle(x, undef, M1), undef, M2) -> shuffle(x, undef, M3). 10844 // shuffle(shuffle(x, y, M1), undef, M2) -> shuffle(x, undef, M3) 10845 return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0), N1, 10846 &Mask[0]); 10847 10848 // shuffle(shuffle(x, y, M1), undef, M2) -> shuffle(undef, y, M3) 10849 return DAG.getVectorShuffle(VT, SDLoc(N), N1, N0->getOperand(1), 10850 &Mask[0]); 10851 } 10852 } 10853 10854 // Canonicalize shuffles according to rules: 10855 // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A) 10856 // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B) 10857 // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B) 10858 if (N1.getOpcode() == ISD::VECTOR_SHUFFLE && N0.getOpcode() != ISD::UNDEF && 10859 N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG && 10860 TLI.isTypeLegal(VT)) { 10861 // The incoming shuffle must be of the same type as the result of the 10862 // current shuffle. 10863 assert(N1->getOperand(0).getValueType() == VT && 10864 "Shuffle types don't match"); 10865 10866 SDValue SV0 = N1->getOperand(0); 10867 SDValue SV1 = N1->getOperand(1); 10868 bool HasSameOp0 = N0 == SV0; 10869 bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF; 10870 if (HasSameOp0 || IsSV1Undef || N0 == SV1) 10871 // Commute the operands of this shuffle so that next rule 10872 // will trigger. 10873 return DAG.getCommutedVectorShuffle(*SVN); 10874 } 10875 10876 // Try to fold according to rules: 10877 // shuffle(shuffle(A, B, M0), B, M1) -> shuffle(A, B, M2) 10878 // shuffle(shuffle(A, B, M0), A, M1) -> shuffle(A, B, M2) 10879 // shuffle(shuffle(A, Undef, M0), B, M1) -> shuffle(A, B, M2) 10880 // shuffle(shuffle(A, Undef, M0), A, M1) -> shuffle(A, Undef, M2) 10881 // Don't try to fold shuffles with illegal type. 10882 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG && 10883 N1.getOpcode() != ISD::UNDEF && TLI.isTypeLegal(VT)) { 10884 ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0); 10885 10886 // The incoming shuffle must be of the same type as the result of the 10887 // current shuffle. 
    assert(OtherSV->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    SDValue SV0 = OtherSV->getOperand(0);
    SDValue SV1 = OtherSV->getOperand(1);
    bool HasSameOp0 = N1 == SV0;
    bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF;
    if (!HasSameOp0 && !IsSV1Undef && N1 != SV1)
      // Early exit.
      return SDValue();

    SmallVector<int, 4> Mask;
    // Compute the combined shuffle mask for a shuffle with SV0 as the first
    // operand, and SV1 as the second operand.
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx < 0) {
        // Propagate Undef.
        Mask.push_back(Idx);
        continue;
      }

      if (Idx < (int)NumElts) {
        Idx = OtherSV->getMaskElt(Idx);
        if (IsSV1Undef && Idx >= (int)NumElts)
          Idx = -1;  // Propagate Undef.
      } else
        Idx = HasSameOp0 ? Idx - NumElts : Idx;

      Mask.push_back(Idx);
    }

    // Avoid introducing shuffles with illegal mask.
    if (TLI.isShuffleMaskLegal(Mask, VT)) {
      if (IsSV1Undef)
        // shuffle(shuffle(A, Undef, M0), B, M1) -> shuffle(A, B, M2)
        // shuffle(shuffle(A, Undef, M0), A, M1) -> shuffle(A, Undef, M2)
        return DAG.getVectorShuffle(VT, SDLoc(N), SV0, N1, &Mask[0]);
      return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, &Mask[0]);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N2 = N->getOperand(2);

  // If the input vector is a concatenation, and the insert replaces
  // one of the halves, we can optimize into a single concat_vectors.
  if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
      N0->getNumOperands() == 2 && N2.getOpcode() == ISD::Constant) {
    APInt InsIdx = cast<ConstantSDNode>(N2)->getAPIntValue();
    EVT VT = N->getValueType(0);

    // Lower half: fold (insert_subvector (concat_vectors X, Y), Z) ->
    // (concat_vectors Z, Y)
    if (InsIdx == 0)
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
                         N->getOperand(1), N0.getOperand(1));

    // Upper half: fold (insert_subvector (concat_vectors X, Y), Z) ->
    // (concat_vectors X, Z)
    if (InsIdx == VT.getVectorNumElements()/2)
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
                         N0.getOperand(0), N->getOperand(1));
  }

  return SDValue();
}

/// XformToShuffleWithZero - Returns a vector_shuffle if it is able to
/// transform an AND to a vector_shuffle with the destination vector and a
/// zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
///      vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (N->getOpcode() == ISD::AND) {
    if (RHS.getOpcode() == ISD::BITCAST)
      RHS = RHS.getOperand(0);
    if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<int, 8> Indices;
      unsigned NumElts = RHS.getNumOperands();
      for (unsigned i = 0; i != NumElts; ++i) {
        SDValue Elt = RHS.getOperand(i);
        if (!isa<ConstantSDNode>(Elt))
          return SDValue();

        if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
          Indices.push_back(i);
        else if (cast<ConstantSDNode>(Elt)->isNullValue())
          Indices.push_back(NumElts);
        else
          return SDValue();
      }

      // Let's see if the target supports this vector_shuffle.
      EVT RVT = RHS.getValueType();
      if (!TLI.isVectorClearMaskLegal(Indices, RVT))
        return SDValue();

      // Return the new VECTOR_SHUFFLE node.
      EVT EltVT = RVT.getVectorElementType();
      SmallVector<SDValue, 8> ZeroOps(RVT.getVectorNumElements(),
                                      DAG.getConstant(0, EltVT));
      SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), RVT, ZeroOps);
      LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
      SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
      return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
    }
  }

  return SDValue();
}

/// SimplifyVBinOp - Visit a binary vector operation, like ADD.
SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
  assert(N->getValueType(0).isVector() &&
         "SimplifyVBinOp only works on vectors!");

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Shuffle = XformToShuffleWithZero(N);
  if (Shuffle.getNode()) return Shuffle;

  // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
  // this operation.
  if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
      RHS.getOpcode() == ISD::BUILD_VECTOR) {
    // Check if both vectors are constants. If not, bail out.
    if (!(cast<BuildVectorSDNode>(LHS)->isConstant() &&
          cast<BuildVectorSDNode>(RHS)->isConstant()))
      return SDValue();

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
      SDValue LHSOp = LHS.getOperand(i);
      SDValue RHSOp = RHS.getOperand(i);

      // Can't fold divide by zero.
      if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
          N->getOpcode() == ISD::FDIV) {
        if ((RHSOp.getOpcode() == ISD::Constant &&
             cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) ||
            (RHSOp.getOpcode() == ISD::ConstantFP &&
             cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero()))
          break;
      }

      EVT VT = LHSOp.getValueType();
      EVT RVT = RHSOp.getValueType();
      if (RVT != VT) {
        // Integer BUILD_VECTOR operands may have types larger than the element
        // size (e.g., when the element type is not legal). Prior to type
        // legalization, the types may not match between the two BUILD_VECTORS.
        // Truncate one of the operands to make them match.
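        // For example (hypothetical, pre-type-legalization): one BUILD_VECTOR
        // of a <4 x i8> value may carry i32 operands while the other carries
        // i16 operands; truncating the wider one gives both a common type.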
        if (RVT.getSizeInBits() > VT.getSizeInBits()) {
          RHSOp = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, RHSOp);
        } else {
          LHSOp = DAG.getNode(ISD::TRUNCATE, SDLoc(N), RVT, LHSOp);
          VT = RVT;
        }
      }
      SDValue FoldOp = DAG.getNode(N->getOpcode(), SDLoc(LHS), VT,
                                   LHSOp, RHSOp);
      if (FoldOp.getOpcode() != ISD::UNDEF &&
          FoldOp.getOpcode() != ISD::Constant &&
          FoldOp.getOpcode() != ISD::ConstantFP)
        break;
      Ops.push_back(FoldOp);
      AddToWorklist(FoldOp.getNode());
    }

    if (Ops.size() == LHS.getNumOperands())
      return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), LHS.getValueType(), Ops);
  }

  // Type legalization might introduce new shuffles in the DAG.
  // Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask)))
  //   -> (shuffle (VBinOp (A, B)), Undef, Mask).
  if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) &&
      isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
      LHS.getOperand(1).getOpcode() == ISD::UNDEF &&
      RHS.getOperand(1).getOpcode() == ISD::UNDEF) {
    ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
    ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);

    if (SVN0->getMask().equals(SVN1->getMask())) {
      EVT VT = N->getValueType(0);
      SDValue UndefVector = LHS.getOperand(1);
      SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                     LHS.getOperand(0), RHS.getOperand(0));
      AddUsersToWorklist(N);
      return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector,
                                  &SVN0->getMask()[0]);
    }
  }

  return SDValue();
}

/// SimplifyVUnaryOp - Visit a unary vector operation, like FABS/FNEG.
SDValue DAGCombiner::SimplifyVUnaryOp(SDNode *N) {
  assert(N->getValueType(0).isVector() &&
         "SimplifyVUnaryOp only works on vectors!");

  SDValue N0 = N->getOperand(0);

  if (N0.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Operand is a BUILD_VECTOR node, see if we can constant fold it.
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
    SDValue Op = N0.getOperand(i);
    if (Op.getOpcode() != ISD::UNDEF &&
        Op.getOpcode() != ISD::ConstantFP)
      break;
    EVT EltVT = Op.getValueType();
    SDValue FoldOp = DAG.getNode(N->getOpcode(), SDLoc(N0), EltVT, Op);
    if (FoldOp.getOpcode() != ISD::UNDEF &&
        FoldOp.getOpcode() != ISD::ConstantFP)
      break;
    Ops.push_back(FoldOp);
    AddToWorklist(FoldOp.getNode());
  }

  if (Ops.size() != N0.getNumOperands())
    return SDValue();

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N0.getValueType(), Ops);
}

SDValue DAGCombiner::SimplifySelect(SDLoc DL, SDValue N0,
                                    SDValue N1, SDValue N2) {
  assert(N0.getOpcode() == ISD::SETCC &&
         "First argument must be a SetCC node!");

  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());

  // If we got a simplified select_cc node back from SimplifySelectCC, then
  // break it down into a new SETCC node, and a new SELECT node, and then return
  // the SELECT node, since we were called with a SELECT node.
  if (SCC.getNode()) {
    // Check to see if we got a select_cc back (to turn into setcc/select).
    // Otherwise, just return whatever node we got back, like fabs.
    if (SCC.getOpcode() == ISD::SELECT_CC) {
      SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
                                  N0.getValueType(),
                                  SCC.getOperand(0), SCC.getOperand(1),
                                  SCC.getOperand(4));
      AddToWorklist(SETCC.getNode());
      return DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
                           SCC.getOperand(2), SCC.getOperand(3));
    }

    return SCC;
  }
  return SDValue();
}

/// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS
/// are the two values being selected between, see if we can simplify the
/// select. Callers of this should assume that TheSelect is deleted if this
/// returns true. As such, they should return the appropriate thing (e.g. the
/// node) back to the top-level of the DAG combiner loop to avoid it being
/// looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
                                    SDValue RHS) {

  // Cannot simplify select with vector condition.
  if (TheSelect->getOperand(0).getValueType().isVector()) return false;

  // If this is a select from two identical things, try to pull the operation
  // through the select.
  if (LHS.getOpcode() != RHS.getOpcode() ||
      !LHS.hasOneUse() || !RHS.hasOneUse())
    return false;

  // If this is a load and the token chain is identical, replace the select
  // of two loads with a load through a select of the address to load from.
  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
  // constants have been dropped into the constant pool.
  if (LHS.getOpcode() == ISD::LOAD) {
    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
    LoadSDNode *RLD = cast<LoadSDNode>(RHS);

    // Token chains must be identical.
    if (LHS.getOperand(0) != RHS.getOperand(0) ||
        // Do not let this transformation reduce the number of volatile loads.
        LLD->isVolatile() || RLD->isVolatile() ||
        // If this is an EXTLOAD, the VT's must match.
        LLD->getMemoryVT() != RLD->getMemoryVT() ||
        // If this is an EXTLOAD, the kind of extension must match.
        (LLD->getExtensionType() != RLD->getExtensionType() &&
         // The only exception is if one of the extensions is anyext.
         LLD->getExtensionType() != ISD::EXTLOAD &&
         RLD->getExtensionType() != ISD::EXTLOAD) ||
        // FIXME: this discards src value information. This is
        // over-conservative. It would be beneficial to be able to remember
        // both potential memory locations. Since we are discarding
        // src value info, don't do the transformation if the memory
        // locations are not in the default address space.
        LLD->getPointerInfo().getAddrSpace() != 0 ||
        RLD->getPointerInfo().getAddrSpace() != 0 ||
        !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
                                      LLD->getBasePtr().getValueType()))
      return false;

    // Check that the select condition doesn't reach either load. If so,
    // folding this will induce a cycle into the DAG. If not, this is safe to
    // xform, so create a select of the addresses.
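    // In DAG terms the goal is:
    //   (select C, (load A), (load B)) -> (load (select C, A, B))
    // which is only safe because the checks below reject the fold whenever
    // the condition C depends on either load (that would create a cycle).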
    SDValue Addr;
    if (TheSelect->getOpcode() == ISD::SELECT) {
      SDNode *CondNode = TheSelect->getOperand(0).getNode();
      if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
          (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
        return false;
      // The loads must not depend on one another.
      if (LLD->isPredecessorOf(RLD) ||
          RLD->isPredecessorOf(LLD))
        return false;
      Addr = DAG.getSelect(SDLoc(TheSelect),
                           LLD->getBasePtr().getValueType(),
                           TheSelect->getOperand(0), LLD->getBasePtr(),
                           RLD->getBasePtr());
    } else {  // Otherwise SELECT_CC
      SDNode *CondLHS = TheSelect->getOperand(0).getNode();
      SDNode *CondRHS = TheSelect->getOperand(1).getNode();

      if ((LLD->hasAnyUseOfValue(1) &&
           (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
          (RLD->hasAnyUseOfValue(1) &&
           (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
        return false;

      Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
                         LLD->getBasePtr().getValueType(),
                         TheSelect->getOperand(0),
                         TheSelect->getOperand(1),
                         LLD->getBasePtr(), RLD->getBasePtr(),
                         TheSelect->getOperand(4));
    }

    SDValue Load;
    // It is safe to replace the two loads if they have different alignments,
    // but the new load must be the minimum (most restrictive) alignment of the
    // inputs.
    bool isInvariant = LLD->isInvariant() && RLD->isInvariant();
    unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment());
    if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
      Load = DAG.getLoad(TheSelect->getValueType(0),
                         SDLoc(TheSelect),
                         // FIXME: Discards pointer and AA info.
                         LLD->getChain(), Addr, MachinePointerInfo(),
                         LLD->isVolatile(), LLD->isNonTemporal(),
                         isInvariant, Alignment);
    } else {
      Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
                            RLD->getExtensionType() : LLD->getExtensionType(),
                            SDLoc(TheSelect),
                            TheSelect->getValueType(0),
                            // FIXME: Discards pointer and AA info.
                            LLD->getChain(), Addr, MachinePointerInfo(),
                            LLD->getMemoryVT(), LLD->isVolatile(),
                            LLD->isNonTemporal(), isInvariant, Alignment);
    }

    // Users of the select now use the result of the load.
    CombineTo(TheSelect, Load);

    // Users of the old loads now use the new load's chain. We know the
    // old-load value is dead now.
    CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
    CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
    return true;
  }

  return false;
}

/// SimplifySelectCC - Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
                                      SDValue N2, SDValue N3,
                                      ISD::CondCode CC, bool NotExtCompare) {
  // (x ? y : y) -> y.
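  // For example, (select_cc setlt, a, b, 7, 7) folds to 7 regardless of how
  // the comparison resolves.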
  if (N2 == N3) return N2;

  EVT VT = N2.getValueType();
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());

  // Determine if the condition we're dealing with is constant.
  SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
                              N0, N1, CC, DL, false);
  if (SCC.getNode()) AddToWorklist(SCC.getNode());
  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());

  // fold select_cc true, x, y -> x
  if (SCCC && !SCCC->isNullValue())
    return N2;
  // fold select_cc false, x, y -> y
  if (SCCC && SCCC->isNullValue())
    return N3;

  // Check to see if we can simplify the select into an fabs node.
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
    // Allow either -0.0 or 0.0
    if (CFP->getValueAPF().isZero()) {
      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
          N2 == N3.getOperand(0))
        return DAG.getNode(ISD::FABS, DL, VT, N0);

      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
          N2.getOperand(0) == N3)
        return DAG.getNode(ISD::FABS, DL, VT, N3);
    }
  }

  // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
  // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
  // in it. This is a win when the constant is not otherwise available because
  // it replaces two constant pool loads with one. We only do this if the FP
  // type is known to be legal, because if it isn't, then we are before legalize
  // types and we want the other legalization to happen first (e.g. to avoid
  // messing with soft float) and if the ConstantFP is not legal, because if
  // it is legal, we may not need to store the FP constant in a constant pool.
  if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
    if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
      if (TLI.isTypeLegal(N2.getValueType()) &&
          (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
           TargetLowering::Legal &&
           !TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) &&
           !TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) &&
          // If both constants have multiple uses, then we won't need to do an
          // extra load, they are likely around in registers for other users.
          (TV->hasOneUse() || FV->hasOneUse())) {
        Constant *Elts[] = {
          const_cast<ConstantFP*>(FV->getConstantFPValue()),
          const_cast<ConstantFP*>(TV->getConstantFPValue())
        };
        Type *FPTy = Elts[0]->getType();
        const DataLayout &TD = *TLI.getDataLayout();

        // Create a ConstantArray of the two constants.
        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
        SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
                                            TD.getPrefTypeAlignment(FPTy));
        unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();

        // Get the offsets to the 0 and 1 element of the array so that we can
        // select between them.
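        // For example, with f32 constants the array spans 8 bytes: element 0
        // (the false value) lives at offset 0 and element 1 (the true value)
        // at offset 4, so selecting between those two offsets below picks the
        // right constant.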
        SDValue Zero = DAG.getIntPtrConstant(0);
        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
        SDValue One = DAG.getIntPtrConstant(EltSize);

        SDValue Cond = DAG.getSetCC(DL,
                                    getSetCCResultType(N0.getValueType()),
                                    N0, N1, CC);
        AddToWorklist(Cond.getNode());
        SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
                                          Cond, One, Zero);
        AddToWorklist(CstOffset.getNode());
        CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
                            CstOffset);
        AddToWorklist(CPIdx.getNode());
        return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
                           MachinePointerInfo::getConstantPool(), false,
                           false, false, Alignment);
      }
    }

  // Check to see if we can perform the "gzip trick", transforming
  // (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1)), A)
  if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
      (N1C->isNullValue() ||                        // (a < 0) ? b : 0
       (N1C->getAPIntValue() == 1 && N0 == N2))) {  // (a < 1) ? a : 0
    EVT XType = N0.getValueType();
    EVT AType = N2.getValueType();
    if (XType.bitsGE(AType)) {
      // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a
      // single-bit constant.
      if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
        unsigned ShCtV = N2C->getAPIntValue().logBase2();
        ShCtV = XType.getSizeInBits()-ShCtV-1;
        SDValue ShCt = DAG.getConstant(ShCtV,
                                       getShiftAmountTy(N0.getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, SDLoc(N0),
                                    XType, N0, ShCt);
        AddToWorklist(Shift.getNode());

        if (XType.bitsGT(AType)) {
          Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
          AddToWorklist(Shift.getNode());
        }

        return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
      }

      SDValue Shift = DAG.getNode(ISD::SRA, SDLoc(N0),
                                  XType, N0,
                                  DAG.getConstant(XType.getSizeInBits()-1,
                                                  getShiftAmountTy(N0.getValueType())));
      AddToWorklist(Shift.getNode());

      if (XType.bitsGT(AType)) {
        Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
        AddToWorklist(Shift.getNode());
      }

      return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
    }
  }

  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
  // where y has a single bit set.
  // A plaintext description would be: we can turn the SELECT_CC into an AND
  // when the condition can be materialized as an all-ones register. Any
  // single bit-test can be materialized as an all-ones register with
  // shift-left and shift-right-arith.
  if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
      N0->getValueType(0) == VT &&
      N1C && N1C->isNullValue() &&
      N2C && N2C->isNullValue()) {
    SDValue AndLHS = N0->getOperand(0);
    ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
      // Shift the tested bit over the sign bit.
      APInt AndMask = ConstAndRHS->getAPIntValue();
      SDValue ShlAmt =
        DAG.getConstant(AndMask.countLeadingZeros(),
                        getShiftAmountTy(AndLHS.getValueType()));
      SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);

      // Now arithmetic-right-shift it all the way over, so the result is
      // either all-ones or zero.
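      // Worked example (hypothetical, i32): for y == 0x4 the mask has 29
      // leading zeros, so the shl moves bit 2 into the sign bit, and the
      // sra below smears it into 0 or all-ones for the final AND with A.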
      SDValue ShrAmt =
        DAG.getConstant(AndMask.getBitWidth()-1,
                        getShiftAmountTy(Shl.getValueType()));
      SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);

      return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
    }
  }

  // fold select C, 16, 0 -> shl C, 4
  if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
      TLI.getBooleanContents(N0.getValueType()) ==
        TargetLowering::ZeroOrOneBooleanContent) {

    // If the caller doesn't want us to simplify this into a zext of a compare,
    // don't do it.
    if (NotExtCompare && N2C->getAPIntValue() == 1)
      return SDValue();

    // Get a SetCC of the condition.
    // NOTE: Don't create a SETCC if it's not legal on this target.
    if (!LegalOperations ||
        TLI.isOperationLegal(ISD::SETCC,
          LegalTypes ? getSetCCResultType(N0.getValueType()) : MVT::i1)) {
      SDValue Temp, SCC;
      // cast from setcc result type to select result type
      if (LegalTypes) {
        SCC = DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()),
                           N0, N1, CC);
        if (N2.getValueType().bitsLT(SCC.getValueType()))
          Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2),
                                        N2.getValueType());
        else
          Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
                             N2.getValueType(), SCC);
      } else {
        SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
        Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
                           N2.getValueType(), SCC);
      }

      AddToWorklist(SCC.getNode());
      AddToWorklist(Temp.getNode());

      if (N2C->getAPIntValue() == 1)
        return Temp;

      // shl setcc result by log2 n2c
      return DAG.getNode(
          ISD::SHL, DL, N2.getValueType(), Temp,
          DAG.getConstant(N2C->getAPIntValue().logBase2(),
                          getShiftAmountTy(Temp.getValueType())));
    }
  }

  // Check to see if this is the equivalent of setcc.
  // FIXME: Turn all of these into setcc if setcc is legal;
  // otherwise, go ahead with the folds.
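  // Note: the block below is currently disabled; the leading "0 &&" in its
  // condition means these setcc-based folds never fire.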
  if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
    EVT XType = N0.getValueType();
    if (!LegalOperations ||
        TLI.isOperationLegal(ISD::SETCC, getSetCCResultType(XType))) {
      SDValue Res = DAG.getSetCC(DL, getSetCCResultType(XType), N0, N1, CC);
      if (Res.getValueType() != VT)
        Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res);
      return Res;
    }

    // fold (seteq X, 0) -> (srl (ctlz X, log2(size(X))))
    if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
        (!LegalOperations ||
         TLI.isOperationLegal(ISD::CTLZ, XType))) {
      SDValue Ctlz = DAG.getNode(ISD::CTLZ, SDLoc(N0), XType, N0);
      return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
                         DAG.getConstant(Log2_32(XType.getSizeInBits()),
                                         getShiftAmountTy(Ctlz.getValueType())));
    }
    // fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
    if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
      SDValue NegN0 = DAG.getNode(ISD::SUB, SDLoc(N0),
                                  XType, DAG.getConstant(0, XType), N0);
      SDValue NotN0 = DAG.getNOT(SDLoc(N0), N0, XType);
      return DAG.getNode(ISD::SRL, DL, XType,
                         DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
                         DAG.getConstant(XType.getSizeInBits()-1,
                                         getShiftAmountTy(XType)));
    }
    // fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
    if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
      SDValue Sign = DAG.getNode(ISD::SRL, SDLoc(N0), XType, N0,
                                 DAG.getConstant(XType.getSizeInBits()-1,
                                                 getShiftAmountTy(N0.getValueType())));
      return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType));
    }
  }

  // Check to see if this is an integer abs.
  // select_cc setg[te] X,  0, X, -X ->
  // select_cc setgt    X, -1, X, -X ->
  // select_cc setl[te] X,  0, -X, X ->
  // select_cc setlt    X,  1, -X, X ->
  //   Y = sra (X, size(X)-1); xor (add (X, Y), Y)
  if (N1C) {
    ConstantSDNode *SubC = nullptr;
    if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
         (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
        N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
    else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
              (N1C->isOne() && CC == ISD::SETLT)) &&
             N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));

    EVT XType = N0.getValueType();
    if (SubC && SubC->isNullValue() && XType.isInteger()) {
      SDValue Shift = DAG.getNode(ISD::SRA, SDLoc(N0), XType,
                                  N0,
                                  DAG.getConstant(XType.getSizeInBits()-1,
                                                  getShiftAmountTy(N0.getValueType())));
      SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0),
                                XType, N0, Shift);
      AddToWorklist(Shift.getNode());
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
    }
  }

  return SDValue();
}

/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
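/// All of the real work happens in the target-independent TLI implementation;
/// this wrapper just threads the combiner's Level and a pointer back to this
/// combiner through a TargetLowering::DAGCombinerInfo.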
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
                                   SDValue N1, ISD::CondCode Cond,
                                   SDLoc DL, bool foldBooleans) {
  TargetLowering::DAGCombinerInfo
    DagCombineInfo(DAG, Level, false, this);
  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
}

/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
  ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
  if (!C)
    return SDValue();

  // Avoid division by zero.
  if (!C->getAPIntValue())
    return SDValue();

  std::vector<SDNode*> Built;
  SDValue S =
      TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);

  for (SDNode *N : Built)
    AddToWorklist(N);
  return S;
}

/// BuildSDIVPow2 - Given an ISD::SDIV node expressing a divide by constant
/// power of 2, return a DAG expression to select that will generate the same
/// value by right shifting.
SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
  ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
  if (!C)
    return SDValue();

  // Avoid division by zero.
  if (!C->getAPIntValue())
    return SDValue();

  std::vector<SDNode *> Built;
  SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, &Built);

  for (SDNode *N : Built)
    AddToWorklist(N);
  return S;
}

/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
  ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
  if (!C)
    return SDValue();

  // Avoid division by zero.
  if (!C->getAPIntValue())
    return SDValue();

  std::vector<SDNode*> Built;
  SDValue S =
      TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);

  for (SDNode *N : Built)
    AddToWorklist(N);
  return S;
}

/// FindBaseOffset - Return true if base is a frame index, which is known not
/// to alias with anything but itself. Provides base object and offset as
/// results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
                           const GlobalValue *&GV, const void *&CV) {
  // Assume it is a primitive operation.
  Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;

  // If it's adding a simple constant, integrate the offset.
  if (Base.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
      Base = Base.getOperand(0);
      Offset += C->getZExtValue();
    }
  }

  // Return the underlying GlobalValue, and update the Offset. Return false
  // for GlobalAddressSDNode since the same GlobalAddress may be represented
  // by multiple nodes with different offsets.
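  // For example, two GlobalAddressSDNodes referring to "@g" at offsets 0 and
  // 4 are distinct nodes; returning the GlobalValue plus an accumulated
  // Offset is what lets the caller compare them meaningfully.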
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
    GV = G->getGlobal();
    Offset += G->getOffset();
    return false;
  }

  // Return the underlying Constant value, and update the Offset. Return false
  // for ConstantSDNodes since the same constant pool entry may be represented
  // by multiple nodes with different offsets.
  if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
    CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal()
                                         : (const void *)C->getConstVal();
    Offset += C->getOffset();
    return false;
  }
  // If it's any of the following then it can't alias with anything but itself.
  return isa<FrameIndexSDNode>(Base);
}

/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
  // If they are the same then they must be aliases.
  if (Op0->getBasePtr() == Op1->getBasePtr()) return true;

  // If they are both volatile then they cannot be reordered.
  if (Op0->isVolatile() && Op1->isVolatile()) return true;

  // Gather base node and offset information.
  SDValue Base1, Base2;
  int64_t Offset1, Offset2;
  const GlobalValue *GV1, *GV2;
  const void *CV1, *CV2;
  bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
                                      Base1, Offset1, GV1, CV1);
  bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
                                      Base2, Offset2, GV2, CV2);

  // If they have the same base address, check to see if they overlap.
  if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
    return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
             (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);

  // It is possible for different frame indices to alias each other, mostly
  // when tail call optimization reuses return address slots for arguments.
  // To catch this case, look up the actual index of frame indices to compute
  // the real alias relationship.
  if (isFrameIndex1 && isFrameIndex2) {
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
    Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
    return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
             (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
  }

  // Otherwise, if we know what the bases are, and they aren't identical, then
  // we know they cannot alias.
  if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
    return false;

  // If we know required SrcValue1 and SrcValue2 have relatively large alignment
  // compared to the size and offset of the access, we may be able to prove they
  // do not alias. This check is conservative for now to catch cases created by
  // splitting vector types.
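  // For example (hypothetical): two 8-byte accesses, each 16-byte aligned,
  // whose offsets are 0 and 8 modulo that alignment can never overlap, even
  // when nothing is known about their base pointers.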
  if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) &&
      (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) &&
      (Op0->getMemoryVT().getSizeInBits() >> 3 ==
       Op1->getMemoryVT().getSizeInBits() >> 3) &&
      (Op0->getOriginalAlignment() >
       (Op0->getMemoryVT().getSizeInBits() >> 3))) {
    int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment();
    int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment();

    // There is no overlap between these relatively aligned accesses of similar
    // size, return no alias.
    if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 ||
        (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1)
      return false;
  }

  bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0 ? CombinerGlobalAA :
    TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
#ifndef NDEBUG
  if (CombinerAAOnlyFunc.getNumOccurrences() &&
      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
    UseAA = false;
#endif
  if (UseAA &&
      Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
    // Use alias analysis information.
    int64_t MinOffset = std::min(Op0->getSrcValueOffset(),
                                 Op1->getSrcValueOffset());
    int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) +
        Op0->getSrcValueOffset() - MinOffset;
    int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
        Op1->getSrcValueOffset() - MinOffset;
    AliasAnalysis::AliasResult AAResult =
        AA.alias(AliasAnalysis::Location(Op0->getMemOperand()->getValue(),
                                         Overlap1,
                                         UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
                 AliasAnalysis::Location(Op1->getMemOperand()->getValue(),
                                         Overlap2,
                                         UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
    if (AAResult == AliasAnalysis::NoAlias)
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}

/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
                                   SmallVectorImpl<SDValue> &Aliases) {
  SmallVector<SDValue, 8> Chains;     // List of chains to visit.
  SmallPtrSet<SDNode *, 16> Visited;  // Visited node set.

  // Get alias information for node.
  bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile();

  // Starting off.
  Chains.push_back(OriginalChain);
  unsigned Depth = 0;

  // Look at each chain and determine if it is an alias. If so, add it to the
  // aliases list. If not, then continue up the chain looking for the next
  // candidate.
  while (!Chains.empty()) {
    SDValue Chain = Chains.back();
    Chains.pop_back();

    // For TokenFactor nodes, look at each operand and only continue up the
    // chain until we find two aliases. If we've seen two aliases, assume we'll
    // find more and revert to original chain since the xform is unlikely to be
    // profitable.
    //
    // FIXME: The depth check could be made to return the last non-aliasing
    // chain we found before we hit a tokenfactor rather than the original
    // chain.
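    // The limits here (depth 6, at most two aliases) are heuristic cut-offs;
    // beyond either one we give up and conservatively keep the original
    // chain.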
    if (Depth > 6 || Aliases.size() == 2) {
      Aliases.clear();
      Aliases.push_back(OriginalChain);
      return;
    }

    // Don't bother if we've been here before.
    if (!Visited.insert(Chain.getNode()))
      continue;

    switch (Chain.getOpcode()) {
    case ISD::EntryToken:
      // Entry token is ideal chain operand, but handled in FindBetterChain.
      break;

    case ISD::LOAD:
    case ISD::STORE: {
      // Get alias information for Chain.
      bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) &&
          !cast<LSBaseSDNode>(Chain.getNode())->isVolatile();

      // If chain is alias then stop here.
      if (!(IsLoad && IsOpLoad) &&
          isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) {
        Aliases.push_back(Chain);
      } else {
        // Look further up the chain.
        Chains.push_back(Chain.getOperand(0));
        ++Depth;
      }
      break;
    }

    case ISD::TokenFactor:
      // We have to check each of the operands of the token factor for "small"
      // token factors, so we queue them up. Adding the operands to the queue
      // (stack) in reverse order maintains the original order and increases
      // the likelihood that getNode will find a matching token factor (CSE.)
      if (Chain.getNumOperands() > 16) {
        Aliases.push_back(Chain);
        break;
      }
      for (unsigned n = Chain.getNumOperands(); n;)
        Chains.push_back(Chain.getOperand(--n));
      ++Depth;
      break;

    default:
      // For all other instructions we will just have to take what we can get.
      Aliases.push_back(Chain);
      break;
    }
  }

  // We need to be careful here to also search for aliases through the
  // value operand of a store, etc. Consider the following situation:
  //   Token1 = ...
  //   L1 = load Token1, %52
  //   S1 = store Token1, L1, %51
  //   L2 = load Token1, %52+8
  //   S2 = store Token1, L2, %51+8
  //   Token2 = Token(S1, S2)
  //   L3 = load Token2, %53
  //   S3 = store Token2, L3, %52
  //   L4 = load Token2, %53+8
  //   S4 = store Token2, L4, %52+8
  // If we search for aliases of S3 (which loads address %52), and we look
  // only through the chain, then we'll miss the trivial dependence on L1
  // (which also loads from %52). We then might change all loads and
  // stores to use Token1 as their chain operand, which could result in
  // copying %53 into %52 before copying %52 into %51 (which should
  // happen first).
  //
  // The problem is, however, that searching for such data dependencies
  // can become expensive, and the cost is not directly related to the
  // chain depth. Instead, we'll rule out such configurations here by
  // insisting that we've visited all chain users (except for users
  // of the original chain, which is not necessary). When doing this, we
  // need to look through nodes we don't care about (otherwise, things
  // like register copies will interfere with trivial cases).

  SmallVector<const SDNode *, 16> Worklist;
  for (SmallPtrSet<SDNode *, 16>::iterator I = Visited.begin(),
       IE = Visited.end(); I != IE; ++I)
    if (*I != OriginalChain.getNode())
      Worklist.push_back(*I);

  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();

    // We have already visited M, and want to make sure we've visited any uses
    // of M that we care about.
    // For uses that we've not visited, and don't care about, queue them to
    // the worklist.

    for (SDNode::use_iterator UI = M->use_begin(),
         UIE = M->use_end(); UI != UIE; ++UI)
      if (UI.getUse().getValueType() == MVT::Other && Visited.insert(*UI)) {
        if (isa<MemIntrinsicSDNode>(*UI) || isa<MemSDNode>(*UI)) {
          // We've not visited this use, and we care about it (it could have an
          // ordering dependency with the original node).
          Aliases.clear();
          Aliases.push_back(OriginalChain);
          return;
        }

        // We've not visited this use, but we don't care about it. Mark it as
        // visited and enqueue it to the worklist.
        Worklist.push_back(*UI);
      }
  }
}

/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking
/// for a better chain (aliasing node).
SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
  SmallVector<SDValue, 8> Aliases;  // Ops for replacing token factor.

  // Accumulate all the aliases to this node.
  GatherAllAliases(N, OldChain, Aliases);

  // If no operands then chain to entry token.
  if (Aliases.size() == 0)
    return DAG.getEntryNode();

  // If a single operand then chain to it. We don't need to revisit it.
  if (Aliases.size() == 1)
    return Aliases[0];

  // Construct a custom tailored token factor.
  return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
}

// SelectionDAG::Combine - This is the entry point for the file.
//
void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA,
                           CodeGenOpt::Level OptLevel) {
  // Create a DAGCombiner for this DAG and run it at the requested level.
  DAGCombiner(*this, AA, OptLevel).Run(Level);
}
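
// A minimal sketch of how this entry point is driven (hypothetical caller;
// in-tree, SelectionDAGISel invokes it around the legalization phases):
//
//   CurDAG->Combine(BeforeLegalizeTypes, *AA, OptLevel);
//   // ...type and operation legalization run here...
//   CurDAG->Combine(AfterLegalizeDAG, *AA, OptLevel);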