1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This pass munges the code in the input function to better prepare it for 11 // SelectionDAG-based code generation. This works around limitations in its 12 // basic-block-at-a-time approach. It should eventually be removed. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "llvm/CodeGen/Passes.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/SetVector.h" 19 #include "llvm/ADT/SmallSet.h" 20 #include "llvm/ADT/Statistic.h" 21 #include "llvm/Analysis/BlockFrequencyInfo.h" 22 #include "llvm/Analysis/BranchProbabilityInfo.h" 23 #include "llvm/Analysis/CFG.h" 24 #include "llvm/Analysis/InstructionSimplify.h" 25 #include "llvm/Analysis/LoopInfo.h" 26 #include "llvm/Analysis/ProfileSummaryInfo.h" 27 #include "llvm/Analysis/TargetLibraryInfo.h" 28 #include "llvm/Analysis/TargetTransformInfo.h" 29 #include "llvm/Analysis/ValueTracking.h" 30 #include "llvm/Analysis/MemoryBuiltins.h" 31 #include "llvm/CodeGen/Analysis.h" 32 #include "llvm/IR/CallSite.h" 33 #include "llvm/IR/Constants.h" 34 #include "llvm/IR/DataLayout.h" 35 #include "llvm/IR/DerivedTypes.h" 36 #include "llvm/IR/Dominators.h" 37 #include "llvm/IR/Function.h" 38 #include "llvm/IR/GetElementPtrTypeIterator.h" 39 #include "llvm/IR/IRBuilder.h" 40 #include "llvm/IR/InlineAsm.h" 41 #include "llvm/IR/Instructions.h" 42 #include "llvm/IR/IntrinsicInst.h" 43 #include "llvm/IR/MDBuilder.h" 44 #include "llvm/IR/PatternMatch.h" 45 #include "llvm/IR/Statepoint.h" 46 #include "llvm/IR/ValueHandle.h" 47 #include "llvm/IR/ValueMap.h" 48 #include "llvm/Pass.h" 49 #include "llvm/Support/BranchProbability.h" 50 #include "llvm/Support/CommandLine.h" 51 #include "llvm/Support/Debug.h" 52 #include "llvm/Support/raw_ostream.h" 53 #include "llvm/Target/TargetLowering.h" 54 #include "llvm/Target/TargetSubtargetInfo.h" 55 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 56 #include "llvm/Transforms/Utils/BuildLibCalls.h" 57 #include "llvm/Transforms/Utils/BypassSlowDivision.h" 58 #include "llvm/Transforms/Utils/Cloning.h" 59 #include "llvm/Transforms/Utils/Local.h" 60 #include "llvm/Transforms/Utils/SimplifyLibCalls.h" 61 #include "llvm/Transforms/Utils/ValueMapper.h" 62 using namespace llvm; 63 using namespace llvm::PatternMatch; 64 65 #define DEBUG_TYPE "codegenprepare" 66 67 STATISTIC(NumBlocksElim, "Number of blocks eliminated"); 68 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); 69 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); 70 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " 71 "sunken Cmps"); 72 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " 73 "of sunken Casts"); 74 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " 75 "computations were sunk"); 76 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); 77 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); 78 STATISTIC(NumAndsAdded, 79 "Number of and mask instructions added to form ext loads"); 80 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); 81 STATISTIC(NumRetsDup, "Number of return instructions
duplicated"); 82 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); 83 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); 84 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); 85 86 static cl::opt<bool> DisableBranchOpts( 87 "disable-cgp-branch-opts", cl::Hidden, cl::init(false), 88 cl::desc("Disable branch optimizations in CodeGenPrepare")); 89 90 static cl::opt<bool> 91 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), 92 cl::desc("Disable GC optimizations in CodeGenPrepare")); 93 94 static cl::opt<bool> DisableSelectToBranch( 95 "disable-cgp-select2branch", cl::Hidden, cl::init(false), 96 cl::desc("Disable select to branch conversion.")); 97 98 static cl::opt<bool> AddrSinkUsingGEPs( 99 "addr-sink-using-gep", cl::Hidden, cl::init(true), 100 cl::desc("Address sinking in CGP using GEPs.")); 101 102 static cl::opt<bool> EnableAndCmpSinking( 103 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 104 cl::desc("Enable sinkinig and/cmp into branches.")); 105 106 static cl::opt<bool> DisableStoreExtract( 107 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 108 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 109 110 static cl::opt<bool> StressStoreExtract( 111 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 112 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 113 114 static cl::opt<bool> DisableExtLdPromotion( 115 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 116 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 117 "CodeGenPrepare")); 118 119 static cl::opt<bool> StressExtLdPromotion( 120 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 121 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 122 "optimization in CodeGenPrepare")); 123 124 static cl::opt<bool> DisablePreheaderProtect( 125 "disable-preheader-prot", cl::Hidden, cl::init(false), 126 cl::desc("Disable protection against removing loop preheaders")); 127 128 static cl::opt<bool> ProfileGuidedSectionPrefix( 129 "profile-guided-section-prefix", cl::Hidden, cl::init(true), 130 cl::desc("Use profile info to add section prefix for hot/cold functions")); 131 132 static cl::opt<unsigned> FreqRatioToSkipMerge( 133 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), 134 cl::desc("Skip merging empty blocks if (frequency of empty block) / " 135 "(frequency of destination block) is greater than this ratio")); 136 137 static cl::opt<bool> ForceSplitStore( 138 "force-split-store", cl::Hidden, cl::init(false), 139 cl::desc("Force store splitting no matter what the target query says.")); 140 141 static cl::opt<bool> 142 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden, 143 cl::desc("Enable merging of redundant sexts when one is dominating" 144 " the other."), cl::init(true)); 145 146 namespace { 147 typedef SmallPtrSet<Instruction *, 16> SetOfInstrs; 148 typedef PointerIntPair<Type *, 1, bool> TypeIsSExt; 149 typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy; 150 typedef SmallVector<Instruction *, 16> SExts; 151 typedef DenseMap<Value *, SExts> ValueToSExts; 152 class TypePromotionTransaction; 153 154 class CodeGenPrepare : public FunctionPass { 155 const TargetMachine *TM; 156 const TargetSubtargetInfo *SubtargetInfo; 157 const TargetLowering *TLI; 158 const TargetRegisterInfo *TRI; 159 const TargetTransformInfo *TTI; 160 const TargetLibraryInfo *TLInfo; 161 const LoopInfo *LI; 162 
std::unique_ptr<BlockFrequencyInfo> BFI; 163 std::unique_ptr<BranchProbabilityInfo> BPI; 164 165 /// As we scan instructions optimizing them, this is the next instruction 166 /// to optimize. Transforms that can invalidate this should update it. 167 BasicBlock::iterator CurInstIterator; 168 169 /// Keeps track of non-local addresses that have been sunk into a block. 170 /// This allows us to avoid inserting duplicate code for blocks with 171 /// multiple loads/stores of the same address. 172 ValueMap<Value*, Value*> SunkAddrs; 173 174 /// Keeps track of all instructions inserted for the current function. 175 SetOfInstrs InsertedInsts; 176 /// Keeps track of the types of the related instructions before their 177 /// promotion for the current function. 178 InstrToOrigTy PromotedInsts; 179 180 /// Keep track of instructions removed during promotion. 181 SetOfInstrs RemovedInsts; 182 183 /// Keep track of sext chains based on their initial value. 184 DenseMap<Value *, Instruction *> SeenChainsForSExt; 185 186 /// Keep track of promoted SExts. 187 ValueToSExts ValToSExtendedUses; 188 189 /// True if the CFG is modified in any way. 190 bool ModifiedDT; 191 192 /// True if optimizing for size. 193 bool OptSize; 194 195 /// DataLayout for the Function being processed. 196 const DataLayout *DL; 197 198 public: 199 static char ID; // Pass identification, replacement for typeid 200 explicit CodeGenPrepare(const TargetMachine *TM = nullptr) 201 : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) { 202 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); 203 } 204 bool runOnFunction(Function &F) override; 205 206 StringRef getPassName() const override { return "CodeGen Prepare"; } 207 208 void getAnalysisUsage(AnalysisUsage &AU) const override { 209 // FIXME: When we can selectively preserve passes, preserve the domtree.
210 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 211 AU.addRequired<TargetLibraryInfoWrapperPass>(); 212 AU.addRequired<TargetTransformInfoWrapperPass>(); 213 AU.addRequired<LoopInfoWrapperPass>(); 214 } 215 216 private: 217 bool eliminateFallThrough(Function &F); 218 bool eliminateMostlyEmptyBlocks(Function &F); 219 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB); 220 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; 221 void eliminateMostlyEmptyBlock(BasicBlock *BB); 222 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB, 223 bool isPreheader); 224 bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT); 225 bool optimizeInst(Instruction *I, bool& ModifiedDT); 226 bool optimizeMemoryInst(Instruction *I, Value *Addr, 227 Type *AccessTy, unsigned AS); 228 bool optimizeInlineAsmInst(CallInst *CS); 229 bool optimizeCallInst(CallInst *CI, bool& ModifiedDT); 230 bool optimizeExt(Instruction *&I); 231 bool optimizeExtUses(Instruction *I); 232 bool optimizeLoadExt(LoadInst *I); 233 bool optimizeSelectInst(SelectInst *SI); 234 bool optimizeShuffleVectorInst(ShuffleVectorInst *SI); 235 bool optimizeSwitchInst(SwitchInst *CI); 236 bool optimizeExtractElementInst(Instruction *Inst); 237 bool dupRetToEnableTailCallOpts(BasicBlock *BB); 238 bool placeDbgValues(Function &F); 239 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts, 240 LoadInst *&LI, Instruction *&Inst, bool HasPromoted); 241 bool tryToPromoteExts(TypePromotionTransaction &TPT, 242 const SmallVectorImpl<Instruction *> &Exts, 243 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 244 unsigned CreatedInstsCost = 0); 245 bool mergeSExts(Function &F); 246 bool performAddressTypePromotion( 247 Instruction *&Inst, 248 bool AllowPromotionWithoutCommonHeader, 249 bool HasPromoted, TypePromotionTransaction &TPT, 250 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts); 251 bool splitBranchCondition(Function &F); 252 bool simplifyOffsetableRelocate(Instruction &I); 253 bool splitIndirectCriticalEdges(Function &F); 254 }; 255 } 256 257 char CodeGenPrepare::ID = 0; 258 INITIALIZE_TM_PASS_BEGIN(CodeGenPrepare, "codegenprepare", 259 "Optimize for code generation", false, false) 260 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 261 INITIALIZE_TM_PASS_END(CodeGenPrepare, "codegenprepare", 262 "Optimize for code generation", false, false) 263 264 FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) { 265 return new CodeGenPrepare(TM); 266 } 267 268 bool CodeGenPrepare::runOnFunction(Function &F) { 269 if (skipFunction(F)) 270 return false; 271 272 DL = &F.getParent()->getDataLayout(); 273 274 bool EverMadeChange = false; 275 // Clear per function information. 
InsertedInsts.clear(); 277 PromotedInsts.clear(); 278 BFI.reset(); 279 BPI.reset(); 280 281 ModifiedDT = false; 282 if (TM) { 283 SubtargetInfo = TM->getSubtargetImpl(F); 284 TLI = SubtargetInfo->getTargetLowering(); 285 TRI = SubtargetInfo->getRegisterInfo(); 286 } 287 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 288 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 289 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 290 OptSize = F.optForSize(); 291 292 if (ProfileGuidedSectionPrefix) { 293 ProfileSummaryInfo *PSI = 294 getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 295 if (PSI->isFunctionHotInCallGraph(&F)) 296 F.setSectionPrefix(".hot"); 297 else if (PSI->isFunctionColdInCallGraph(&F)) 298 F.setSectionPrefix(".unlikely"); 299 } 300 301 /// This optimization identifies DIV instructions that can be 302 /// profitably bypassed and carried out with a shorter, faster divide. 303 if (!OptSize && TLI && TLI->isSlowDivBypassed()) { 304 const DenseMap<unsigned int, unsigned int> &BypassWidths = 305 TLI->getBypassSlowDivWidths(); 306 BasicBlock* BB = &*F.begin(); 307 while (BB != nullptr) { 308 // bypassSlowDivision may create new BBs, but we don't want to reapply the 309 // optimization to those blocks. 310 BasicBlock* Next = BB->getNextNode(); 311 EverMadeChange |= bypassSlowDivision(BB, BypassWidths); 312 BB = Next; 313 } 314 } 315 316 // Eliminate blocks that contain only PHI nodes and an 317 // unconditional branch. 318 EverMadeChange |= eliminateMostlyEmptyBlocks(F); 319 320 // If llvm.dbg.value is far away from the value, then ISel may not be able to 321 // handle it properly. ISel will drop llvm.dbg.value if it cannot 322 // find a node corresponding to the value. 323 EverMadeChange |= placeDbgValues(F); 324 325 if (!DisableBranchOpts) 326 EverMadeChange |= splitBranchCondition(F); 327 328 // Split some critical edges where one of the sources is an indirect branch, 329 // to help generate sane code for PHIs involving such edges. 330 EverMadeChange |= splitIndirectCriticalEdges(F); 331 332 bool MadeChange = true; 333 while (MadeChange) { 334 MadeChange = false; 335 SeenChainsForSExt.clear(); 336 ValToSExtendedUses.clear(); 337 RemovedInsts.clear(); 338 for (Function::iterator I = F.begin(); I != F.end(); ) { 339 BasicBlock *BB = &*I++; 340 bool ModifiedDTOnIteration = false; 341 MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration); 342 343 // Restart BB iteration if the dominator tree of the Function was changed 344 if (ModifiedDTOnIteration) 345 break; 346 } 347 if (EnableTypePromotionMerge && !ValToSExtendedUses.empty()) 348 MadeChange |= mergeSExts(F); 349 350 // Really free removed instructions during promotion. 351 for (Instruction *I : RemovedInsts) 352 delete I; 353 354 EverMadeChange |= MadeChange; 355 } 356 357 SunkAddrs.clear(); 358 359 if (!DisableBranchOpts) { 360 MadeChange = false; 361 SmallPtrSet<BasicBlock*, 8> WorkList; 362 for (BasicBlock &BB : F) { 363 SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB)); 364 MadeChange |= ConstantFoldTerminator(&BB, true); 365 if (!MadeChange) continue; 366 367 for (SmallVectorImpl<BasicBlock*>::iterator 368 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 369 if (pred_begin(*II) == pred_end(*II)) 370 WorkList.insert(*II); 371 } 372 373 // Delete the dead blocks and any of their dead successors.
374 MadeChange |= !WorkList.empty(); 375 while (!WorkList.empty()) { 376 BasicBlock *BB = *WorkList.begin(); 377 WorkList.erase(BB); 378 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); 379 380 DeleteDeadBlock(BB); 381 382 for (SmallVectorImpl<BasicBlock*>::iterator 383 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 384 if (pred_begin(*II) == pred_end(*II)) 385 WorkList.insert(*II); 386 } 387 388 // Merge pairs of basic blocks with unconditional branches, connected by 389 // a single edge. 390 if (EverMadeChange || MadeChange) 391 MadeChange |= eliminateFallThrough(F); 392 393 EverMadeChange |= MadeChange; 394 } 395 396 if (!DisableGCOpts) { 397 SmallVector<Instruction *, 2> Statepoints; 398 for (BasicBlock &BB : F) 399 for (Instruction &I : BB) 400 if (isStatepoint(I)) 401 Statepoints.push_back(&I); 402 for (auto &I : Statepoints) 403 EverMadeChange |= simplifyOffsetableRelocate(*I); 404 } 405 406 return EverMadeChange; 407 } 408 409 /// Merge basic blocks which are connected by a single edge, where one of the 410 /// basic blocks has a single successor pointing to the other basic block, 411 /// which has a single predecessor. 412 bool CodeGenPrepare::eliminateFallThrough(Function &F) { 413 bool Changed = false; 414 // Scan all of the blocks in the function, except for the entry block. 415 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { 416 BasicBlock *BB = &*I++; 417 // If the destination block has a single pred, then this is a trivial 418 // edge, just collapse it. 419 BasicBlock *SinglePred = BB->getSinglePredecessor(); 420 421 // Don't merge if BB's address is taken. 422 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; 423 424 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); 425 if (Term && !Term->isConditional()) { 426 Changed = true; 427 DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n"); 428 // Remember if SinglePred was the entry block of the function. 429 // If so, we will need to move BB back to the entry position. 430 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 431 MergeBasicBlockIntoOnlyPred(BB, nullptr); 432 433 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 434 BB->moveBefore(&BB->getParent()->getEntryBlock()); 435 436 // We have erased a block. Update the iterator. 437 I = BB->getIterator(); 438 } 439 } 440 return Changed; 441 } 442 443 /// Find a destination block from BB if BB is mergeable empty block. 444 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) { 445 // If this block doesn't end with an uncond branch, ignore it. 446 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); 447 if (!BI || !BI->isUnconditional()) 448 return nullptr; 449 450 // If the instruction before the branch (skipping debug info) isn't a phi 451 // node, then other stuff is happening here. 452 BasicBlock::iterator BBI = BI->getIterator(); 453 if (BBI != BB->begin()) { 454 --BBI; 455 while (isa<DbgInfoIntrinsic>(BBI)) { 456 if (BBI == BB->begin()) 457 break; 458 --BBI; 459 } 460 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) 461 return nullptr; 462 } 463 464 // Do not break infinite loops. 465 BasicBlock *DestBB = BI->getSuccessor(0); 466 if (DestBB == BB) 467 return nullptr; 468 469 if (!canMergeBlocks(BB, DestBB)) 470 DestBB = nullptr; 471 472 return DestBB; 473 } 474 475 // Return the unique indirectbr predecessor of a block. 
This may return null 476 // even if such a predecessor exists, if it's not useful for splitting. 477 // If a predecessor is found, OtherPreds will contain all other (non-indirectbr) 478 // predecessors of BB. 479 static BasicBlock * 480 findIBRPredecessor(BasicBlock *BB, SmallVectorImpl<BasicBlock *> &OtherPreds) { 481 // If the block doesn't have any PHIs, we don't care about it, since there's 482 // no point in splitting it. 483 PHINode *PN = dyn_cast<PHINode>(BB->begin()); 484 if (!PN) 485 return nullptr; 486 487 // Verify we have exactly one IBR predecessor. 488 // Conservatively bail out if one of the other predecessors is not a "regular" 489 // terminator (that is, not a switch or a br). 490 BasicBlock *IBB = nullptr; 491 for (unsigned Pred = 0, E = PN->getNumIncomingValues(); Pred != E; ++Pred) { 492 BasicBlock *PredBB = PN->getIncomingBlock(Pred); 493 TerminatorInst *PredTerm = PredBB->getTerminator(); 494 switch (PredTerm->getOpcode()) { 495 case Instruction::IndirectBr: 496 if (IBB) 497 return nullptr; 498 IBB = PredBB; 499 break; 500 case Instruction::Br: 501 case Instruction::Switch: 502 OtherPreds.push_back(PredBB); 503 continue; 504 default: 505 return nullptr; 506 } 507 } 508 509 return IBB; 510 } 511 512 // Split critical edges where the source of the edge is an indirectbr 513 // instruction. This isn't always possible, but we can handle some easy cases. 514 // This is useful because MI is unable to split such critical edges, 515 // which means it will not be able to sink instructions along those edges. 516 // This is especially painful for indirect branches with many successors, where 517 // we end up having to prepare all outgoing values in the origin block. 518 // 519 // Our normal algorithm for splitting critical edges requires us to update 520 // the outgoing edges of the edge origin block, but for an indirectbr this 521 // is hard, since it would require finding and updating the block addresses 522 // the indirect branch uses. But if a block only has a single indirectbr 523 // predecessor, with the others being regular branches, we can do it in a 524 // different way. 525 // Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr. 526 // We can split D into D0 and D1, where D0 contains only the PHIs from D, 527 // and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and 528 // create the following structure: 529 // A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1 530 bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) { 531 // Check whether the function has any indirectbrs, and collect which blocks 532 // they may jump to. Since most functions don't have indirect branches, 533 // this lowers the common case's overhead to O(Blocks) instead of O(Edges). 534 SmallSetVector<BasicBlock *, 16> Targets; 535 for (auto &BB : F) { 536 auto *IBI = dyn_cast<IndirectBrInst>(BB.getTerminator()); 537 if (!IBI) 538 continue; 539 540 for (unsigned Succ = 0, E = IBI->getNumSuccessors(); Succ != E; ++Succ) 541 Targets.insert(IBI->getSuccessor(Succ)); 542 } 543 544 if (Targets.empty()) 545 return false; 546 547 bool Changed = false; 548 for (BasicBlock *Target : Targets) { 549 SmallVector<BasicBlock *, 16> OtherPreds; 550 BasicBlock *IBRPred = findIBRPredecessor(Target, OtherPreds); 551 // If we did not find an indirectbr, or the indirectbr is the only 552 // incoming edge, this isn't the kind of edge we're looking for. 553 if (!IBRPred || OtherPreds.empty()) 554 continue; 555 556 // Don't even think about ehpads/landingpads.
557 Instruction *FirstNonPHI = Target->getFirstNonPHI(); 558 if (FirstNonPHI->isEHPad() || Target->isLandingPad()) 559 continue; 560 561 BasicBlock *BodyBlock = Target->splitBasicBlock(FirstNonPHI, ".split"); 562 // It's possible Target was its own successor through an indirectbr. 563 // In this case, the indirectbr now comes from BodyBlock. 564 if (IBRPred == Target) 565 IBRPred = BodyBlock; 566 567 // At this point Target only has PHIs, and BodyBlock has the rest of the 568 // block's body. Create a copy of Target that will be used by the "direct" 569 // preds. 570 ValueToValueMapTy VMap; 571 BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F); 572 573 for (BasicBlock *Pred : OtherPreds) { 574 // If the target is a loop to itself, then the terminator of the split 575 // block needs to be updated. 576 if (Pred == Target) 577 BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc); 578 else 579 Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc); 580 } 581 582 // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that 583 // they are clones, so the number of PHIs are the same. 584 // (a) Remove the edge coming from IBRPred from the "Direct" PHI 585 // (b) Leave that as the only edge in the "Indirect" PHI. 586 // (c) Merge the two in the body block. 587 BasicBlock::iterator Indirect = Target->begin(), 588 End = Target->getFirstNonPHI()->getIterator(); 589 BasicBlock::iterator Direct = DirectSucc->begin(); 590 BasicBlock::iterator MergeInsert = BodyBlock->getFirstInsertionPt(); 591 592 assert(&*End == Target->getTerminator() && 593 "Block was expected to only contain PHIs"); 594 595 while (Indirect != End) { 596 PHINode *DirPHI = cast<PHINode>(Direct); 597 PHINode *IndPHI = cast<PHINode>(Indirect); 598 599 // Now, clean up - the direct block shouldn't get the indirect value, 600 // and vice versa. 601 DirPHI->removeIncomingValue(IBRPred); 602 Direct++; 603 604 // Advance the pointer here, to avoid invalidation issues when the old 605 // PHI is erased. 606 Indirect++; 607 608 PHINode *NewIndPHI = PHINode::Create(IndPHI->getType(), 1, "ind", IndPHI); 609 NewIndPHI->addIncoming(IndPHI->getIncomingValueForBlock(IBRPred), 610 IBRPred); 611 612 // Create a PHI in the body block, to merge the direct and indirect 613 // predecessors. 614 PHINode *MergePHI = 615 PHINode::Create(IndPHI->getType(), 2, "merge", &*MergeInsert); 616 MergePHI->addIncoming(NewIndPHI, Target); 617 MergePHI->addIncoming(DirPHI, DirectSucc); 618 619 IndPHI->replaceAllUsesWith(MergePHI); 620 IndPHI->eraseFromParent(); 621 } 622 623 Changed = true; 624 } 625 626 return Changed; 627 } 628 629 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an 630 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split 631 /// edges in ways that are non-optimal for isel. Start by eliminating these 632 /// blocks so we can split them the way we want them. 633 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) { 634 SmallPtrSet<BasicBlock *, 16> Preheaders; 635 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end()); 636 while (!LoopList.empty()) { 637 Loop *L = LoopList.pop_back_val(); 638 LoopList.insert(LoopList.end(), L->begin(), L->end()); 639 if (BasicBlock *Preheader = L->getLoopPreheader()) 640 Preheaders.insert(Preheader); 641 } 642 643 bool MadeChange = false; 644 // Note that this intentionally skips the entry block. 
for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { 646 BasicBlock *BB = &*I++; 647 BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB); 648 if (!DestBB || 649 !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB))) 650 continue; 651 652 eliminateMostlyEmptyBlock(BB); 653 MadeChange = true; 654 } 655 return MadeChange; 656 } 657 658 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB, 659 BasicBlock *DestBB, 660 bool isPreheader) { 661 // Do not delete loop preheaders if doing so would create a critical edge. 662 // Loop preheaders can be good locations to spill registers. If the 663 // preheader is deleted and we create a critical edge, registers may be 664 // spilled in the loop body instead. 665 if (!DisablePreheaderProtect && isPreheader && 666 !(BB->getSinglePredecessor() && 667 BB->getSinglePredecessor()->getSingleSuccessor())) 668 return false; 669 670 // Try to skip merging if the unique predecessor of BB is terminated by a 671 // switch or indirect branch instruction, and BB is used as an incoming block 672 // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel to 673 // add COPY instructions in the predecessor of BB instead of BB (if it is not 674 // merged). Note that the critical edge created by merging such blocks won't be 675 // split in MachineSink because the jump table is not analyzable. By keeping 676 // such an empty block (BB), ISel will place COPY instructions in BB, not in the 677 // predecessor of BB. 678 BasicBlock *Pred = BB->getUniquePredecessor(); 679 if (!Pred || 680 !(isa<SwitchInst>(Pred->getTerminator()) || 681 isa<IndirectBrInst>(Pred->getTerminator()))) 682 return true; 683 684 if (BB->getTerminator() != BB->getFirstNonPHI()) 685 return true; 686 687 // We use a simple cost heuristic which determines that skipping merging is 688 // profitable if the cost of skipping merging is less than the cost of 689 // merging: Cost(skipping merging) < Cost(merging BB), where the 690 // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and 691 // the Cost(merging BB) is Freq(Pred) * Cost(Copy). 692 // Assuming Cost(Copy) == Cost(Branch), we could simplify it to: 693 // Freq(Pred) / Freq(BB) > 2. 694 // Note that if there are multiple empty blocks sharing the same incoming 695 // value for the PHIs in the DestBB, we consider them together. In such a 696 // case, Cost(merging BB) will be the sum of their frequencies. 697 698 if (!isa<PHINode>(DestBB->begin())) 699 return true; 700 701 SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs; 702 703 // Find all other incoming blocks from which incoming values of all PHIs in 704 // DestBB are the same as the ones from BB. 705 for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E; 706 ++PI) { 707 BasicBlock *DestBBPred = *PI; 708 if (DestBBPred == BB) 709 continue; 710 711 bool HasAllSameValue = true; 712 BasicBlock::const_iterator DestBBI = DestBB->begin(); 713 while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) { 714 if (DestPN->getIncomingValueForBlock(BB) != 715 DestPN->getIncomingValueForBlock(DestBBPred)) { 716 HasAllSameValue = false; 717 break; 718 } 719 } 720 if (HasAllSameValue) 721 SameIncomingValueBBs.insert(DestBBPred); 722 } 723 724 // See if all BB's incoming values are the same as the value from Pred. In this 725 // case, there is no reason to skip merging because COPYs are expected to be placed in 726 // Pred already.
727 if (SameIncomingValueBBs.count(Pred)) 728 return true; 729 730 if (!BFI) { 731 Function &F = *BB->getParent(); 732 LoopInfo LI{DominatorTree(F)}; 733 BPI.reset(new BranchProbabilityInfo(F, LI)); 734 BFI.reset(new BlockFrequencyInfo(F, *BPI, LI)); 735 } 736 737 BlockFrequency PredFreq = BFI->getBlockFreq(Pred); 738 BlockFrequency BBFreq = BFI->getBlockFreq(BB); 739 740 for (auto SameValueBB : SameIncomingValueBBs) 741 if (SameValueBB->getUniquePredecessor() == Pred && 742 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB)) 743 BBFreq += BFI->getBlockFreq(SameValueBB); 744 745 return PredFreq.getFrequency() <= 746 BBFreq.getFrequency() * FreqRatioToSkipMerge; 747 } 748 749 /// Return true if we can merge BB into DestBB if there is a single 750 /// unconditional branch between them, and BB contains no other non-phi 751 /// instructions. 752 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB, 753 const BasicBlock *DestBB) const { 754 // We only want to eliminate blocks whose phi nodes are used by phi nodes in 755 // the successor. If there are more complex condition (e.g. preheaders), 756 // don't mess around with them. 757 BasicBlock::const_iterator BBI = BB->begin(); 758 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { 759 for (const User *U : PN->users()) { 760 const Instruction *UI = cast<Instruction>(U); 761 if (UI->getParent() != DestBB || !isa<PHINode>(UI)) 762 return false; 763 // If User is inside DestBB block and it is a PHINode then check 764 // incoming value. If incoming value is not from BB then this is 765 // a complex condition (e.g. preheaders) we want to avoid here. 766 if (UI->getParent() == DestBB) { 767 if (const PHINode *UPN = dyn_cast<PHINode>(UI)) 768 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) { 769 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I)); 770 if (Insn && Insn->getParent() == BB && 771 Insn->getParent() != UPN->getIncomingBlock(I)) 772 return false; 773 } 774 } 775 } 776 } 777 778 // If BB and DestBB contain any common predecessors, then the phi nodes in BB 779 // and DestBB may have conflicting incoming values for the block. If so, we 780 // can't merge the block. 781 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin()); 782 if (!DestBBPN) return true; // no conflict. 783 784 // Collect the preds of BB. 785 SmallPtrSet<const BasicBlock*, 16> BBPreds; 786 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 787 // It is faster to get preds from a PHI than with pred_iterator. 788 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 789 BBPreds.insert(BBPN->getIncomingBlock(i)); 790 } else { 791 BBPreds.insert(pred_begin(BB), pred_end(BB)); 792 } 793 794 // Walk the preds of DestBB. 795 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { 796 BasicBlock *Pred = DestBBPN->getIncomingBlock(i); 797 if (BBPreds.count(Pred)) { // Common predecessor? 798 BBI = DestBB->begin(); 799 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { 800 const Value *V1 = PN->getIncomingValueForBlock(Pred); 801 const Value *V2 = PN->getIncomingValueForBlock(BB); 802 803 // If V2 is a phi node in BB, look up what the mapped value will be. 804 if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) 805 if (V2PN->getParent() == BB) 806 V2 = V2PN->getIncomingValueForBlock(Pred); 807 808 // If there is a conflict, bail out. 
809 if (V1 != V2) return false; 810 } 811 } 812 } 813 814 return true; 815 } 816 817 818 /// Eliminate a basic block that has only phi's and an unconditional branch in 819 /// it. 820 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) { 821 BranchInst *BI = cast<BranchInst>(BB->getTerminator()); 822 BasicBlock *DestBB = BI->getSuccessor(0); 823 824 DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB); 825 826 // If the destination block has a single pred, then this is a trivial edge, 827 // just collapse it. 828 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { 829 if (SinglePred != DestBB) { 830 // Remember if SinglePred was the entry block of the function. If so, we 831 // will need to move BB back to the entry position. 832 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 833 MergeBasicBlockIntoOnlyPred(DestBB, nullptr); 834 835 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 836 BB->moveBefore(&BB->getParent()->getEntryBlock()); 837 838 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 839 return; 840 } 841 } 842 843 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB 844 // to handle the new incoming edges it is about to have. 845 PHINode *PN; 846 for (BasicBlock::iterator BBI = DestBB->begin(); 847 (PN = dyn_cast<PHINode>(BBI)); ++BBI) { 848 // Remove the incoming value for BB, and remember it. 849 Value *InVal = PN->removeIncomingValue(BB, false); 850 851 // Two options: either the InVal is a phi node defined in BB or it is some 852 // value that dominates BB. 853 PHINode *InValPhi = dyn_cast<PHINode>(InVal); 854 if (InValPhi && InValPhi->getParent() == BB) { 855 // Add all of the input values of the input PHI as inputs of this phi. 856 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) 857 PN->addIncoming(InValPhi->getIncomingValue(i), 858 InValPhi->getIncomingBlock(i)); 859 } else { 860 // Otherwise, add one instance of the dominating value for each edge that 861 // we will be adding. 862 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 863 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 864 PN->addIncoming(InVal, BBPN->getIncomingBlock(i)); 865 } else { 866 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) 867 PN->addIncoming(InVal, *PI); 868 } 869 } 870 } 871 872 // The PHIs are now updated, change everything that refers to BB to use 873 // DestBB and remove BB. 
874 BB->replaceAllUsesWith(DestBB); 875 BB->eraseFromParent(); 876 ++NumBlocksElim; 877 878 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 879 } 880 881 // Computes a map of base pointer relocation instructions to corresponding 882 // derived pointer relocation instructions given a vector of all relocate calls 883 static void computeBaseDerivedRelocateMap( 884 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, 885 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> 886 &RelocateInstMap) { 887 // Collect information in two maps: one primarily for locating the base object 888 // while filling the second map; the second map is the final structure holding 889 // a mapping between Base and corresponding Derived relocate calls 890 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; 891 for (auto *ThisRelocate : AllRelocateCalls) { 892 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), 893 ThisRelocate->getDerivedPtrIndex()); 894 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); 895 } 896 for (auto &Item : RelocateIdxMap) { 897 std::pair<unsigned, unsigned> Key = Item.first; 898 if (Key.first == Key.second) 899 // Base relocation: nothing to insert 900 continue; 901 902 GCRelocateInst *I = Item.second; 903 auto BaseKey = std::make_pair(Key.first, Key.first); 904 905 // We're iterating over RelocateIdxMap so we cannot modify it. 906 auto MaybeBase = RelocateIdxMap.find(BaseKey); 907 if (MaybeBase == RelocateIdxMap.end()) 908 // TODO: We might want to insert a new base object relocate and gep off 909 // that, if there are enough derived object relocates. 910 continue; 911 912 RelocateInstMap[MaybeBase->second].push_back(I); 913 } 914 } 915 916 // Accepts a GEP and extracts the operands into a vector provided they're all 917 // small integer constants 918 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, 919 SmallVectorImpl<Value *> &OffsetV) { 920 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 921 // Only accept small constant integer operands 922 auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); 923 if (!Op || Op->getZExtValue() > 20) 924 return false; 925 } 926 927 for (unsigned i = 1; i < GEP->getNumOperands(); i++) 928 OffsetV.push_back(GEP->getOperand(i)); 929 return true; 930 } 931 932 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to 933 // replace, computes a replacement, and affects it. 934 static bool 935 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, 936 const SmallVectorImpl<GCRelocateInst *> &Targets) { 937 bool MadeChange = false; 938 for (GCRelocateInst *ToReplace : Targets) { 939 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() && 940 "Not relocating a derived object of the original base object"); 941 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) { 942 // A duplicate relocate call. TODO: coalesce duplicates. 943 continue; 944 } 945 946 if (RelocatedBase->getParent() != ToReplace->getParent()) { 947 // Base and derived relocates are in different basic blocks. 948 // In this case transform is only valid when base dominates derived 949 // relocate. However it would be too expensive to check dominance 950 // for each such relocate, so we skip the whole transformation. 
continue; 952 } 953 954 Value *Base = ToReplace->getBasePtr(); 955 auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr()); 956 if (!Derived || Derived->getPointerOperand() != Base) 957 continue; 958 959 SmallVector<Value *, 2> OffsetV; 960 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV)) 961 continue; 962 963 // Create a Builder and replace the target callsite with a gep 964 assert(RelocatedBase->getNextNode() && 965 "Should always have one since it's not a terminator"); 966 967 // Insert after RelocatedBase 968 IRBuilder<> Builder(RelocatedBase->getNextNode()); 969 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc()); 970 971 // If gc_relocate does not match the actual type, cast it to the right type. 972 // In theory, there must be a bitcast after gc_relocate if the type does not 973 // match, and we should reuse it to get the derived pointer. But there could be 974 // cases like this: 975 // bb1: 976 // ... 977 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) 978 // br label %merge 979 // 980 // bb2: 981 // ... 982 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) 983 // br label %merge 984 // 985 // merge: 986 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ] 987 // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)* 988 // 989 // In this case, we cannot find the bitcast anymore, so we insert a new bitcast 990 // no matter whether there is already one or not. In this way, we can handle all cases, and 991 // the extra bitcast should be optimized away in later passes. 992 Value *ActualRelocatedBase = RelocatedBase; 993 if (RelocatedBase->getType() != Base->getType()) { 994 ActualRelocatedBase = 995 Builder.CreateBitCast(RelocatedBase, Base->getType()); 996 } 997 Value *Replacement = Builder.CreateGEP( 998 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV)); 999 Replacement->takeName(ToReplace); 1000 // If the newly generated derived pointer's type does not match the original derived 1001 // pointer's type, cast the new derived pointer to match it. Same reasoning as above. 1002 Value *ActualReplacement = Replacement; 1003 if (Replacement->getType() != ToReplace->getType()) { 1004 ActualReplacement = 1005 Builder.CreateBitCast(Replacement, ToReplace->getType()); 1006 } 1007 ToReplace->replaceAllUsesWith(ActualReplacement); 1008 ToReplace->eraseFromParent(); 1009 1010 MadeChange = true; 1011 } 1012 return MadeChange; 1013 } 1014 1015 // Turns this: 1016 // 1017 // %base = ... 1018 // %ptr = gep %base + 15 1019 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1020 // %base' = relocate(%tok, i32 4, i32 4) 1021 // %ptr' = relocate(%tok, i32 4, i32 5) 1022 // %val = load %ptr' 1023 // 1024 // into this: 1025 // 1026 // %base = ...
// %ptr = gep %base + 15 1028 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1029 // %base' = gc.relocate(%tok, i32 4, i32 4) 1030 // %ptr' = gep %base' + 15 1031 // %val = load %ptr' 1032 bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) { 1033 bool MadeChange = false; 1034 SmallVector<GCRelocateInst *, 2> AllRelocateCalls; 1035 1036 for (auto *U : I.users()) 1037 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) 1038 // Collect all the relocate calls associated with a statepoint 1039 AllRelocateCalls.push_back(Relocate); 1040 1041 // We need at least one base pointer relocation + one derived pointer 1042 // relocation to mangle 1043 if (AllRelocateCalls.size() < 2) 1044 return false; 1045 1046 // RelocateInstMap is a mapping from the base relocate instruction to the 1047 // corresponding derived relocate instructions 1048 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; 1049 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); 1050 if (RelocateInstMap.empty()) 1051 return false; 1052 1053 for (auto &Item : RelocateInstMap) 1054 // Item.first is the RelocatedBase to offset against 1055 // Item.second is the vector of Targets to replace 1056 MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second); 1057 return MadeChange; 1058 } 1059 1060 /// SinkCast - Sink the specified cast instruction into its user blocks 1061 static bool SinkCast(CastInst *CI) { 1062 BasicBlock *DefBB = CI->getParent(); 1063 1064 /// InsertedCasts - Only insert a cast in each block once. 1065 DenseMap<BasicBlock*, CastInst*> InsertedCasts; 1066 1067 bool MadeChange = false; 1068 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 1069 UI != E; ) { 1070 Use &TheUse = UI.getUse(); 1071 Instruction *User = cast<Instruction>(*UI); 1072 1073 // Figure out which BB this cast is used in. For PHIs this is the 1074 // appropriate predecessor block. 1075 BasicBlock *UserBB = User->getParent(); 1076 if (PHINode *PN = dyn_cast<PHINode>(User)) { 1077 UserBB = PN->getIncomingBlock(TheUse); 1078 } 1079 1080 // Preincrement use iterator so we don't invalidate it. 1081 ++UI; 1082 1083 // The first insertion point of a block containing an EH pad is after the 1084 // pad. If the pad is the user, we cannot sink the cast past the pad. 1085 if (User->isEHPad()) 1086 continue; 1087 1088 // If the block selected to receive the cast is an EH pad that does not 1089 // allow non-PHI instructions before the terminator, we can't sink the 1090 // cast. 1091 if (UserBB->getTerminator()->isEHPad()) 1092 continue; 1093 1094 // If this user is in the same block as the cast, don't change the cast. 1095 if (UserBB == DefBB) continue; 1096 1097 // If we have already inserted a cast into this block, use it. 1098 CastInst *&InsertedCast = InsertedCasts[UserBB]; 1099 1100 if (!InsertedCast) { 1101 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1102 assert(InsertPt != UserBB->end()); 1103 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 1104 CI->getType(), "", &*InsertPt); 1105 } 1106 1107 // Replace a use of the cast with a use of the new cast. 1108 TheUse = InsertedCast; 1109 MadeChange = true; 1110 ++NumCastUses; 1111 } 1112 1113 // If we removed all uses, nuke the cast. 1114 if (CI->use_empty()) { 1115 CI->eraseFromParent(); 1116 MadeChange = true; 1117 } 1118 1119 return MadeChange; 1120 } 1121 1122 /// If the specified cast instruction is a noop copy (e.g.
it's casting from 1123 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 1124 /// reduce the number of virtual registers that must be created and coalesced. 1125 /// 1126 /// Return true if any changes are made. 1127 /// 1128 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 1129 const DataLayout &DL) { 1130 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition 1131 // than sinking only nop casts, but is helpful on some platforms. 1132 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { 1133 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), 1134 ASC->getDestAddressSpace())) 1135 return false; 1136 } 1137 1138 // If this is a noop copy, 1139 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 1140 EVT DstVT = TLI.getValueType(DL, CI->getType()); 1141 1142 // Is this an fp<->int conversion? 1143 if (SrcVT.isInteger() != DstVT.isInteger()) 1144 return false; 1145 1146 // If this is an extension, it will be a zero or sign extension, which 1147 // isn't a noop. 1148 if (SrcVT.bitsLT(DstVT)) return false; 1149 1150 // If these values will be promoted, find out what they will be promoted 1151 // to. This helps us consider truncates on PPC as noop copies when they 1152 // are. 1153 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 1154 TargetLowering::TypePromoteInteger) 1155 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 1156 if (TLI.getTypeAction(CI->getContext(), DstVT) == 1157 TargetLowering::TypePromoteInteger) 1158 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 1159 1160 // If, after promotion, these are the same types, this is a noop copy. 1161 if (SrcVT != DstVT) 1162 return false; 1163 1164 return SinkCast(CI); 1165 } 1166 1167 /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if 1168 /// possible. 1169 /// 1170 /// Return true if any changes were made. 1171 static bool CombineUAddWithOverflow(CmpInst *CI) { 1172 Value *A, *B; 1173 Instruction *AddI; 1174 if (!match(CI, 1175 m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI)))) 1176 return false; 1177 1178 Type *Ty = AddI->getType(); 1179 if (!isa<IntegerType>(Ty)) 1180 return false; 1181 1182 // We don't want to move around uses of condition values this late, so we 1183 // check if it is legal to create the call to the intrinsic in the basic 1184 // block containing the icmp: 1185 1186 if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse()) 1187 return false; 1188 1189 #ifndef NDEBUG 1190 // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption 1191 // for now: 1192 if (AddI->hasOneUse()) 1193 assert(*AddI->user_begin() == CI && "expected!"); 1194 #endif 1195 1196 Module *M = CI->getModule(); 1197 Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty); 1198 1199 auto *InsertPt = AddI->hasOneUse() ? CI : AddI; 1200 1201 auto *UAddWithOverflow = 1202 CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt); 1203 auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt); 1204 auto *Overflow = 1205 ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt); 1206 1207 CI->replaceAllUsesWith(Overflow); 1208 AddI->replaceAllUsesWith(UAdd); 1209 CI->eraseFromParent(); 1210 AddI->eraseFromParent(); 1211 return true; 1212 } 1213 1214 /// Sink the given CmpInst into user blocks to reduce the number of virtual 1215 /// registers that must be created and coalesced.
This is a clear win except on 1216 /// targets with multiple condition code registers (PowerPC), where it might 1217 /// lose; some adjustment may be wanted there. 1218 /// 1219 /// Return true if any changes are made. 1220 static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) { 1221 BasicBlock *DefBB = CI->getParent(); 1222 1223 // Avoid sinking soft-FP comparisons, since this can move them into a loop. 1224 if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI)) 1225 return false; 1226 1227 // Only insert a cmp in each block once. 1228 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 1229 1230 bool MadeChange = false; 1231 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 1232 UI != E; ) { 1233 Use &TheUse = UI.getUse(); 1234 Instruction *User = cast<Instruction>(*UI); 1235 1236 // Preincrement use iterator so we don't invalidate it. 1237 ++UI; 1238 1239 // Don't bother for PHI nodes. 1240 if (isa<PHINode>(User)) 1241 continue; 1242 1243 // Figure out which BB this cmp is used in. 1244 BasicBlock *UserBB = User->getParent(); 1245 1246 // If this user is in the same block as the cmp, don't change the cmp. 1247 if (UserBB == DefBB) continue; 1248 1249 // If we have already inserted a cmp into this block, use it. 1250 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 1251 1252 if (!InsertedCmp) { 1253 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1254 assert(InsertPt != UserBB->end()); 1255 InsertedCmp = 1256 CmpInst::Create(CI->getOpcode(), CI->getPredicate(), 1257 CI->getOperand(0), CI->getOperand(1), "", &*InsertPt); 1258 // Propagate the debug info. 1259 InsertedCmp->setDebugLoc(CI->getDebugLoc()); 1260 } 1261 1262 // Replace a use of the cmp with a use of the new cmp. 1263 TheUse = InsertedCmp; 1264 MadeChange = true; 1265 ++NumCmpUses; 1266 } 1267 1268 // If we removed all uses, nuke the cmp. 1269 if (CI->use_empty()) { 1270 CI->eraseFromParent(); 1271 MadeChange = true; 1272 } 1273 1274 return MadeChange; 1275 } 1276 1277 static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) { 1278 if (SinkCmpExpression(CI, TLI)) 1279 return true; 1280 1281 if (CombineUAddWithOverflow(CI)) 1282 return true; 1283 1284 return false; 1285 } 1286 1287 /// Duplicate and sink the given 'and' instruction into user blocks where it is 1288 /// used in a compare to allow isel to generate better code for targets where 1289 /// this operation can be combined. 1290 /// 1291 /// Return true if any changes are made. 1292 static bool sinkAndCmp0Expression(Instruction *AndI, 1293 const TargetLowering &TLI, 1294 SetOfInstrs &InsertedInsts) { 1295 // Double-check that we're not trying to optimize an instruction that was 1296 // already optimized by some other part of this pass. 1297 assert(!InsertedInsts.count(AndI) && 1298 "Attempting to optimize already optimized and instruction"); 1299 (void) InsertedInsts; 1300 1301 // Nothing to do for single use in same basic block. 1302 if (AndI->hasOneUse() && 1303 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) 1304 return false; 1305 1306 // Try to avoid cases where sinking/duplicating is likely to increase register 1307 // pressure. 1308 if (!isa<ConstantInt>(AndI->getOperand(0)) && 1309 !isa<ConstantInt>(AndI->getOperand(1)) && 1310 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) 1311 return false; 1312 1313 for (auto *U : AndI->users()) { 1314 Instruction *User = cast<Instruction>(U); 1315 1316 // Only sink for and mask feeding icmp with 0. 
1317 if (!isa<ICmpInst>(User)) 1318 return false; 1319 1320 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); 1321 if (!CmpC || !CmpC->isZero()) 1322 return false; 1323 } 1324 1325 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) 1326 return false; 1327 1328 DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); 1329 DEBUG(AndI->getParent()->dump()); 1330 1331 // Push the 'and' into the same block as the icmp 0. There should only be 1332 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any 1333 // others, so we don't need to keep track of which BBs we insert into. 1334 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); 1335 UI != E; ) { 1336 Use &TheUse = UI.getUse(); 1337 Instruction *User = cast<Instruction>(*UI); 1338 1339 // Preincrement use iterator so we don't invalidate it. 1340 ++UI; 1341 1342 DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); 1343 1344 // Keep the 'and' in the same place if the use is already in the same block. 1345 Instruction *InsertPt = 1346 User->getParent() == AndI->getParent() ? AndI : User; 1347 Instruction *InsertedAnd = 1348 BinaryOperator::Create(Instruction::And, AndI->getOperand(0), 1349 AndI->getOperand(1), "", InsertPt); 1350 // Propagate the debug info. 1351 InsertedAnd->setDebugLoc(AndI->getDebugLoc()); 1352 1353 // Replace a use of the 'and' with a use of the new 'and'. 1354 TheUse = InsertedAnd; 1355 ++NumAndUses; 1356 DEBUG(User->getParent()->dump()); 1357 } 1358 1359 // We removed all uses, nuke the and. 1360 AndI->eraseFromParent(); 1361 return true; 1362 } 1363 1364 /// Check if the candidates could be combined with a shift instruction, which 1365 /// includes: 1366 /// 1. Truncate instruction 1367 /// 2. And instruction and the imm is a mask of the low bits: 1368 /// imm & (imm+1) == 0 1369 static bool isExtractBitsCandidateUse(Instruction *User) { 1370 if (!isa<TruncInst>(User)) { 1371 if (User->getOpcode() != Instruction::And || 1372 !isa<ConstantInt>(User->getOperand(1))) 1373 return false; 1374 1375 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 1376 1377 if ((Cimm & (Cimm + 1)).getBoolValue()) 1378 return false; 1379 } 1380 return true; 1381 } 1382 1383 /// Sink both shift and truncate instruction to the use of truncate's BB. 1384 static bool 1385 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 1386 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 1387 const TargetLowering &TLI, const DataLayout &DL) { 1388 BasicBlock *UserBB = User->getParent(); 1389 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 1390 TruncInst *TruncI = dyn_cast<TruncInst>(User); 1391 bool MadeChange = false; 1392 1393 for (Value::user_iterator TruncUI = TruncI->user_begin(), 1394 TruncE = TruncI->user_end(); 1395 TruncUI != TruncE;) { 1396 1397 Use &TruncTheUse = TruncUI.getUse(); 1398 Instruction *TruncUser = cast<Instruction>(*TruncUI); 1399 // Preincrement use iterator so we don't invalidate it. 1400 1401 ++TruncUI; 1402 1403 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1404 if (!ISDOpcode) 1405 continue; 1406 1407 // If the use is actually a legal node, there will not be an 1408 // implicit truncate. 1409 // FIXME: always querying the result type is just an 1410 // approximation; some nodes' legality is determined by the 1411 // operand or other means. There's no good way to find out though. 
if (TLI.isOperationLegalOrCustom( 1413 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1414 continue; 1415 1416 // Don't bother for PHI nodes. 1417 if (isa<PHINode>(TruncUser)) 1418 continue; 1419 1420 BasicBlock *TruncUserBB = TruncUser->getParent(); 1421 1422 if (UserBB == TruncUserBB) 1423 continue; 1424 1425 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1426 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1427 1428 if (!InsertedShift && !InsertedTrunc) { 1429 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1430 assert(InsertPt != TruncUserBB->end()); 1431 // Sink the shift 1432 if (ShiftI->getOpcode() == Instruction::AShr) 1433 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1434 "", &*InsertPt); 1435 else 1436 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1437 "", &*InsertPt); 1438 1439 // Sink the trunc 1440 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1441 TruncInsertPt++; 1442 assert(TruncInsertPt != TruncUserBB->end()); 1443 1444 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1445 TruncI->getType(), "", &*TruncInsertPt); 1446 1447 MadeChange = true; 1448 1449 TruncTheUse = InsertedTrunc; 1450 } 1451 } 1452 return MadeChange; 1453 } 1454 1455 /// Sink the shift *right* instruction into user blocks if the uses could 1456 /// potentially be combined with this shift instruction and generate a BitExtract 1457 /// instruction. It will only be applied if the architecture supports a BitExtract 1458 /// instruction. Here is an example: 1459 /// BB1: 1460 /// %x.extract.shift = lshr i64 %arg1, 32 1461 /// BB2: 1462 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1463 /// ==> 1464 /// 1465 /// BB2: 1466 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1467 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1468 /// 1469 /// CodeGen will recognize the pattern in BB2 and generate a BitExtract 1470 /// instruction. 1471 /// Return true if any changes are made. 1472 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1473 const TargetLowering &TLI, 1474 const DataLayout &DL) { 1475 BasicBlock *DefBB = ShiftI->getParent(); 1476 1477 /// Only insert instructions in each block once. 1478 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1479 1480 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1481 1482 bool MadeChange = false; 1483 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1484 UI != E;) { 1485 Use &TheUse = UI.getUse(); 1486 Instruction *User = cast<Instruction>(*UI); 1487 // Preincrement use iterator so we don't invalidate it. 1488 ++UI; 1489 1490 // Don't bother for PHI nodes. 1491 if (isa<PHINode>(User)) 1492 continue; 1493 1494 if (!isExtractBitsCandidateUse(User)) 1495 continue; 1496 1497 BasicBlock *UserBB = User->getParent(); 1498 1499 if (UserBB == DefBB) { 1500 // If the shift and truncate instructions are in the same BB, the use of 1501 // the truncate(TruncUse) may still introduce another truncate if not 1502 // legal. In this case, we would like to sink both shift and truncate 1503 // instruction to the BB of TruncUse. 1504 // For example: 1505 // BB1: 1506 // i64 shift.result = lshr i64 opnd, imm 1507 // trunc.result = trunc shift.result to i16 1508 // 1509 // BB2: 1510 // ----> We will have an implicit truncate here if the architecture does 1511 // not have i16 compare.
1512 // cmp i16 trunc.result, opnd2 1513 // 1514 if (isa<TruncInst>(User) && shiftIsLegal 1515 // If the type of the truncate is legal, no trucate will be 1516 // introduced in other basic blocks. 1517 && 1518 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1519 MadeChange = 1520 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1521 1522 continue; 1523 } 1524 // If we have already inserted a shift into this block, use it. 1525 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1526 1527 if (!InsertedShift) { 1528 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1529 assert(InsertPt != UserBB->end()); 1530 1531 if (ShiftI->getOpcode() == Instruction::AShr) 1532 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1533 "", &*InsertPt); 1534 else 1535 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1536 "", &*InsertPt); 1537 1538 MadeChange = true; 1539 } 1540 1541 // Replace a use of the shift with a use of the new shift. 1542 TheUse = InsertedShift; 1543 } 1544 1545 // If we removed all uses, nuke the shift. 1546 if (ShiftI->use_empty()) 1547 ShiftI->eraseFromParent(); 1548 1549 return MadeChange; 1550 } 1551 1552 // Translate a masked load intrinsic like 1553 // <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align, 1554 // <16 x i1> %mask, <16 x i32> %passthru) 1555 // to a chain of basic blocks, with loading element one-by-one if 1556 // the appropriate mask bit is set 1557 // 1558 // %1 = bitcast i8* %addr to i32* 1559 // %2 = extractelement <16 x i1> %mask, i32 0 1560 // %3 = icmp eq i1 %2, true 1561 // br i1 %3, label %cond.load, label %else 1562 // 1563 //cond.load: ; preds = %0 1564 // %4 = getelementptr i32* %1, i32 0 1565 // %5 = load i32* %4 1566 // %6 = insertelement <16 x i32> undef, i32 %5, i32 0 1567 // br label %else 1568 // 1569 //else: ; preds = %0, %cond.load 1570 // %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ] 1571 // %7 = extractelement <16 x i1> %mask, i32 1 1572 // %8 = icmp eq i1 %7, true 1573 // br i1 %8, label %cond.load1, label %else2 1574 // 1575 //cond.load1: ; preds = %else 1576 // %9 = getelementptr i32* %1, i32 1 1577 // %10 = load i32* %9 1578 // %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1 1579 // br label %else2 1580 // 1581 //else2: ; preds = %else, %cond.load1 1582 // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ] 1583 // %12 = extractelement <16 x i1> %mask, i32 2 1584 // %13 = icmp eq i1 %12, true 1585 // br i1 %13, label %cond.load4, label %else5 1586 // 1587 static void scalarizeMaskedLoad(CallInst *CI) { 1588 Value *Ptr = CI->getArgOperand(0); 1589 Value *Alignment = CI->getArgOperand(1); 1590 Value *Mask = CI->getArgOperand(2); 1591 Value *Src0 = CI->getArgOperand(3); 1592 1593 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1594 VectorType *VecType = dyn_cast<VectorType>(CI->getType()); 1595 assert(VecType && "Unexpected return type of masked load intrinsic"); 1596 1597 Type *EltTy = CI->getType()->getVectorElementType(); 1598 1599 IRBuilder<> Builder(CI->getContext()); 1600 Instruction *InsertPt = CI; 1601 BasicBlock *IfBlock = CI->getParent(); 1602 BasicBlock *CondBlock = nullptr; 1603 BasicBlock *PrevIfBlock = CI->getParent(); 1604 1605 Builder.SetInsertPoint(InsertPt); 1606 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1607 1608 // Short-cut if the mask is all-true. 
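  // An all-ones mask loads every element, so the intrinsic can be replaced
  // with a single aligned vector load.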
1609 bool IsAllOnesMask = isa<Constant>(Mask) && 1610 cast<Constant>(Mask)->isAllOnesValue(); 1611 1612 if (IsAllOnesMask) { 1613 Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal); 1614 CI->replaceAllUsesWith(NewI); 1615 CI->eraseFromParent(); 1616 return; 1617 } 1618 1619 // Adjust alignment for the scalar instruction. 1620 AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8); 1621 // Bitcast %addr fron i8* to EltTy* 1622 Type *NewPtrType = 1623 EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace()); 1624 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType); 1625 unsigned VectorWidth = VecType->getNumElements(); 1626 1627 Value *UndefVal = UndefValue::get(VecType); 1628 1629 // The result vector 1630 Value *VResult = UndefVal; 1631 1632 if (isa<ConstantVector>(Mask)) { 1633 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1634 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 1635 continue; 1636 Value *Gep = 1637 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx)); 1638 LoadInst* Load = Builder.CreateAlignedLoad(Gep, AlignVal); 1639 VResult = Builder.CreateInsertElement(VResult, Load, 1640 Builder.getInt32(Idx)); 1641 } 1642 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0); 1643 CI->replaceAllUsesWith(NewI); 1644 CI->eraseFromParent(); 1645 return; 1646 } 1647 1648 PHINode *Phi = nullptr; 1649 Value *PrevPhi = UndefVal; 1650 1651 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1652 1653 // Fill the "else" block, created in the previous iteration 1654 // 1655 // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ] 1656 // %mask_1 = extractelement <16 x i1> %mask, i32 Idx 1657 // %to_load = icmp eq i1 %mask_1, true 1658 // br i1 %to_load, label %cond.load, label %else 1659 // 1660 if (Idx > 0) { 1661 Phi = Builder.CreatePHI(VecType, 2, "res.phi.else"); 1662 Phi->addIncoming(VResult, CondBlock); 1663 Phi->addIncoming(PrevPhi, PrevIfBlock); 1664 PrevPhi = Phi; 1665 VResult = Phi; 1666 } 1667 1668 Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx)); 1669 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 1670 ConstantInt::get(Predicate->getType(), 1)); 1671 1672 // Create "cond" block 1673 // 1674 // %EltAddr = getelementptr i32* %1, i32 0 1675 // %Elt = load i32* %EltAddr 1676 // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx 1677 // 1678 CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load"); 1679 Builder.SetInsertPoint(InsertPt); 1680 1681 Value *Gep = 1682 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx)); 1683 LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal); 1684 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx)); 1685 1686 // Create "else" block, fill it in the next iteration 1687 BasicBlock *NewIfBlock = 1688 CondBlock->splitBasicBlock(InsertPt->getIterator(), "else"); 1689 Builder.SetInsertPoint(InsertPt); 1690 Instruction *OldBr = IfBlock->getTerminator(); 1691 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 1692 OldBr->eraseFromParent(); 1693 PrevIfBlock = IfBlock; 1694 IfBlock = NewIfBlock; 1695 } 1696 1697 Phi = Builder.CreatePHI(VecType, 2, "res.phi.select"); 1698 Phi->addIncoming(VResult, CondBlock); 1699 Phi->addIncoming(PrevPhi, PrevIfBlock); 1700 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0); 1701 CI->replaceAllUsesWith(NewI); 1702 CI->eraseFromParent(); 1703 } 1704 1705 // Translate a masked store intrinsic, like 1706 // void 
@llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
//                               <16 x i1> %mask)
// to a chain of basic blocks that store the elements one-by-one if
// the appropriate mask bit is set
//
//   %1 = bitcast i8* %addr to i32*
//   %2 = extractelement <16 x i1> %mask, i32 0
//   %3 = icmp eq i1 %2, true
//   br i1 %3, label %cond.store, label %else
//
// cond.store:                                        ; preds = %0
//   %4 = extractelement <16 x i32> %val, i32 0
//   %5 = getelementptr i32* %1, i32 0
//   store i32 %4, i32* %5
//   br label %else
//
// else:                                              ; preds = %0, %cond.store
//   %6 = extractelement <16 x i1> %mask, i32 1
//   %7 = icmp eq i1 %6, true
//   br i1 %7, label %cond.store1, label %else2
//
// cond.store1:                                       ; preds = %else
//   %8 = extractelement <16 x i32> %val, i32 1
//   %9 = getelementptr i32* %1, i32 1
//   store i32 %8, i32* %9
//   br label %else2
//   . . .
static void scalarizeMaskedStore(CallInst *CI) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Alignment = CI->getArgOperand(2);
  Value *Mask = CI->getArgOperand(3);

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
  assert(VecType && "Unexpected data type in masked store intrinsic");

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  bool IsAllOnesMask = isa<Constant>(Mask) &&
    cast<Constant>(Mask)->isAllOnesValue();

  if (IsAllOnesMask) {
    Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
1762 AlignVal = std::max(AlignVal, VecType->getScalarSizeInBits()/8); 1763 // Bitcast %addr fron i8* to EltTy* 1764 Type *NewPtrType = 1765 EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace()); 1766 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType); 1767 unsigned VectorWidth = VecType->getNumElements(); 1768 1769 if (isa<ConstantVector>(Mask)) { 1770 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1771 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 1772 continue; 1773 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx)); 1774 Value *Gep = 1775 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx)); 1776 Builder.CreateAlignedStore(OneElt, Gep, AlignVal); 1777 } 1778 CI->eraseFromParent(); 1779 return; 1780 } 1781 1782 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1783 1784 // Fill the "else" block, created in the previous iteration 1785 // 1786 // %mask_1 = extractelement <16 x i1> %mask, i32 Idx 1787 // %to_store = icmp eq i1 %mask_1, true 1788 // br i1 %to_store, label %cond.store, label %else 1789 // 1790 Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx)); 1791 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 1792 ConstantInt::get(Predicate->getType(), 1)); 1793 1794 // Create "cond" block 1795 // 1796 // %OneElt = extractelement <16 x i32> %Src, i32 Idx 1797 // %EltAddr = getelementptr i32* %1, i32 0 1798 // %store i32 %OneElt, i32* %EltAddr 1799 // 1800 BasicBlock *CondBlock = 1801 IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store"); 1802 Builder.SetInsertPoint(InsertPt); 1803 1804 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx)); 1805 Value *Gep = 1806 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx)); 1807 Builder.CreateAlignedStore(OneElt, Gep, AlignVal); 1808 1809 // Create "else" block, fill it in the next iteration 1810 BasicBlock *NewIfBlock = 1811 CondBlock->splitBasicBlock(InsertPt->getIterator(), "else"); 1812 Builder.SetInsertPoint(InsertPt); 1813 Instruction *OldBr = IfBlock->getTerminator(); 1814 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 1815 OldBr->eraseFromParent(); 1816 IfBlock = NewIfBlock; 1817 } 1818 CI->eraseFromParent(); 1819 } 1820 1821 // Translate a masked gather intrinsic like 1822 // <16 x i32 > @llvm.masked.gather.v16i32( <16 x i32*> %Ptrs, i32 4, 1823 // <16 x i1> %Mask, <16 x i32> %Src) 1824 // to a chain of basic blocks, with loading element one-by-one if 1825 // the appropriate mask bit is set 1826 // 1827 // % Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind 1828 // % Mask0 = extractelement <16 x i1> %Mask, i32 0 1829 // % ToLoad0 = icmp eq i1 % Mask0, true 1830 // br i1 % ToLoad0, label %cond.load, label %else 1831 // 1832 // cond.load: 1833 // % Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0 1834 // % Load0 = load i32, i32* % Ptr0, align 4 1835 // % Res0 = insertelement <16 x i32> undef, i32 % Load0, i32 0 1836 // br label %else 1837 // 1838 // else: 1839 // %res.phi.else = phi <16 x i32>[% Res0, %cond.load], [undef, % 0] 1840 // % Mask1 = extractelement <16 x i1> %Mask, i32 1 1841 // % ToLoad1 = icmp eq i1 % Mask1, true 1842 // br i1 % ToLoad1, label %cond.load1, label %else2 1843 // 1844 // cond.load1: 1845 // % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1 1846 // % Load1 = load i32, i32* % Ptr1, align 4 1847 // % Res1 = insertelement <16 x i32> %res.phi.else, i32 % Load1, i32 1 1848 // br label %else2 1849 // . . . 
1850 // % Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src 1851 // ret <16 x i32> %Result 1852 static void scalarizeMaskedGather(CallInst *CI) { 1853 Value *Ptrs = CI->getArgOperand(0); 1854 Value *Alignment = CI->getArgOperand(1); 1855 Value *Mask = CI->getArgOperand(2); 1856 Value *Src0 = CI->getArgOperand(3); 1857 1858 VectorType *VecType = dyn_cast<VectorType>(CI->getType()); 1859 1860 assert(VecType && "Unexpected return type of masked load intrinsic"); 1861 1862 IRBuilder<> Builder(CI->getContext()); 1863 Instruction *InsertPt = CI; 1864 BasicBlock *IfBlock = CI->getParent(); 1865 BasicBlock *CondBlock = nullptr; 1866 BasicBlock *PrevIfBlock = CI->getParent(); 1867 Builder.SetInsertPoint(InsertPt); 1868 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1869 1870 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1871 1872 Value *UndefVal = UndefValue::get(VecType); 1873 1874 // The result vector 1875 Value *VResult = UndefVal; 1876 unsigned VectorWidth = VecType->getNumElements(); 1877 1878 // Shorten the way if the mask is a vector of constants. 1879 bool IsConstMask = isa<ConstantVector>(Mask); 1880 1881 if (IsConstMask) { 1882 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1883 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 1884 continue; 1885 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1886 "Ptr" + Twine(Idx)); 1887 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal, 1888 "Load" + Twine(Idx)); 1889 VResult = Builder.CreateInsertElement(VResult, Load, 1890 Builder.getInt32(Idx), 1891 "Res" + Twine(Idx)); 1892 } 1893 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0); 1894 CI->replaceAllUsesWith(NewI); 1895 CI->eraseFromParent(); 1896 return; 1897 } 1898 1899 PHINode *Phi = nullptr; 1900 Value *PrevPhi = UndefVal; 1901 1902 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1903 1904 // Fill the "else" block, created in the previous iteration 1905 // 1906 // %Mask1 = extractelement <16 x i1> %Mask, i32 1 1907 // %ToLoad1 = icmp eq i1 %Mask1, true 1908 // br i1 %ToLoad1, label %cond.load, label %else 1909 // 1910 if (Idx > 0) { 1911 Phi = Builder.CreatePHI(VecType, 2, "res.phi.else"); 1912 Phi->addIncoming(VResult, CondBlock); 1913 Phi->addIncoming(PrevPhi, PrevIfBlock); 1914 PrevPhi = Phi; 1915 VResult = Phi; 1916 } 1917 1918 Value *Predicate = Builder.CreateExtractElement(Mask, 1919 Builder.getInt32(Idx), 1920 "Mask" + Twine(Idx)); 1921 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 1922 ConstantInt::get(Predicate->getType(), 1), 1923 "ToLoad" + Twine(Idx)); 1924 1925 // Create "cond" block 1926 // 1927 // %EltAddr = getelementptr i32* %1, i32 0 1928 // %Elt = load i32* %EltAddr 1929 // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx 1930 // 1931 CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load"); 1932 Builder.SetInsertPoint(InsertPt); 1933 1934 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1935 "Ptr" + Twine(Idx)); 1936 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal, 1937 "Load" + Twine(Idx)); 1938 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx), 1939 "Res" + Twine(Idx)); 1940 1941 // Create "else" block, fill it in the next iteration 1942 BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else"); 1943 Builder.SetInsertPoint(InsertPt); 1944 Instruction *OldBr = IfBlock->getTerminator(); 1945 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 1946 
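    // The conditional branch just created supersedes the unconditional
    // terminator that splitBasicBlock left in IfBlock; remove it.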
OldBr->eraseFromParent(); 1947 PrevIfBlock = IfBlock; 1948 IfBlock = NewIfBlock; 1949 } 1950 1951 Phi = Builder.CreatePHI(VecType, 2, "res.phi.select"); 1952 Phi->addIncoming(VResult, CondBlock); 1953 Phi->addIncoming(PrevPhi, PrevIfBlock); 1954 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0); 1955 CI->replaceAllUsesWith(NewI); 1956 CI->eraseFromParent(); 1957 } 1958 1959 // Translate a masked scatter intrinsic, like 1960 // void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*>* %Ptrs, i32 4, 1961 // <16 x i1> %Mask) 1962 // to a chain of basic blocks, that stores element one-by-one if 1963 // the appropriate mask bit is set. 1964 // 1965 // % Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind 1966 // % Mask0 = extractelement <16 x i1> % Mask, i32 0 1967 // % ToStore0 = icmp eq i1 % Mask0, true 1968 // br i1 %ToStore0, label %cond.store, label %else 1969 // 1970 // cond.store: 1971 // % Elt0 = extractelement <16 x i32> %Src, i32 0 1972 // % Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0 1973 // store i32 %Elt0, i32* % Ptr0, align 4 1974 // br label %else 1975 // 1976 // else: 1977 // % Mask1 = extractelement <16 x i1> % Mask, i32 1 1978 // % ToStore1 = icmp eq i1 % Mask1, true 1979 // br i1 % ToStore1, label %cond.store1, label %else2 1980 // 1981 // cond.store1: 1982 // % Elt1 = extractelement <16 x i32> %Src, i32 1 1983 // % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1 1984 // store i32 % Elt1, i32* % Ptr1, align 4 1985 // br label %else2 1986 // . . . 1987 static void scalarizeMaskedScatter(CallInst *CI) { 1988 Value *Src = CI->getArgOperand(0); 1989 Value *Ptrs = CI->getArgOperand(1); 1990 Value *Alignment = CI->getArgOperand(2); 1991 Value *Mask = CI->getArgOperand(3); 1992 1993 assert(isa<VectorType>(Src->getType()) && 1994 "Unexpected data type in masked scatter intrinsic"); 1995 assert(isa<VectorType>(Ptrs->getType()) && 1996 isa<PointerType>(Ptrs->getType()->getVectorElementType()) && 1997 "Vector of pointers is expected in masked scatter intrinsic"); 1998 1999 IRBuilder<> Builder(CI->getContext()); 2000 Instruction *InsertPt = CI; 2001 BasicBlock *IfBlock = CI->getParent(); 2002 Builder.SetInsertPoint(InsertPt); 2003 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 2004 2005 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 2006 unsigned VectorWidth = Src->getType()->getVectorNumElements(); 2007 2008 // Shorten the way if the mask is a vector of constants. 
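  // With a constant mask the active lanes are known statically, so the
  // stores can be emitted directly without any branching.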
2009 bool IsConstMask = isa<ConstantVector>(Mask); 2010 2011 if (IsConstMask) { 2012 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 2013 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 2014 continue; 2015 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx), 2016 "Elt" + Twine(Idx)); 2017 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 2018 "Ptr" + Twine(Idx)); 2019 Builder.CreateAlignedStore(OneElt, Ptr, AlignVal); 2020 } 2021 CI->eraseFromParent(); 2022 return; 2023 } 2024 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 2025 // Fill the "else" block, created in the previous iteration 2026 // 2027 // % Mask1 = extractelement <16 x i1> % Mask, i32 Idx 2028 // % ToStore = icmp eq i1 % Mask1, true 2029 // br i1 % ToStore, label %cond.store, label %else 2030 // 2031 Value *Predicate = Builder.CreateExtractElement(Mask, 2032 Builder.getInt32(Idx), 2033 "Mask" + Twine(Idx)); 2034 Value *Cmp = 2035 Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 2036 ConstantInt::get(Predicate->getType(), 1), 2037 "ToStore" + Twine(Idx)); 2038 2039 // Create "cond" block 2040 // 2041 // % Elt1 = extractelement <16 x i32> %Src, i32 1 2042 // % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1 2043 // %store i32 % Elt1, i32* % Ptr1 2044 // 2045 BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store"); 2046 Builder.SetInsertPoint(InsertPt); 2047 2048 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx), 2049 "Elt" + Twine(Idx)); 2050 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 2051 "Ptr" + Twine(Idx)); 2052 Builder.CreateAlignedStore(OneElt, Ptr, AlignVal); 2053 2054 // Create "else" block, fill it in the next iteration 2055 BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else"); 2056 Builder.SetInsertPoint(InsertPt); 2057 Instruction *OldBr = IfBlock->getTerminator(); 2058 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 2059 OldBr->eraseFromParent(); 2060 IfBlock = NewIfBlock; 2061 } 2062 CI->eraseFromParent(); 2063 } 2064 2065 /// If counting leading or trailing zeros is an expensive operation and a zero 2066 /// input is defined, add a check for zero to avoid calling the intrinsic. 2067 /// 2068 /// We want to transform: 2069 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 2070 /// 2071 /// into: 2072 /// entry: 2073 /// %cmpz = icmp eq i64 %A, 0 2074 /// br i1 %cmpz, label %cond.end, label %cond.false 2075 /// cond.false: 2076 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 2077 /// br label %cond.end 2078 /// cond.end: 2079 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 2080 /// 2081 /// If the transform is performed, return true and set ModifiedDT to true. 2082 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 2083 const TargetLowering *TLI, 2084 const DataLayout *DL, 2085 bool &ModifiedDT) { 2086 if (!TLI || !DL) 2087 return false; 2088 2089 // If a zero input is undefined, it doesn't make sense to despeculate that. 2090 if (match(CountZeros->getOperand(1), m_One())) 2091 return false; 2092 2093 // If it's cheap to speculate, there's nothing to do. 2094 auto IntrinsicID = CountZeros->getIntrinsicID(); 2095 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 2096 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 2097 return false; 2098 2099 // Only handle legal scalar cases. Anything else requires too much work. 
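  // Vector counts and integers wider than the largest legal integer type are
  // skipped here.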
2100 Type *Ty = CountZeros->getType(); 2101 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 2102 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 2103 return false; 2104 2105 // The intrinsic will be sunk behind a compare against zero and branch. 2106 BasicBlock *StartBlock = CountZeros->getParent(); 2107 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 2108 2109 // Create another block after the count zero intrinsic. A PHI will be added 2110 // in this block to select the result of the intrinsic or the bit-width 2111 // constant if the input to the intrinsic is zero. 2112 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 2113 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 2114 2115 // Set up a builder to create a compare, conditional branch, and PHI. 2116 IRBuilder<> Builder(CountZeros->getContext()); 2117 Builder.SetInsertPoint(StartBlock->getTerminator()); 2118 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 2119 2120 // Replace the unconditional branch that was created by the first split with 2121 // a compare against zero and a conditional branch. 2122 Value *Zero = Constant::getNullValue(Ty); 2123 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 2124 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 2125 StartBlock->getTerminator()->eraseFromParent(); 2126 2127 // Create a PHI in the end block to select either the output of the intrinsic 2128 // or the bit width of the operand. 2129 Builder.SetInsertPoint(&EndBlock->front()); 2130 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 2131 CountZeros->replaceAllUsesWith(PN); 2132 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 2133 PN->addIncoming(BitWidth, StartBlock); 2134 PN->addIncoming(CountZeros, CallBlock); 2135 2136 // We are explicitly handling the zero case, so we can set the intrinsic's 2137 // undefined zero argument to 'true'. This will also prevent reprocessing the 2138 // intrinsic; we only despeculate when a zero input is defined. 2139 CountZeros->setArgOperand(1, Builder.getTrue()); 2140 ModifiedDT = true; 2141 return true; 2142 } 2143 2144 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) { 2145 BasicBlock *BB = CI->getParent(); 2146 2147 // Lower inline assembly if we can. 2148 // If we found an inline asm expession, and if the target knows how to 2149 // lower it to normal LLVM code, do so now. 2150 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 2151 if (TLI->ExpandInlineAsm(CI)) { 2152 // Avoid invalidating the iterator. 2153 CurInstIterator = BB->begin(); 2154 // Avoid processing instructions out of order, which could cause 2155 // reuse before a value is defined. 2156 SunkAddrs.clear(); 2157 return true; 2158 } 2159 // Sink address computing for memory operands into the block. 2160 if (optimizeInlineAsmInst(CI)) 2161 return true; 2162 } 2163 2164 // Align the pointer arguments to this call if the target thinks it's a good 2165 // idea 2166 unsigned MinSize, PrefAlign; 2167 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 2168 for (auto &Arg : CI->arg_operands()) { 2169 // We want to align both objects whose address is used directly and 2170 // objects whose address is used in casts and GEPs, though it only makes 2171 // sense for GEPs if the offset is a multiple of the desired alignment and 2172 // if size - offset meets the size threshold. 
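      // Strip casts and constant in-bounds GEP offsets to find the underlying
      // alloca or global, then raise its alignment if the conditions below
      // are met.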
2173 if (!Arg->getType()->isPointerTy()) 2174 continue; 2175 APInt Offset(DL->getPointerSizeInBits( 2176 cast<PointerType>(Arg->getType())->getAddressSpace()), 2177 0); 2178 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 2179 uint64_t Offset2 = Offset.getLimitedValue(); 2180 if ((Offset2 & (PrefAlign-1)) != 0) 2181 continue; 2182 AllocaInst *AI; 2183 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 2184 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 2185 AI->setAlignment(PrefAlign); 2186 // Global variables can only be aligned if they are defined in this 2187 // object (i.e. they are uniquely initialized in this object), and 2188 // over-aligning global variables that have an explicit section is 2189 // forbidden. 2190 GlobalVariable *GV; 2191 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 2192 GV->getPointerAlignment(*DL) < PrefAlign && 2193 DL->getTypeAllocSize(GV->getValueType()) >= 2194 MinSize + Offset2) 2195 GV->setAlignment(PrefAlign); 2196 } 2197 // If this is a memcpy (or similar) then we may be able to improve the 2198 // alignment 2199 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 2200 unsigned Align = getKnownAlignment(MI->getDest(), *DL); 2201 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) 2202 Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL)); 2203 if (Align > MI->getAlignment()) 2204 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align)); 2205 } 2206 } 2207 2208 // If we have a cold call site, try to sink addressing computation into the 2209 // cold block. This interacts with our handling for loads and stores to 2210 // ensure that we can fold all uses of a potential addressing computation 2211 // into their uses. TODO: generalize this to work over profiling data 2212 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 2213 for (auto &Arg : CI->arg_operands()) { 2214 if (!Arg->getType()->isPointerTy()) 2215 continue; 2216 unsigned AS = Arg->getType()->getPointerAddressSpace(); 2217 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 2218 } 2219 2220 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 2221 if (II) { 2222 switch (II->getIntrinsicID()) { 2223 default: break; 2224 case Intrinsic::objectsize: { 2225 // Lower all uses of llvm.objectsize.* 2226 ConstantInt *RetVal = 2227 lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); 2228 // Substituting this can cause recursive simplifications, which can 2229 // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case 2230 // this 2231 // happens. 2232 Value *CurValue = &*CurInstIterator; 2233 WeakTrackingVH IterHandle(CurValue); 2234 2235 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 2236 2237 // If the iterator instruction was recursively deleted, start over at the 2238 // start of the block. 
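      // (A WeakTrackingVH stops comparing equal to the original value once
      // that value has been deleted or replaced.)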
2239 if (IterHandle != CurValue) { 2240 CurInstIterator = BB->begin(); 2241 SunkAddrs.clear(); 2242 } 2243 return true; 2244 } 2245 case Intrinsic::masked_load: { 2246 // Scalarize unsupported vector masked load 2247 if (!TTI->isLegalMaskedLoad(CI->getType())) { 2248 scalarizeMaskedLoad(CI); 2249 ModifiedDT = true; 2250 return true; 2251 } 2252 return false; 2253 } 2254 case Intrinsic::masked_store: { 2255 if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType())) { 2256 scalarizeMaskedStore(CI); 2257 ModifiedDT = true; 2258 return true; 2259 } 2260 return false; 2261 } 2262 case Intrinsic::masked_gather: { 2263 if (!TTI->isLegalMaskedGather(CI->getType())) { 2264 scalarizeMaskedGather(CI); 2265 ModifiedDT = true; 2266 return true; 2267 } 2268 return false; 2269 } 2270 case Intrinsic::masked_scatter: { 2271 if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) { 2272 scalarizeMaskedScatter(CI); 2273 ModifiedDT = true; 2274 return true; 2275 } 2276 return false; 2277 } 2278 case Intrinsic::aarch64_stlxr: 2279 case Intrinsic::aarch64_stxr: { 2280 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 2281 if (!ExtVal || !ExtVal->hasOneUse() || 2282 ExtVal->getParent() == CI->getParent()) 2283 return false; 2284 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 2285 ExtVal->moveBefore(CI); 2286 // Mark this instruction as "inserted by CGP", so that other 2287 // optimizations don't touch it. 2288 InsertedInsts.insert(ExtVal); 2289 return true; 2290 } 2291 case Intrinsic::invariant_group_barrier: 2292 II->replaceAllUsesWith(II->getArgOperand(0)); 2293 II->eraseFromParent(); 2294 return true; 2295 2296 case Intrinsic::cttz: 2297 case Intrinsic::ctlz: 2298 // If counting zeros is expensive, try to avoid it. 2299 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 2300 } 2301 2302 if (TLI) { 2303 SmallVector<Value*, 2> PtrOps; 2304 Type *AccessTy; 2305 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 2306 while (!PtrOps.empty()) { 2307 Value *PtrVal = PtrOps.pop_back_val(); 2308 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 2309 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 2310 return true; 2311 } 2312 } 2313 } 2314 2315 // From here on out we're working with named functions. 2316 if (!CI->getCalledFunction()) return false; 2317 2318 // Lower all default uses of _chk calls. This is very similar 2319 // to what InstCombineCalls does, but here we are only lowering calls 2320 // to fortified library functions (e.g. __memcpy_chk) that have the default 2321 // "don't know" as the objectsize. Anything else should be left alone. 2322 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 2323 if (Value *V = Simplifier.optimizeCall(CI)) { 2324 CI->replaceAllUsesWith(V); 2325 CI->eraseFromParent(); 2326 return true; 2327 } 2328 return false; 2329 } 2330 2331 /// Look for opportunities to duplicate return instructions to the predecessor 2332 /// to enable tail call optimizations. 
The case it is currently looking for is: 2333 /// @code 2334 /// bb0: 2335 /// %tmp0 = tail call i32 @f0() 2336 /// br label %return 2337 /// bb1: 2338 /// %tmp1 = tail call i32 @f1() 2339 /// br label %return 2340 /// bb2: 2341 /// %tmp2 = tail call i32 @f2() 2342 /// br label %return 2343 /// return: 2344 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 2345 /// ret i32 %retval 2346 /// @endcode 2347 /// 2348 /// => 2349 /// 2350 /// @code 2351 /// bb0: 2352 /// %tmp0 = tail call i32 @f0() 2353 /// ret i32 %tmp0 2354 /// bb1: 2355 /// %tmp1 = tail call i32 @f1() 2356 /// ret i32 %tmp1 2357 /// bb2: 2358 /// %tmp2 = tail call i32 @f2() 2359 /// ret i32 %tmp2 2360 /// @endcode 2361 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { 2362 if (!TLI) 2363 return false; 2364 2365 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 2366 if (!RetI) 2367 return false; 2368 2369 PHINode *PN = nullptr; 2370 BitCastInst *BCI = nullptr; 2371 Value *V = RetI->getReturnValue(); 2372 if (V) { 2373 BCI = dyn_cast<BitCastInst>(V); 2374 if (BCI) 2375 V = BCI->getOperand(0); 2376 2377 PN = dyn_cast<PHINode>(V); 2378 if (!PN) 2379 return false; 2380 } 2381 2382 if (PN && PN->getParent() != BB) 2383 return false; 2384 2385 // Make sure there are no instructions between the PHI and return, or that the 2386 // return is the first instruction in the block. 2387 if (PN) { 2388 BasicBlock::iterator BI = BB->begin(); 2389 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); 2390 if (&*BI == BCI) 2391 // Also skip over the bitcast. 2392 ++BI; 2393 if (&*BI != RetI) 2394 return false; 2395 } else { 2396 BasicBlock::iterator BI = BB->begin(); 2397 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 2398 if (&*BI != RetI) 2399 return false; 2400 } 2401 2402 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 2403 /// call. 2404 const Function *F = BB->getParent(); 2405 SmallVector<CallInst*, 4> TailCalls; 2406 if (PN) { 2407 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2408 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 2409 // Make sure the phi value is indeed produced by the tail call. 2410 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && 2411 TLI->mayBeEmittedAsTailCall(CI) && 2412 attributesPermitTailCall(F, CI, RetI, *TLI)) 2413 TailCalls.push_back(CI); 2414 } 2415 } else { 2416 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2417 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 2418 if (!VisitedBBs.insert(*PI).second) 2419 continue; 2420 2421 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 2422 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 2423 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 2424 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 2425 if (RI == RE) 2426 continue; 2427 2428 CallInst *CI = dyn_cast<CallInst>(&*RI); 2429 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2430 attributesPermitTailCall(F, CI, RetI, *TLI)) 2431 TailCalls.push_back(CI); 2432 } 2433 } 2434 2435 bool Changed = false; 2436 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { 2437 CallInst *CI = TailCalls[i]; 2438 CallSite CS(CI); 2439 2440 // Conservatively require the attributes of the call to match those of the 2441 // return. Ignore noalias because it doesn't affect the call sequence. 
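    // Other return attributes (e.g. zeroext/signext) can change how the value
    // is produced, so a mismatch would make the duplicated return unsafe.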
2442 AttributeList CalleeAttrs = CS.getAttributes(); 2443 if (AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex) 2444 .removeAttribute(Attribute::NoAlias) != 2445 AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex) 2446 .removeAttribute(Attribute::NoAlias)) 2447 continue; 2448 2449 // Make sure the call instruction is followed by an unconditional branch to 2450 // the return block. 2451 BasicBlock *CallBB = CI->getParent(); 2452 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); 2453 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2454 continue; 2455 2456 // Duplicate the return into CallBB. 2457 (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); 2458 ModifiedDT = Changed = true; 2459 ++NumRetsDup; 2460 } 2461 2462 // If we eliminated all predecessors of the block, delete the block now. 2463 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 2464 BB->eraseFromParent(); 2465 2466 return Changed; 2467 } 2468 2469 //===----------------------------------------------------------------------===// 2470 // Memory Optimization 2471 //===----------------------------------------------------------------------===// 2472 2473 namespace { 2474 2475 /// This is an extended version of TargetLowering::AddrMode 2476 /// which holds actual Value*'s for register values. 2477 struct ExtAddrMode : public TargetLowering::AddrMode { 2478 Value *BaseReg; 2479 Value *ScaledReg; 2480 ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {} 2481 void print(raw_ostream &OS) const; 2482 void dump() const; 2483 2484 bool operator==(const ExtAddrMode& O) const { 2485 return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) && 2486 (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) && 2487 (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale); 2488 } 2489 }; 2490 2491 #ifndef NDEBUG 2492 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2493 AM.print(OS); 2494 return OS; 2495 } 2496 #endif 2497 2498 void ExtAddrMode::print(raw_ostream &OS) const { 2499 bool NeedPlus = false; 2500 OS << "["; 2501 if (BaseGV) { 2502 OS << (NeedPlus ? " + " : "") 2503 << "GV:"; 2504 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2505 NeedPlus = true; 2506 } 2507 2508 if (BaseOffs) { 2509 OS << (NeedPlus ? " + " : "") 2510 << BaseOffs; 2511 NeedPlus = true; 2512 } 2513 2514 if (BaseReg) { 2515 OS << (NeedPlus ? " + " : "") 2516 << "Base:"; 2517 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2518 NeedPlus = true; 2519 } 2520 if (Scale) { 2521 OS << (NeedPlus ? " + " : "") 2522 << Scale << "*"; 2523 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2524 } 2525 2526 OS << ']'; 2527 } 2528 2529 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2530 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2531 print(dbgs()); 2532 dbgs() << '\n'; 2533 } 2534 #endif 2535 2536 /// \brief This class provides transaction based operation on the IR. 2537 /// Every change made through this class is recorded in the internal state and 2538 /// can be undone (rollback) until commit is called. 2539 class TypePromotionTransaction { 2540 2541 /// \brief This represents the common interface of the individual transaction. 2542 /// Each class implements the logic for doing one specific modification on 2543 /// the IR via the TypePromotionTransaction. 2544 class TypePromotionAction { 2545 protected: 2546 /// The Instruction modified. 2547 Instruction *Inst; 2548 2549 public: 2550 /// \brief Constructor of the action. 2551 /// The constructor performs the related action on the IR. 
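    /// For this base class that simply means recording \p Inst; derived
    /// actions apply their IR changes in their own constructors.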
2552 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2553 2554 virtual ~TypePromotionAction() {} 2555 2556 /// \brief Undo the modification done by this action. 2557 /// When this method is called, the IR must be in the same state as it was 2558 /// before this action was applied. 2559 /// \pre Undoing the action works if and only if the IR is in the exact same 2560 /// state as it was directly after this action was applied. 2561 virtual void undo() = 0; 2562 2563 /// \brief Advocate every change made by this action. 2564 /// When the results on the IR of the action are to be kept, it is important 2565 /// to call this function, otherwise hidden information may be kept forever. 2566 virtual void commit() { 2567 // Nothing to be done, this action is not doing anything. 2568 } 2569 }; 2570 2571 /// \brief Utility to remember the position of an instruction. 2572 class InsertionHandler { 2573 /// Position of an instruction. 2574 /// Either an instruction: 2575 /// - Is the first in a basic block: BB is used. 2576 /// - Has a previous instructon: PrevInst is used. 2577 union { 2578 Instruction *PrevInst; 2579 BasicBlock *BB; 2580 } Point; 2581 /// Remember whether or not the instruction had a previous instruction. 2582 bool HasPrevInstruction; 2583 2584 public: 2585 /// \brief Record the position of \p Inst. 2586 InsertionHandler(Instruction *Inst) { 2587 BasicBlock::iterator It = Inst->getIterator(); 2588 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2589 if (HasPrevInstruction) 2590 Point.PrevInst = &*--It; 2591 else 2592 Point.BB = Inst->getParent(); 2593 } 2594 2595 /// \brief Insert \p Inst at the recorded position. 2596 void insert(Instruction *Inst) { 2597 if (HasPrevInstruction) { 2598 if (Inst->getParent()) 2599 Inst->removeFromParent(); 2600 Inst->insertAfter(Point.PrevInst); 2601 } else { 2602 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2603 if (Inst->getParent()) 2604 Inst->moveBefore(Position); 2605 else 2606 Inst->insertBefore(Position); 2607 } 2608 } 2609 }; 2610 2611 /// \brief Move an instruction before another. 2612 class InstructionMoveBefore : public TypePromotionAction { 2613 /// Original position of the instruction. 2614 InsertionHandler Position; 2615 2616 public: 2617 /// \brief Move \p Inst before \p Before. 2618 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2619 : TypePromotionAction(Inst), Position(Inst) { 2620 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); 2621 Inst->moveBefore(Before); 2622 } 2623 2624 /// \brief Move the instruction back to its original position. 2625 void undo() override { 2626 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2627 Position.insert(Inst); 2628 } 2629 }; 2630 2631 /// \brief Set the operand of an instruction with a new value. 2632 class OperandSetter : public TypePromotionAction { 2633 /// Original operand of the instruction. 2634 Value *Origin; 2635 /// Index of the modified instruction. 2636 unsigned Idx; 2637 2638 public: 2639 /// \brief Set \p Idx operand of \p Inst with \p NewVal. 2640 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2641 : TypePromotionAction(Inst), Idx(Idx) { 2642 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2643 << "for:" << *Inst << "\n" 2644 << "with:" << *NewVal << "\n"); 2645 Origin = Inst->getOperand(Idx); 2646 Inst->setOperand(Idx, NewVal); 2647 } 2648 2649 /// \brief Restore the original value of the instruction. 
2650 void undo() override { 2651 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2652 << "for: " << *Inst << "\n" 2653 << "with: " << *Origin << "\n"); 2654 Inst->setOperand(Idx, Origin); 2655 } 2656 }; 2657 2658 /// \brief Hide the operands of an instruction. 2659 /// Do as if this instruction was not using any of its operands. 2660 class OperandsHider : public TypePromotionAction { 2661 /// The list of original operands. 2662 SmallVector<Value *, 4> OriginalValues; 2663 2664 public: 2665 /// \brief Remove \p Inst from the uses of the operands of \p Inst. 2666 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2667 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2668 unsigned NumOpnds = Inst->getNumOperands(); 2669 OriginalValues.reserve(NumOpnds); 2670 for (unsigned It = 0; It < NumOpnds; ++It) { 2671 // Save the current operand. 2672 Value *Val = Inst->getOperand(It); 2673 OriginalValues.push_back(Val); 2674 // Set a dummy one. 2675 // We could use OperandSetter here, but that would imply an overhead 2676 // that we are not willing to pay. 2677 Inst->setOperand(It, UndefValue::get(Val->getType())); 2678 } 2679 } 2680 2681 /// \brief Restore the original list of uses. 2682 void undo() override { 2683 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2684 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2685 Inst->setOperand(It, OriginalValues[It]); 2686 } 2687 }; 2688 2689 /// \brief Build a truncate instruction. 2690 class TruncBuilder : public TypePromotionAction { 2691 Value *Val; 2692 public: 2693 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty 2694 /// result. 2695 /// trunc Opnd to Ty. 2696 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2697 IRBuilder<> Builder(Opnd); 2698 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2699 DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2700 } 2701 2702 /// \brief Get the built value. 2703 Value *getBuiltValue() { return Val; } 2704 2705 /// \brief Remove the built instruction. 2706 void undo() override { 2707 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2708 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2709 IVal->eraseFromParent(); 2710 } 2711 }; 2712 2713 /// \brief Build a sign extension instruction. 2714 class SExtBuilder : public TypePromotionAction { 2715 Value *Val; 2716 public: 2717 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty 2718 /// result. 2719 /// sext Opnd to Ty. 2720 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2721 : TypePromotionAction(InsertPt) { 2722 IRBuilder<> Builder(InsertPt); 2723 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2724 DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2725 } 2726 2727 /// \brief Get the built value. 2728 Value *getBuiltValue() { return Val; } 2729 2730 /// \brief Remove the built instruction. 2731 void undo() override { 2732 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2733 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2734 IVal->eraseFromParent(); 2735 } 2736 }; 2737 2738 /// \brief Build a zero extension instruction. 2739 class ZExtBuilder : public TypePromotionAction { 2740 Value *Val; 2741 public: 2742 /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty 2743 /// result. 2744 /// zext Opnd to Ty. 
2745 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2746 : TypePromotionAction(InsertPt) { 2747 IRBuilder<> Builder(InsertPt); 2748 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2749 DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2750 } 2751 2752 /// \brief Get the built value. 2753 Value *getBuiltValue() { return Val; } 2754 2755 /// \brief Remove the built instruction. 2756 void undo() override { 2757 DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2758 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2759 IVal->eraseFromParent(); 2760 } 2761 }; 2762 2763 /// \brief Mutate an instruction to another type. 2764 class TypeMutator : public TypePromotionAction { 2765 /// Record the original type. 2766 Type *OrigTy; 2767 2768 public: 2769 /// \brief Mutate the type of \p Inst into \p NewTy. 2770 TypeMutator(Instruction *Inst, Type *NewTy) 2771 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2772 DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2773 << "\n"); 2774 Inst->mutateType(NewTy); 2775 } 2776 2777 /// \brief Mutate the instruction back to its original type. 2778 void undo() override { 2779 DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2780 << "\n"); 2781 Inst->mutateType(OrigTy); 2782 } 2783 }; 2784 2785 /// \brief Replace the uses of an instruction by another instruction. 2786 class UsesReplacer : public TypePromotionAction { 2787 /// Helper structure to keep track of the replaced uses. 2788 struct InstructionAndIdx { 2789 /// The instruction using the instruction. 2790 Instruction *Inst; 2791 /// The index where this instruction is used for Inst. 2792 unsigned Idx; 2793 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2794 : Inst(Inst), Idx(Idx) {} 2795 }; 2796 2797 /// Keep track of the original uses (pair Instruction, Index). 2798 SmallVector<InstructionAndIdx, 4> OriginalUses; 2799 typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator; 2800 2801 public: 2802 /// \brief Replace all the use of \p Inst by \p New. 2803 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2804 DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2805 << "\n"); 2806 // Record the original uses. 2807 for (Use &U : Inst->uses()) { 2808 Instruction *UserI = cast<Instruction>(U.getUser()); 2809 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2810 } 2811 // Now, we can replace the uses. 2812 Inst->replaceAllUsesWith(New); 2813 } 2814 2815 /// \brief Reassign the original uses of Inst to Inst. 2816 void undo() override { 2817 DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2818 for (use_iterator UseIt = OriginalUses.begin(), 2819 EndIt = OriginalUses.end(); 2820 UseIt != EndIt; ++UseIt) { 2821 UseIt->Inst->setOperand(UseIt->Idx, Inst); 2822 } 2823 } 2824 }; 2825 2826 /// \brief Remove an instruction from the IR. 2827 class InstructionRemover : public TypePromotionAction { 2828 /// Original position of the instruction. 2829 InsertionHandler Inserter; 2830 /// Helper structure to hide all the link to the instruction. In other 2831 /// words, this helps to do as if the instruction was removed. 2832 OperandsHider Hider; 2833 /// Keep track of the uses replaced, if any. 2834 UsesReplacer *Replacer; 2835 /// Keep track of instructions removed. 2836 SetOfInstrs &RemovedInsts; 2837 2838 public: 2839 /// \brief Remove all reference of \p Inst and optinally replace all its 2840 /// uses with New. 
    /// \p RemovedInsts Keep track of the instructions removed by this Action.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
                       Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          Replacer(nullptr), RemovedInsts(RemovedInsts) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      RemovedInsts.insert(Inst);
      /// The instructions removed here will be freed after completing
      /// optimizeBlock() for all blocks as we need to keep track of the
      /// removed instructions during promotion.
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when building this action.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
      RemovedInsts.erase(Inst);
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  typedef const TypePromotionAction *ConstRestorationPt;

  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
      : RemovedInsts(RemovedInsts) {}

  /// Commit every change made in this transaction.
  void commit();
  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);
  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);
  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);
  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);
  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

private:
  /// The ordered list of actions made so far.
2909 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2910 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt; 2911 SetOfInstrs &RemovedInsts; 2912 }; 2913 2914 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2915 Value *NewVal) { 2916 Actions.push_back( 2917 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal)); 2918 } 2919 2920 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2921 Value *NewVal) { 2922 Actions.push_back( 2923 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, 2924 RemovedInsts, NewVal)); 2925 } 2926 2927 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2928 Value *New) { 2929 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2930 } 2931 2932 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2933 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2934 } 2935 2936 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2937 Type *Ty) { 2938 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2939 Value *Val = Ptr->getBuiltValue(); 2940 Actions.push_back(std::move(Ptr)); 2941 return Val; 2942 } 2943 2944 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2945 Value *Opnd, Type *Ty) { 2946 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2947 Value *Val = Ptr->getBuiltValue(); 2948 Actions.push_back(std::move(Ptr)); 2949 return Val; 2950 } 2951 2952 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2953 Value *Opnd, Type *Ty) { 2954 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2955 Value *Val = Ptr->getBuiltValue(); 2956 Actions.push_back(std::move(Ptr)); 2957 return Val; 2958 } 2959 2960 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2961 Instruction *Before) { 2962 Actions.push_back( 2963 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before)); 2964 } 2965 2966 TypePromotionTransaction::ConstRestorationPt 2967 TypePromotionTransaction::getRestorationPoint() const { 2968 return !Actions.empty() ? Actions.back().get() : nullptr; 2969 } 2970 2971 void TypePromotionTransaction::commit() { 2972 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2973 ++It) 2974 (*It)->commit(); 2975 Actions.clear(); 2976 } 2977 2978 void TypePromotionTransaction::rollback( 2979 TypePromotionTransaction::ConstRestorationPt Point) { 2980 while (!Actions.empty() && Point != Actions.back().get()) { 2981 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2982 Curr->undo(); 2983 } 2984 } 2985 2986 /// \brief A helper class for matching addressing modes. 2987 /// 2988 /// This encapsulates the logic for matching the target-legal addressing modes. 2989 class AddressingModeMatcher { 2990 SmallVectorImpl<Instruction*> &AddrModeInsts; 2991 const TargetLowering &TLI; 2992 const TargetRegisterInfo &TRI; 2993 const DataLayout &DL; 2994 2995 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2996 /// the memory instruction that we're computing this address for. 2997 Type *AccessTy; 2998 unsigned AddrSpace; 2999 Instruction *MemoryInst; 3000 3001 /// This is the addressing mode that we're building up. This is 3002 /// part of the return value of this addressing mode matching stuff. 3003 ExtAddrMode &AddrMode; 3004 3005 /// The instructions inserted by other CodeGenPrepare optimizations. 
3006 const SetOfInstrs &InsertedInsts; 3007 /// A map from the instructions to their type before promotion. 3008 InstrToOrigTy &PromotedInsts; 3009 /// The ongoing transaction where every action should be registered. 3010 TypePromotionTransaction &TPT; 3011 3012 /// This is set to true when we should not do profitability checks. 3013 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 3014 bool IgnoreProfitability; 3015 3016 AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI, 3017 const TargetLowering &TLI, 3018 const TargetRegisterInfo &TRI, 3019 Type *AT, unsigned AS, 3020 Instruction *MI, ExtAddrMode &AM, 3021 const SetOfInstrs &InsertedInsts, 3022 InstrToOrigTy &PromotedInsts, 3023 TypePromotionTransaction &TPT) 3024 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 3025 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 3026 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 3027 PromotedInsts(PromotedInsts), TPT(TPT) { 3028 IgnoreProfitability = false; 3029 } 3030 public: 3031 3032 /// Find the maximal addressing mode that a load/store of V can fold, 3033 /// give an access type of AccessTy. This returns a list of involved 3034 /// instructions in AddrModeInsts. 3035 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 3036 /// optimizations. 3037 /// \p PromotedInsts maps the instructions to their type before promotion. 3038 /// \p The ongoing transaction where every action should be registered. 3039 static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS, 3040 Instruction *MemoryInst, 3041 SmallVectorImpl<Instruction*> &AddrModeInsts, 3042 const TargetLowering &TLI, 3043 const TargetRegisterInfo &TRI, 3044 const SetOfInstrs &InsertedInsts, 3045 InstrToOrigTy &PromotedInsts, 3046 TypePromotionTransaction &TPT) { 3047 ExtAddrMode Result; 3048 3049 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, 3050 AccessTy, AS, 3051 MemoryInst, Result, InsertedInsts, 3052 PromotedInsts, TPT).matchAddr(V, 0); 3053 (void)Success; assert(Success && "Couldn't select *anything*?"); 3054 return Result; 3055 } 3056 private: 3057 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 3058 bool matchAddr(Value *V, unsigned Depth); 3059 bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, 3060 bool *MovedAway = nullptr); 3061 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 3062 ExtAddrMode &AMBefore, 3063 ExtAddrMode &AMAfter); 3064 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 3065 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 3066 Value *PromotedOperand) const; 3067 }; 3068 3069 /// Try adding ScaleReg*Scale to the current addressing mode. 3070 /// Return true and update AddrMode if this addr mode is legal for the target, 3071 /// false if not. 3072 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 3073 unsigned Depth) { 3074 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 3075 // mode. Just process that directly. 3076 if (Scale == 1) 3077 return matchAddr(ScaleReg, Depth); 3078 3079 // If the scale is 0, it takes nothing to add this. 3080 if (Scale == 0) 3081 return true; 3082 3083 // If we already have a scale of this value, we can add to it, otherwise, we 3084 // need an available scale field. 3085 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 3086 return false; 3087 3088 ExtAddrMode TestAddrMode = AddrMode; 3089 3090 // Add scale to turn X*4+X*3 -> X*7. 
This could also do things like
3091 // [A+B + A*7] -> [B+A*8].
3092 TestAddrMode.Scale += Scale;
3093 TestAddrMode.ScaledReg = ScaleReg;
3094
3095 // If the new address isn't legal, bail out.
3096 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3097 return false;
3098
3099 // It was legal, so commit it.
3100 AddrMode = TestAddrMode;
3101
3102 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
3103 // to see if ScaleReg is actually X+C. If so, we can turn this into adding
3104 // X*Scale + C*Scale to addr mode.
3105 ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3106 if (isa<Instruction>(ScaleReg) && // not a constant expr.
3107 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
3108 TestAddrMode.ScaledReg = AddLHS;
3109 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
3110
3111 // If this addressing mode is legal, commit it and remember that we folded
3112 // this instruction.
3113 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3114 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3115 AddrMode = TestAddrMode;
3116 return true;
3117 }
3118 }
3119
3120 // Otherwise, not (x+c)*scale, just return what we have.
3121 return true;
3122 }
3123
3124 /// This is a little filter, which returns true if an addressing computation
3125 /// involving I might be folded into a load/store accessing it.
3126 /// This doesn't need to be perfect, but needs to accept at least
3127 /// the set of instructions that matchOperationAddr can.
3128 static bool MightBeFoldableInst(Instruction *I) {
3129 switch (I->getOpcode()) {
3130 case Instruction::BitCast:
3131 case Instruction::AddrSpaceCast:
3132 // Don't touch identity bitcasts.
3133 if (I->getType() == I->getOperand(0)->getType())
3134 return false;
3135 return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
3136 case Instruction::PtrToInt:
3137 // PtrToInt is always a noop, as we know that the int type is pointer sized.
3138 return true;
3139 case Instruction::IntToPtr:
3140 // We know the input is intptr_t, so this is foldable.
3141 return true;
3142 case Instruction::Add:
3143 return true;
3144 case Instruction::Mul:
3145 case Instruction::Shl:
3146 // Can only handle X*C and X << C.
3147 return isa<ConstantInt>(I->getOperand(1));
3148 case Instruction::GetElementPtr:
3149 return true;
3150 default:
3151 return false;
3152 }
3153 }
3154
3155 /// \brief Check whether or not \p Val is a legal instruction for \p TLI.
3156 /// \note \p Val is assumed to be the product of some type promotion.
3157 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
3158 /// to be legal, as the non-promoted value would have had the same state.
3159 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
3160 const DataLayout &DL, Value *Val) {
3161 Instruction *PromotedInst = dyn_cast<Instruction>(Val);
3162 if (!PromotedInst)
3163 return false;
3164 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
3165 // If the ISDOpcode is undefined, it was undefined before the promotion.
3166 if (!ISDOpcode)
3167 return true;
3168 // Otherwise, check if the promoted instruction is legal or not.
3169 return TLI.isOperationLegalOrCustom(
3170 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
3171 }
3172
3173 /// \brief Helper class to perform type promotion.
3174 class TypePromotionHelper { 3175 /// \brief Utility function to check whether or not a sign or zero extension 3176 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 3177 /// either using the operands of \p Inst or promoting \p Inst. 3178 /// The type of the extension is defined by \p IsSExt. 3179 /// In other words, check if: 3180 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 3181 /// #1 Promotion applies: 3182 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 3183 /// #2 Operand reuses: 3184 /// ext opnd1 to ConsideredExtType. 3185 /// \p PromotedInsts maps the instructions to their type before promotion. 3186 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 3187 const InstrToOrigTy &PromotedInsts, bool IsSExt); 3188 3189 /// \brief Utility function to determine if \p OpIdx should be promoted when 3190 /// promoting \p Inst. 3191 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 3192 return !(isa<SelectInst>(Inst) && OpIdx == 0); 3193 } 3194 3195 /// \brief Utility function to promote the operand of \p Ext when this 3196 /// operand is a promotable trunc or sext or zext. 3197 /// \p PromotedInsts maps the instructions to their type before promotion. 3198 /// \p CreatedInstsCost[out] contains the cost of all instructions 3199 /// created to promote the operand of Ext. 3200 /// Newly added extensions are inserted in \p Exts. 3201 /// Newly added truncates are inserted in \p Truncs. 3202 /// Should never be called directly. 3203 /// \return The promoted value which is used instead of Ext. 3204 static Value *promoteOperandForTruncAndAnyExt( 3205 Instruction *Ext, TypePromotionTransaction &TPT, 3206 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3207 SmallVectorImpl<Instruction *> *Exts, 3208 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 3209 3210 /// \brief Utility function to promote the operand of \p Ext when this 3211 /// operand is promotable and is not a supported trunc or sext. 3212 /// \p PromotedInsts maps the instructions to their type before promotion. 3213 /// \p CreatedInstsCost[out] contains the cost of all the instructions 3214 /// created to promote the operand of Ext. 3215 /// Newly added extensions are inserted in \p Exts. 3216 /// Newly added truncates are inserted in \p Truncs. 3217 /// Should never be called directly. 3218 /// \return The promoted value which is used instead of Ext. 3219 static Value *promoteOperandForOther(Instruction *Ext, 3220 TypePromotionTransaction &TPT, 3221 InstrToOrigTy &PromotedInsts, 3222 unsigned &CreatedInstsCost, 3223 SmallVectorImpl<Instruction *> *Exts, 3224 SmallVectorImpl<Instruction *> *Truncs, 3225 const TargetLowering &TLI, bool IsSExt); 3226 3227 /// \see promoteOperandForOther. 3228 static Value *signExtendOperandForOther( 3229 Instruction *Ext, TypePromotionTransaction &TPT, 3230 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3231 SmallVectorImpl<Instruction *> *Exts, 3232 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3233 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3234 Exts, Truncs, TLI, true); 3235 } 3236 3237 /// \see promoteOperandForOther. 
3238 static Value *zeroExtendOperandForOther(
3239 Instruction *Ext, TypePromotionTransaction &TPT,
3240 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3241 SmallVectorImpl<Instruction *> *Exts,
3242 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3243 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
3244 Exts, Truncs, TLI, false);
3245 }
3246
3247 public:
3248 /// Type for the utility function that promotes the operand of Ext.
3249 typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
3250 InstrToOrigTy &PromotedInsts,
3251 unsigned &CreatedInstsCost,
3252 SmallVectorImpl<Instruction *> *Exts,
3253 SmallVectorImpl<Instruction *> *Truncs,
3254 const TargetLowering &TLI);
3255 /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
3256 /// action to promote the operand of \p Ext instead of using Ext.
3257 /// \return NULL if no promotable action is possible with the current
3258 /// sign extension.
3259 /// \p InsertedInsts keeps track of all the instructions inserted by the
3260 /// other CodeGenPrepare optimizations. This information is important
3261 /// because we do not want to promote these instructions as CodeGenPrepare
3262 /// will reinsert them later. Thus creating an infinite loop: create/remove.
3263 /// \p PromotedInsts maps the instructions to their type before promotion.
3264 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
3265 const TargetLowering &TLI,
3266 const InstrToOrigTy &PromotedInsts);
3267 };
3268
3269 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
3270 Type *ConsideredExtType,
3271 const InstrToOrigTy &PromotedInsts,
3272 bool IsSExt) {
3273 // The promotion helper does not know how to deal with vector types yet.
3274 // To be able to fix that, we would need to fix the places where we
3275 // statically extend, e.g., constants and such.
3276 if (Inst->getType()->isVectorTy())
3277 return false;
3278
3279 // We can always get through zext.
3280 if (isa<ZExtInst>(Inst))
3281 return true;
3282
3283 // sext(sext) is ok too.
3284 if (IsSExt && isa<SExtInst>(Inst))
3285 return true;
3286
3287 // We can get through a binary operator, if it is legal. In other words, the
3288 // binary operator must have a nuw or nsw flag.
3289 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
3290 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
3291 ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
3292 (IsSExt && BinOp->hasNoSignedWrap())))
3293 return true;
3294
3295 // Check if we can do the following simplification.
3296 // ext(trunc(opnd)) --> ext(opnd)
3297 if (!isa<TruncInst>(Inst))
3298 return false;
3299
3300 Value *OpndVal = Inst->getOperand(0);
3301 // Check if we can use this operand in the extension.
3302 // If the type is larger than the result type of the extension, we cannot.
3303 if (!OpndVal->getType()->isIntegerTy() ||
3304 OpndVal->getType()->getIntegerBitWidth() >
3305 ConsideredExtType->getIntegerBitWidth())
3306 return false;
3307
3308 // If the operand of the truncate is not an instruction, we will not have
3309 // any information on the dropped bits.
3310 // (Actually we could for constants but it is not worth the extra logic).
3311 Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
3312 if (!Opnd)
3313 return false;
3314
3315 // Check if the source of the type is narrow enough.
3316 // I.e., check that trunc just drops extended bits of the same kind as
3317 // the extension.
3318 // #1 get the type of the operand and check the kind of the extended bits.
3319 const Type *OpndType;
3320 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
3321 if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
3322 OpndType = It->second.getPointer();
3323 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
3324 OpndType = Opnd->getOperand(0)->getType();
3325 else
3326 return false;
3327
3328 // #2 check that the truncate just drops extended bits.
3329 return Inst->getType()->getIntegerBitWidth() >=
3330 OpndType->getIntegerBitWidth();
3331 }
3332
3333 TypePromotionHelper::Action TypePromotionHelper::getAction(
3334 Instruction *Ext, const SetOfInstrs &InsertedInsts,
3335 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
3336 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3337 "Unexpected instruction type");
3338 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
3339 Type *ExtTy = Ext->getType();
3340 bool IsSExt = isa<SExtInst>(Ext);
3341 // If the operand of the extension is not an instruction, we cannot
3342 // get through.
3343 // If it is, check that we can get through.
3344 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
3345 return nullptr;
3346
3347 // Do not promote if the operand has been added by codegenprepare.
3348 // Otherwise, it means we are undoing an optimization that is likely to be
3349 // redone, thus causing a potential infinite loop.
3350 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
3351 return nullptr;
3352
3353 // SExt, ZExt or Trunc instructions.
3354 // Return the related handler.
3355 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
3356 isa<ZExtInst>(ExtOpnd))
3357 return promoteOperandForTruncAndAnyExt;
3358
3359 // Regular instruction.
3360 // Abort early if we will have to insert non-free instructions.
3361 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
3362 return nullptr;
3363 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
3364 }
3365
3366 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
3367 llvm::Instruction *SExt, TypePromotionTransaction &TPT,
3368 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3369 SmallVectorImpl<Instruction *> *Exts,
3370 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3371 // By construction, the operand of SExt is an instruction. Otherwise we cannot
3372 // get through it and this method should not be called.
3373 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
3374 Value *ExtVal = SExt;
3375 bool HasMergedNonFreeExt = false;
3376 if (isa<ZExtInst>(SExtOpnd)) {
3377 // Replace s|zext(zext(opnd))
3378 // => zext(opnd).
3379 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
3380 Value *ZExt =
3381 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
3382 TPT.replaceAllUsesWith(SExt, ZExt);
3383 TPT.eraseInstruction(SExt);
3384 ExtVal = ZExt;
3385 } else {
3386 // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
3387 // => z|sext(opnd).
3388 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
3389 }
3390 CreatedInstsCost = 0;
3391
3392 // Remove dead code.
3393 if (SExtOpnd->use_empty())
3394 TPT.eraseInstruction(SExtOpnd);
3395
3396 // Check if the extension is still needed.
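// (If what is left is 'ext ty opnd to ty', i.e. the source and destination
// types now match, the extension is dropped below and its operand is used
// directly; otherwise its cost is recorded in CreatedInstsCost.)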
3397 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 3398 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 3399 if (ExtInst) { 3400 if (Exts) 3401 Exts->push_back(ExtInst); 3402 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 3403 } 3404 return ExtVal; 3405 } 3406 3407 // At this point we have: ext ty opnd to ty. 3408 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 3409 Value *NextVal = ExtInst->getOperand(0); 3410 TPT.eraseInstruction(ExtInst, NextVal); 3411 return NextVal; 3412 } 3413 3414 Value *TypePromotionHelper::promoteOperandForOther( 3415 Instruction *Ext, TypePromotionTransaction &TPT, 3416 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3417 SmallVectorImpl<Instruction *> *Exts, 3418 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 3419 bool IsSExt) { 3420 // By construction, the operand of Ext is an instruction. Otherwise we cannot 3421 // get through it and this method should not be called. 3422 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 3423 CreatedInstsCost = 0; 3424 if (!ExtOpnd->hasOneUse()) { 3425 // ExtOpnd will be promoted. 3426 // All its uses, but Ext, will need to use a truncated value of the 3427 // promoted version. 3428 // Create the truncate now. 3429 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 3430 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 3431 ITrunc->removeFromParent(); 3432 // Insert it just after the definition. 3433 ITrunc->insertAfter(ExtOpnd); 3434 if (Truncs) 3435 Truncs->push_back(ITrunc); 3436 } 3437 3438 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 3439 // Restore the operand of Ext (which has been replaced by the previous call 3440 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 3441 TPT.setOperand(Ext, 0, ExtOpnd); 3442 } 3443 3444 // Get through the Instruction: 3445 // 1. Update its type. 3446 // 2. Replace the uses of Ext by Inst. 3447 // 3. Extend each operand that needs to be extended. 3448 3449 // Remember the original type of the instruction before promotion. 3450 // This is useful to know that the high bits are sign extended bits. 3451 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>( 3452 ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt))); 3453 // Step #1. 3454 TPT.mutateType(ExtOpnd, Ext->getType()); 3455 // Step #2. 3456 TPT.replaceAllUsesWith(Ext, ExtOpnd); 3457 // Step #3. 3458 Instruction *ExtForOpnd = Ext; 3459 3460 DEBUG(dbgs() << "Propagate Ext to operands\n"); 3461 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 3462 ++OpIdx) { 3463 DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 3464 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 3465 !shouldExtOperand(ExtOpnd, OpIdx)) { 3466 DEBUG(dbgs() << "No need to propagate\n"); 3467 continue; 3468 } 3469 // Check if we can statically extend the operand. 3470 Value *Opnd = ExtOpnd->getOperand(OpIdx); 3471 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 3472 DEBUG(dbgs() << "Statically extend\n"); 3473 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 3474 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 3475 : Cst->getValue().zext(BitWidth); 3476 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 3477 continue; 3478 } 3479 // UndefValue are typed, so we have to statically sign extend them. 
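// (E.g., an 'i8 undef' operand simply becomes an undef of the promoted type,
// say 'i32 undef'; no real extension instruction is needed for it.)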
3480 if (isa<UndefValue>(Opnd)) {
3481 DEBUG(dbgs() << "Statically extend\n");
3482 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
3483 continue;
3484 }
3485
3486 // Otherwise we have to explicitly sign extend the operand.
3487 // Check if Ext was reused to extend an operand.
3488 if (!ExtForOpnd) {
3489 // If yes, create a new one.
3490 DEBUG(dbgs() << "More operands to ext\n");
3491 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
3492 : TPT.createZExt(Ext, Opnd, Ext->getType());
3493 if (!isa<Instruction>(ValForExtOpnd)) {
3494 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
3495 continue;
3496 }
3497 ExtForOpnd = cast<Instruction>(ValForExtOpnd);
3498 }
3499 if (Exts)
3500 Exts->push_back(ExtForOpnd);
3501 TPT.setOperand(ExtForOpnd, 0, Opnd);
3502
3503 // Move the sign extension before the insertion point.
3504 TPT.moveBefore(ExtForOpnd, ExtOpnd);
3505 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
3506 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
3507 // If more sexts are required, new instructions will have to be created.
3508 ExtForOpnd = nullptr;
3509 }
3510 if (ExtForOpnd == Ext) {
3511 DEBUG(dbgs() << "Extension is useless now\n");
3512 TPT.eraseInstruction(Ext);
3513 }
3514 return ExtOpnd;
3515 }
3516
3517 /// Check whether or not promoting an instruction to a wider type is profitable.
3518 /// \p NewCost gives the cost of extension instructions created by the
3519 /// promotion.
3520 /// \p OldCost gives the cost of extension instructions before the promotion
3521 /// plus the number of instructions that have been
3522 /// matched in the addressing mode thanks to the promotion.
3523 /// \p PromotedOperand is the value that has been promoted.
3524 /// \return True if the promotion is profitable, false otherwise.
3525 bool AddressingModeMatcher::isPromotionProfitable(
3526 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
3527 DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
3528 // The cost of the new extensions is greater than the cost of the
3529 // old extension plus what we folded.
3530 // This is not profitable.
3531 if (NewCost > OldCost)
3532 return false;
3533 if (NewCost < OldCost)
3534 return true;
3535 // The promotion is neutral but it may help folding the sign extension in
3536 // loads for instance.
3537 // Check that we did not create an illegal instruction.
3538 return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
3539 }
3540
3541 /// Given an instruction or constant expr, see if we can fold the operation
3542 /// into the addressing mode. If so, update the addressing mode and return
3543 /// true, otherwise return false without modifying AddrMode.
3544 /// If \p MovedAway is not NULL, it contains the information of whether or
3545 /// not AddrInst has to be folded into the addressing mode on success.
3546 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
3547 /// because it has been moved away.
3548 /// Thus AddrInst must not be added to the matched instructions.
3549 /// This state can happen when AddrInst is a sext, since it may be moved away.
3550 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
3551 /// not be referenced anymore.
3552 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
3553 unsigned Depth,
3554 bool *MovedAway) {
3555 // Avoid exponential behavior on extremely deep expression trees.
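// (The Add case below, for instance, retries both operand orders, so without
// a depth cap a deep chain of adds could be re-matched an exponential number
// of times.)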
3556 if (Depth >= 5) return false; 3557 3558 // By default, all matched instructions stay in place. 3559 if (MovedAway) 3560 *MovedAway = false; 3561 3562 switch (Opcode) { 3563 case Instruction::PtrToInt: 3564 // PtrToInt is always a noop, as we know that the int type is pointer sized. 3565 return matchAddr(AddrInst->getOperand(0), Depth); 3566 case Instruction::IntToPtr: { 3567 auto AS = AddrInst->getType()->getPointerAddressSpace(); 3568 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 3569 // This inttoptr is a no-op if the integer type is pointer sized. 3570 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 3571 return matchAddr(AddrInst->getOperand(0), Depth); 3572 return false; 3573 } 3574 case Instruction::BitCast: 3575 // BitCast is always a noop, and we can handle it as long as it is 3576 // int->int or pointer->pointer (we don't want int<->fp or something). 3577 if ((AddrInst->getOperand(0)->getType()->isPointerTy() || 3578 AddrInst->getOperand(0)->getType()->isIntegerTy()) && 3579 // Don't touch identity bitcasts. These were probably put here by LSR, 3580 // and we don't want to mess around with them. Assume it knows what it 3581 // is doing. 3582 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 3583 return matchAddr(AddrInst->getOperand(0), Depth); 3584 return false; 3585 case Instruction::AddrSpaceCast: { 3586 unsigned SrcAS 3587 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 3588 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 3589 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 3590 return matchAddr(AddrInst->getOperand(0), Depth); 3591 return false; 3592 } 3593 case Instruction::Add: { 3594 // Check to see if we can merge in the RHS then the LHS. If so, we win. 3595 ExtAddrMode BackupAddrMode = AddrMode; 3596 unsigned OldSize = AddrModeInsts.size(); 3597 // Start a transaction at this point. 3598 // The LHS may match but not the RHS. 3599 // Therefore, we need a higher level restoration point to undo partially 3600 // matched operation. 3601 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3602 TPT.getRestorationPoint(); 3603 3604 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 3605 matchAddr(AddrInst->getOperand(0), Depth+1)) 3606 return true; 3607 3608 // Restore the old addr mode info. 3609 AddrMode = BackupAddrMode; 3610 AddrModeInsts.resize(OldSize); 3611 TPT.rollback(LastKnownGood); 3612 3613 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 3614 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 3615 matchAddr(AddrInst->getOperand(1), Depth+1)) 3616 return true; 3617 3618 // Otherwise we definitely can't merge the ADD in. 3619 AddrMode = BackupAddrMode; 3620 AddrModeInsts.resize(OldSize); 3621 TPT.rollback(LastKnownGood); 3622 break; 3623 } 3624 //case Instruction::Or: 3625 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 3626 //break; 3627 case Instruction::Mul: 3628 case Instruction::Shl: { 3629 // Can only handle X*C and X << C. 3630 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 3631 if (!RHS) 3632 return false; 3633 int64_t Scale = RHS->getSExtValue(); 3634 if (Opcode == Instruction::Shl) 3635 Scale = 1LL << Scale; 3636 3637 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 3638 } 3639 case Instruction::GetElementPtr: { 3640 // Scan the GEP. We check it if it contains constant offsets and at most 3641 // one variable offset. 
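// For example, for 'getelementptr %struct.S, %struct.S* %p, i64 %i, i32 1'
// (an illustrative GEP, not taken from a test), the struct field index folds
// into ConstantOffset while %i becomes the single variable index, scaled by
// the allocation size of %struct.S.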
3642 int VariableOperand = -1; 3643 unsigned VariableScale = 0; 3644 3645 int64_t ConstantOffset = 0; 3646 gep_type_iterator GTI = gep_type_begin(AddrInst); 3647 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 3648 if (StructType *STy = GTI.getStructTypeOrNull()) { 3649 const StructLayout *SL = DL.getStructLayout(STy); 3650 unsigned Idx = 3651 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 3652 ConstantOffset += SL->getElementOffset(Idx); 3653 } else { 3654 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); 3655 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 3656 ConstantOffset += CI->getSExtValue()*TypeSize; 3657 } else if (TypeSize) { // Scales of zero don't do anything. 3658 // We only allow one variable index at the moment. 3659 if (VariableOperand != -1) 3660 return false; 3661 3662 // Remember the variable index. 3663 VariableOperand = i; 3664 VariableScale = TypeSize; 3665 } 3666 } 3667 } 3668 3669 // A common case is for the GEP to only do a constant offset. In this case, 3670 // just add it to the disp field and check validity. 3671 if (VariableOperand == -1) { 3672 AddrMode.BaseOffs += ConstantOffset; 3673 if (ConstantOffset == 0 || 3674 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 3675 // Check to see if we can fold the base pointer in too. 3676 if (matchAddr(AddrInst->getOperand(0), Depth+1)) 3677 return true; 3678 } 3679 AddrMode.BaseOffs -= ConstantOffset; 3680 return false; 3681 } 3682 3683 // Save the valid addressing mode in case we can't match. 3684 ExtAddrMode BackupAddrMode = AddrMode; 3685 unsigned OldSize = AddrModeInsts.size(); 3686 3687 // See if the scale and offset amount is valid for this target. 3688 AddrMode.BaseOffs += ConstantOffset; 3689 3690 // Match the base operand of the GEP. 3691 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 3692 // If it couldn't be matched, just stuff the value in a register. 3693 if (AddrMode.HasBaseReg) { 3694 AddrMode = BackupAddrMode; 3695 AddrModeInsts.resize(OldSize); 3696 return false; 3697 } 3698 AddrMode.HasBaseReg = true; 3699 AddrMode.BaseReg = AddrInst->getOperand(0); 3700 } 3701 3702 // Match the remaining variable portion of the GEP. 3703 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 3704 Depth)) { 3705 // If it couldn't be matched, try stuffing the base into a register 3706 // instead of matching it, and retrying the match of the scale. 3707 AddrMode = BackupAddrMode; 3708 AddrModeInsts.resize(OldSize); 3709 if (AddrMode.HasBaseReg) 3710 return false; 3711 AddrMode.HasBaseReg = true; 3712 AddrMode.BaseReg = AddrInst->getOperand(0); 3713 AddrMode.BaseOffs += ConstantOffset; 3714 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 3715 VariableScale, Depth)) { 3716 // If even that didn't work, bail. 3717 AddrMode = BackupAddrMode; 3718 AddrModeInsts.resize(OldSize); 3719 return false; 3720 } 3721 } 3722 3723 return true; 3724 } 3725 case Instruction::SExt: 3726 case Instruction::ZExt: { 3727 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 3728 if (!Ext) 3729 return false; 3730 3731 // Try to move this ext out of the way of the addressing mode. 3732 // Ask for a method for doing so. 
3733 TypePromotionHelper::Action TPH = 3734 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 3735 if (!TPH) 3736 return false; 3737 3738 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3739 TPT.getRestorationPoint(); 3740 unsigned CreatedInstsCost = 0; 3741 unsigned ExtCost = !TLI.isExtFree(Ext); 3742 Value *PromotedOperand = 3743 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 3744 // SExt has been moved away. 3745 // Thus either it will be rematched later in the recursive calls or it is 3746 // gone. Anyway, we must not fold it into the addressing mode at this point. 3747 // E.g., 3748 // op = add opnd, 1 3749 // idx = ext op 3750 // addr = gep base, idx 3751 // is now: 3752 // promotedOpnd = ext opnd <- no match here 3753 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 3754 // addr = gep base, op <- match 3755 if (MovedAway) 3756 *MovedAway = true; 3757 3758 assert(PromotedOperand && 3759 "TypePromotionHelper should have filtered out those cases"); 3760 3761 ExtAddrMode BackupAddrMode = AddrMode; 3762 unsigned OldSize = AddrModeInsts.size(); 3763 3764 if (!matchAddr(PromotedOperand, Depth) || 3765 // The total of the new cost is equal to the cost of the created 3766 // instructions. 3767 // The total of the old cost is equal to the cost of the extension plus 3768 // what we have saved in the addressing mode. 3769 !isPromotionProfitable(CreatedInstsCost, 3770 ExtCost + (AddrModeInsts.size() - OldSize), 3771 PromotedOperand)) { 3772 AddrMode = BackupAddrMode; 3773 AddrModeInsts.resize(OldSize); 3774 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 3775 TPT.rollback(LastKnownGood); 3776 return false; 3777 } 3778 return true; 3779 } 3780 } 3781 return false; 3782 } 3783 3784 /// If we can, try to add the value of 'Addr' into the current addressing mode. 3785 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 3786 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 3787 /// for the target. 3788 /// 3789 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 3790 // Start a transaction at this point that we will rollback if the matching 3791 // fails. 3792 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3793 TPT.getRestorationPoint(); 3794 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 3795 // Fold in immediates if legal for the target. 3796 AddrMode.BaseOffs += CI->getSExtValue(); 3797 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3798 return true; 3799 AddrMode.BaseOffs -= CI->getSExtValue(); 3800 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 3801 // If this is a global variable, try to fold it into the addressing mode. 3802 if (!AddrMode.BaseGV) { 3803 AddrMode.BaseGV = GV; 3804 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3805 return true; 3806 AddrMode.BaseGV = nullptr; 3807 } 3808 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 3809 ExtAddrMode BackupAddrMode = AddrMode; 3810 unsigned OldSize = AddrModeInsts.size(); 3811 3812 // Check to see if it is possible to fold this operation. 3813 bool MovedAway = false; 3814 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 3815 // This instruction may have been moved away. If so, there is nothing 3816 // to check here. 3817 if (MovedAway) 3818 return true; 3819 // Okay, it's possible to fold this. Check to see if it is actually 3820 // *profitable* to do so. 
We use a simple cost model to avoid increasing
3821 // register pressure too much.
3822 if (I->hasOneUse() ||
3823 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
3824 AddrModeInsts.push_back(I);
3825 return true;
3826 }
3827
3828 // It isn't profitable to do this, roll back.
3829 //cerr << "NOT FOLDING: " << *I;
3830 AddrMode = BackupAddrMode;
3831 AddrModeInsts.resize(OldSize);
3832 TPT.rollback(LastKnownGood);
3833 }
3834 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
3835 if (matchOperationAddr(CE, CE->getOpcode(), Depth))
3836 return true;
3837 TPT.rollback(LastKnownGood);
3838 } else if (isa<ConstantPointerNull>(Addr)) {
3839 // Null pointer gets folded without affecting the addressing mode.
3840 return true;
3841 }
3842
3843 // Worst case, the target should support [reg] addressing modes. :)
3844 if (!AddrMode.HasBaseReg) {
3845 AddrMode.HasBaseReg = true;
3846 AddrMode.BaseReg = Addr;
3847 // Still check for legality in case the target supports [imm] but not [i+r].
3848 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3849 return true;
3850 AddrMode.HasBaseReg = false;
3851 AddrMode.BaseReg = nullptr;
3852 }
3853
3854 // If the base register is already taken, see if we can do [r+r].
3855 if (AddrMode.Scale == 0) {
3856 AddrMode.Scale = 1;
3857 AddrMode.ScaledReg = Addr;
3858 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3859 return true;
3860 AddrMode.Scale = 0;
3861 AddrMode.ScaledReg = nullptr;
3862 }
3863 // Couldn't match.
3864 TPT.rollback(LastKnownGood);
3865 return false;
3866 }
3867
3868 /// Check to see if all uses of OpVal by the specified inline asm call are due
3869 /// to memory operands. If so, return true, otherwise return false.
3870 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
3871 const TargetLowering &TLI,
3872 const TargetRegisterInfo &TRI) {
3873 const Function *F = CI->getParent()->getParent();
3874 TargetLowering::AsmOperandInfoVector TargetConstraints =
3875 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI,
3876 ImmutableCallSite(CI));
3877
3878 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
3879 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
3880
3881 // Compute the constraint code and ConstraintType to use.
3882 TLI.ComputeConstraintToUse(OpInfo, SDValue());
3883
3884 // If this asm operand is our Value*, and if it isn't an indirect memory
3885 // operand, we can't fold it!
3886 if (OpInfo.CallOperandVal == OpVal &&
3887 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
3888 !OpInfo.isIndirect))
3889 return false;
3890 }
3891
3892 return true;
3893 }
3894
3895 /// Recursively walk all the uses of I until we find a memory use.
3896 /// If we find an obviously non-foldable instruction, return true.
3897 /// Add the ultimately found memory instructions to MemoryUses.
3898 static bool FindAllMemoryUses(
3899 Instruction *I,
3900 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
3901 SmallPtrSetImpl<Instruction *> &ConsideredInsts,
3902 const TargetLowering &TLI, const TargetRegisterInfo &TRI) {
3903 // If we already considered this instruction, we're done.
3904 if (!ConsideredInsts.insert(I).second)
3905 return false;
3906
3907 // If this is an obviously unfoldable instruction, bail out.
3908 if (!MightBeFoldableInst(I))
3909 return true;
3910
3911 const bool OptSize = I->getFunction()->optForSize();
3912
3913 // Loop over all the uses, recursively processing them.
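// Loads, stores and atomics whose pointer operand is the value count as
// memory uses; cold calls and inline asm memory operands are tolerated; any
// other user is walked recursively and must itself look foldable.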
3914 for (Use &U : I->uses()) { 3915 Instruction *UserI = cast<Instruction>(U.getUser()); 3916 3917 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 3918 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 3919 continue; 3920 } 3921 3922 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 3923 unsigned opNo = U.getOperandNo(); 3924 if (opNo != StoreInst::getPointerOperandIndex()) 3925 return true; // Storing addr, not into addr. 3926 MemoryUses.push_back(std::make_pair(SI, opNo)); 3927 continue; 3928 } 3929 3930 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 3931 unsigned opNo = U.getOperandNo(); 3932 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 3933 return true; // Storing addr, not into addr. 3934 MemoryUses.push_back(std::make_pair(RMW, opNo)); 3935 continue; 3936 } 3937 3938 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 3939 unsigned opNo = U.getOperandNo(); 3940 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 3941 return true; // Storing addr, not into addr. 3942 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 3943 continue; 3944 } 3945 3946 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 3947 // If this is a cold call, we can sink the addressing calculation into 3948 // the cold path. See optimizeCallInst 3949 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 3950 continue; 3951 3952 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 3953 if (!IA) return true; 3954 3955 // If this is a memory operand, we're cool, otherwise bail out. 3956 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 3957 return true; 3958 continue; 3959 } 3960 3961 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI)) 3962 return true; 3963 } 3964 3965 return false; 3966 } 3967 3968 /// Return true if Val is already known to be live at the use site that we're 3969 /// folding it into. If so, there is no cost to include it in the addressing 3970 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 3971 /// instruction already. 3972 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 3973 Value *KnownLive2) { 3974 // If Val is either of the known-live values, we know it is live! 3975 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 3976 return true; 3977 3978 // All values other than instructions and arguments (e.g. constants) are live. 3979 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 3980 3981 // If Val is a constant sized alloca in the entry block, it is live, this is 3982 // true because it is just a reference to the stack/frame pointer, which is 3983 // live for the whole function. 3984 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 3985 if (AI->isStaticAlloca()) 3986 return true; 3987 3988 // Check to see if this value is already used in the memory instruction's 3989 // block. If so, it's already live into the block at the very least, so we 3990 // can reasonably fold it. 3991 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 3992 } 3993 3994 /// It is possible for the addressing mode of the machine to fold the specified 3995 /// instruction into a load or store that ultimately uses it. 3996 /// However, the specified instruction has multiple uses. 3997 /// Given this, it may actually increase register pressure to fold it 3998 /// into the load. For example, consider this code: 3999 /// 4000 /// X = ... 
4001 /// Y = X+1
4002 /// use(Y) -> nonload/store
4003 /// Z = Y+1
4004 /// load Z
4005 ///
4006 /// In this case, Y has multiple uses, and can be folded into the load of Z
4007 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
4008 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
4009 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
4010 /// number of computations either.
4011 ///
4012 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
4013 /// X was live across 'load Z' for other reasons, we actually *would* want to
4014 /// fold the addressing mode in the Z case. This would make Y die earlier.
4015 bool AddressingModeMatcher::
4016 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
4017 ExtAddrMode &AMAfter) {
4018 if (IgnoreProfitability) return true;
4019
4020 // AMBefore is the addressing mode before this instruction was folded into it,
4021 // and AMAfter is the addressing mode after the instruction was folded. Get
4022 // the set of registers referenced by AMAfter and subtract out those
4023 // referenced by AMBefore: this is the set of values which folding in this
4024 // address extends the lifetime of.
4025 //
4026 // Note that there are only two potential values being referenced here,
4027 // BaseReg and ScaleReg (global addresses are always available, as are any
4028 // folded immediates).
4029 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
4030
4031 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
4032 // lifetime wasn't extended by adding this instruction.
4033 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4034 BaseReg = nullptr;
4035 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4036 ScaledReg = nullptr;
4037
4038 // If folding this instruction (and its subexprs) didn't extend any live
4039 // ranges, we're ok with it.
4040 if (!BaseReg && !ScaledReg)
4041 return true;
4042
4043 // If all uses of this instruction can have the address mode sunk into them,
4044 // we can remove the addressing mode and effectively trade one live register
4045 // for another (at worst). In this context, folding an addressing mode into
4046 // the use is just a particularly nice way of sinking it.
4047 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
4048 SmallPtrSet<Instruction*, 16> ConsideredInsts;
4049 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI))
4050 return false; // Has a non-memory, non-foldable use!
4051
4052 // Now that we know that all uses of this instruction are part of a chain of
4053 // computation involving only operations that could theoretically be folded
4054 // into a memory use, loop over each of these memory operation uses and see
4055 // if they could *actually* fold the instruction. The assumption is that
4056 // addressing modes are cheap and that duplicating the computation involved
4057 // many times is worthwhile, even on a fastpath. For sinking candidates
4058 // (i.e. cold call sites), this serves as a way to prevent excessive code
4059 // growth since most architectures have some reasonable small and fast way to
4060 // compute an effective address.
(e.g., LEA on x86)
4061 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
4062 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
4063 Instruction *User = MemoryUses[i].first;
4064 unsigned OpNo = MemoryUses[i].second;
4065
4066 // Get the access type of this use. If the use isn't a pointer, we don't
4067 // know what it accesses.
4068 Value *Address = User->getOperand(OpNo);
4069 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
4070 if (!AddrTy)
4071 return false;
4072 Type *AddressAccessTy = AddrTy->getElementType();
4073 unsigned AS = AddrTy->getAddressSpace();
4074
4075 // Do a match against the root of this address, ignoring profitability. This
4076 // will tell us if the addressing mode for the memory operation will
4077 // *actually* cover the shared instruction.
4078 ExtAddrMode Result;
4079 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4080 TPT.getRestorationPoint();
4081 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI,
4082 AddressAccessTy, AS,
4083 MemoryInst, Result, InsertedInsts,
4084 PromotedInsts, TPT);
4085 Matcher.IgnoreProfitability = true;
4086 bool Success = Matcher.matchAddr(Address, 0);
4087 (void)Success; assert(Success && "Couldn't select *anything*?");
4088
4089 // The match was to check the profitability, the changes made are not
4090 // part of the original matcher. Therefore, they should be dropped
4091 // otherwise the original matcher will not present the right state.
4092 TPT.rollback(LastKnownGood);
4093
4094 // If the match didn't cover I, then it won't be shared by it.
4095 if (!is_contained(MatchedAddrModeInsts, I))
4096 return false;
4097
4098 MatchedAddrModeInsts.clear();
4099 }
4100
4101 return true;
4102 }
4103
4104 } // end anonymous namespace
4105
4106 /// Return true if the specified value is defined in a
4107 /// different basic block than BB.
4108 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
4109 if (Instruction *I = dyn_cast<Instruction>(V))
4110 return I->getParent() != BB;
4111 return false;
4112 }
4113
4114 /// Sink addressing mode computation immediately before MemoryInst if doing so
4115 /// can be done without increasing register pressure. The need for the
4116 /// register pressure constraint means this can end up being an all or nothing
4117 /// decision for all uses of the same addressing computation.
4118 ///
4119 /// Load and Store Instructions often have addressing modes that can do
4120 /// significant amounts of computation. As such, instruction selection will try
4121 /// to get the load or store to do as much computation as possible for the
4122 /// program. The problem is that isel can only see within a single block. As
4123 /// such, we sink as much legal addressing mode work into the block as possible.
4124 ///
4125 /// This method is used to optimize both load/store and inline asms with memory
4126 /// operands. It's also used to sink addressing computations feeding into cold
4127 /// call sites into their (cold) basic block.
4128 ///
4129 /// The motivation for handling sinking into cold blocks is that doing so can
4130 /// both enable other address mode sinking (by satisfying the register pressure
4131 /// constraint above), and reduce register pressure globally (by removing the
4132 /// addressing mode computation from the fast path entirely).
4133 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
4134 Type *AccessTy, unsigned AddrSpace) {
4135 Value *Repl = Addr;
4136
4137 // Try to collapse single-value PHI nodes.
This is necessary to undo 4138 // unprofitable PRE transformations. 4139 SmallVector<Value*, 8> worklist; 4140 SmallPtrSet<Value*, 16> Visited; 4141 worklist.push_back(Addr); 4142 4143 // Use a worklist to iteratively look through PHI nodes, and ensure that 4144 // the addressing mode obtained from the non-PHI roots of the graph 4145 // are equivalent. 4146 Value *Consensus = nullptr; 4147 unsigned NumUsesConsensus = 0; 4148 bool IsNumUsesConsensusValid = false; 4149 SmallVector<Instruction*, 16> AddrModeInsts; 4150 ExtAddrMode AddrMode; 4151 TypePromotionTransaction TPT(RemovedInsts); 4152 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4153 TPT.getRestorationPoint(); 4154 while (!worklist.empty()) { 4155 Value *V = worklist.back(); 4156 worklist.pop_back(); 4157 4158 // Break use-def graph loops. 4159 if (!Visited.insert(V).second) { 4160 Consensus = nullptr; 4161 break; 4162 } 4163 4164 // For a PHI node, push all of its incoming values. 4165 if (PHINode *P = dyn_cast<PHINode>(V)) { 4166 for (Value *IncValue : P->incoming_values()) 4167 worklist.push_back(IncValue); 4168 continue; 4169 } 4170 4171 // For non-PHIs, determine the addressing mode being computed. Note that 4172 // the result may differ depending on what other uses our candidate 4173 // addressing instructions might have. 4174 SmallVector<Instruction*, 16> NewAddrModeInsts; 4175 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 4176 V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TLI, *TRI, 4177 InsertedInsts, PromotedInsts, TPT); 4178 4179 // This check is broken into two cases with very similar code to avoid using 4180 // getNumUses() as much as possible. Some values have a lot of uses, so 4181 // calling getNumUses() unconditionally caused a significant compile-time 4182 // regression. 4183 if (!Consensus) { 4184 Consensus = V; 4185 AddrMode = NewAddrMode; 4186 AddrModeInsts = NewAddrModeInsts; 4187 continue; 4188 } else if (NewAddrMode == AddrMode) { 4189 if (!IsNumUsesConsensusValid) { 4190 NumUsesConsensus = Consensus->getNumUses(); 4191 IsNumUsesConsensusValid = true; 4192 } 4193 4194 // Ensure that the obtained addressing mode is equivalent to that obtained 4195 // for all other roots of the PHI traversal. Also, when choosing one 4196 // such root as representative, select the one with the most uses in order 4197 // to keep the cost modeling heuristics in AddressingModeMatcher 4198 // applicable. 4199 unsigned NumUses = V->getNumUses(); 4200 if (NumUses > NumUsesConsensus) { 4201 Consensus = V; 4202 NumUsesConsensus = NumUses; 4203 AddrModeInsts = NewAddrModeInsts; 4204 } 4205 continue; 4206 } 4207 4208 Consensus = nullptr; 4209 break; 4210 } 4211 4212 // If the addressing mode couldn't be determined, or if multiple different 4213 // ones were determined, bail out now. 4214 if (!Consensus) { 4215 TPT.rollback(LastKnownGood); 4216 return false; 4217 } 4218 TPT.commit(); 4219 4220 // If all the instructions matched are already in this BB, don't do anything. 4221 if (none_of(AddrModeInsts, [&](Value *V) { 4222 return IsNonLocalValue(V, MemoryInst->getParent()); 4223 })) { 4224 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); 4225 return false; 4226 } 4227 4228 // Insert this computation right after this user. Since our caller is 4229 // scanning from the top of the BB to the bottom, reuse of the expr are 4230 // guaranteed to happen later. 
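// The builder is anchored at MemoryInst so that the sunken address
// computation is materialized immediately before the memory access that
// consumes it.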
4231 IRBuilder<> Builder(MemoryInst); 4232 4233 // Now that we determined the addressing expression we want to use and know 4234 // that we have to sink it into this block. Check to see if we have already 4235 // done this for some other load/store instr in this block. If so, reuse the 4236 // computation. 4237 Value *&SunkAddr = SunkAddrs[Addr]; 4238 if (SunkAddr) { 4239 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " 4240 << *MemoryInst << "\n"); 4241 if (SunkAddr->getType() != Addr->getType()) 4242 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4243 } else if (AddrSinkUsingGEPs || 4244 (!AddrSinkUsingGEPs.getNumOccurrences() && TM && 4245 SubtargetInfo->useAA())) { 4246 // By default, we use the GEP-based method when AA is used later. This 4247 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 4248 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 4249 << *MemoryInst << "\n"); 4250 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4251 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 4252 4253 // First, find the pointer. 4254 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 4255 ResultPtr = AddrMode.BaseReg; 4256 AddrMode.BaseReg = nullptr; 4257 } 4258 4259 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 4260 // We can't add more than one pointer together, nor can we scale a 4261 // pointer (both of which seem meaningless). 4262 if (ResultPtr || AddrMode.Scale != 1) 4263 return false; 4264 4265 ResultPtr = AddrMode.ScaledReg; 4266 AddrMode.Scale = 0; 4267 } 4268 4269 if (AddrMode.BaseGV) { 4270 if (ResultPtr) 4271 return false; 4272 4273 ResultPtr = AddrMode.BaseGV; 4274 } 4275 4276 // If the real base value actually came from an inttoptr, then the matcher 4277 // will look through it and provide only the integer value. In that case, 4278 // use it here. 4279 if (!ResultPtr && AddrMode.BaseReg) { 4280 ResultPtr = 4281 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); 4282 AddrMode.BaseReg = nullptr; 4283 } else if (!ResultPtr && AddrMode.Scale == 1) { 4284 ResultPtr = 4285 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); 4286 AddrMode.Scale = 0; 4287 } 4288 4289 if (!ResultPtr && 4290 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 4291 SunkAddr = Constant::getNullValue(Addr->getType()); 4292 } else if (!ResultPtr) { 4293 return false; 4294 } else { 4295 Type *I8PtrTy = 4296 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 4297 Type *I8Ty = Builder.getInt8Ty(); 4298 4299 // Start with the base register. Do this first so that subsequent address 4300 // matching finds it last, which will prevent it from trying to match it 4301 // as the scaled value in case it happens to be a mul. That would be 4302 // problematic if we've sunk a different mul for the scale, because then 4303 // we'd end up sinking both muls. 4304 if (AddrMode.BaseReg) { 4305 Value *V = AddrMode.BaseReg; 4306 if (V->getType() != IntPtrTy) 4307 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4308 4309 ResultIndex = V; 4310 } 4311 4312 // Add the scale value. 4313 if (AddrMode.Scale) { 4314 Value *V = AddrMode.ScaledReg; 4315 if (V->getType() == IntPtrTy) { 4316 // done. 
4317 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 4318 cast<IntegerType>(V->getType())->getBitWidth()) { 4319 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4320 } else { 4321 // It is only safe to sign extend the BaseReg if we know that the math 4322 // required to create it did not overflow before we extend it. Since 4323 // the original IR value was tossed in favor of a constant back when 4324 // the AddrMode was created we need to bail out gracefully if widths 4325 // do not match instead of extending it. 4326 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); 4327 if (I && (ResultIndex != AddrMode.BaseReg)) 4328 I->eraseFromParent(); 4329 return false; 4330 } 4331 4332 if (AddrMode.Scale != 1) 4333 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4334 "sunkaddr"); 4335 if (ResultIndex) 4336 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 4337 else 4338 ResultIndex = V; 4339 } 4340 4341 // Add in the Base Offset if present. 4342 if (AddrMode.BaseOffs) { 4343 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4344 if (ResultIndex) { 4345 // We need to add this separately from the scale above to help with 4346 // SDAG consecutive load/store merging. 4347 if (ResultPtr->getType() != I8PtrTy) 4348 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4349 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4350 } 4351 4352 ResultIndex = V; 4353 } 4354 4355 if (!ResultIndex) { 4356 SunkAddr = ResultPtr; 4357 } else { 4358 if (ResultPtr->getType() != I8PtrTy) 4359 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4360 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4361 } 4362 4363 if (SunkAddr->getType() != Addr->getType()) 4364 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4365 } 4366 } else { 4367 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 4368 << *MemoryInst << "\n"); 4369 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4370 Value *Result = nullptr; 4371 4372 // Start with the base register. Do this first so that subsequent address 4373 // matching finds it last, which will prevent it from trying to match it 4374 // as the scaled value in case it happens to be a mul. That would be 4375 // problematic if we've sunk a different mul for the scale, because then 4376 // we'd end up sinking both muls. 4377 if (AddrMode.BaseReg) { 4378 Value *V = AddrMode.BaseReg; 4379 if (V->getType()->isPointerTy()) 4380 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4381 if (V->getType() != IntPtrTy) 4382 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4383 Result = V; 4384 } 4385 4386 // Add the scale value. 4387 if (AddrMode.Scale) { 4388 Value *V = AddrMode.ScaledReg; 4389 if (V->getType() == IntPtrTy) { 4390 // done. 4391 } else if (V->getType()->isPointerTy()) { 4392 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4393 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 4394 cast<IntegerType>(V->getType())->getBitWidth()) { 4395 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4396 } else { 4397 // It is only safe to sign extend the BaseReg if we know that the math 4398 // required to create it did not overflow before we extend it. Since 4399 // the original IR value was tossed in favor of a constant back when 4400 // the AddrMode was created we need to bail out gracefully if widths 4401 // do not match instead of extending it. 
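// Before bailing out, erase any intermediate 'sunkaddr' cast already created
// for the base register so that no dead instructions are left behind.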
4402 Instruction *I = dyn_cast_or_null<Instruction>(Result); 4403 if (I && (Result != AddrMode.BaseReg)) 4404 I->eraseFromParent(); 4405 return false; 4406 } 4407 if (AddrMode.Scale != 1) 4408 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4409 "sunkaddr"); 4410 if (Result) 4411 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4412 else 4413 Result = V; 4414 } 4415 4416 // Add in the BaseGV if present. 4417 if (AddrMode.BaseGV) { 4418 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 4419 if (Result) 4420 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4421 else 4422 Result = V; 4423 } 4424 4425 // Add in the Base Offset if present. 4426 if (AddrMode.BaseOffs) { 4427 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4428 if (Result) 4429 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4430 else 4431 Result = V; 4432 } 4433 4434 if (!Result) 4435 SunkAddr = Constant::getNullValue(Addr->getType()); 4436 else 4437 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 4438 } 4439 4440 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 4441 4442 // If we have no uses, recursively delete the value and all dead instructions 4443 // using it. 4444 if (Repl->use_empty()) { 4445 // This can cause recursive deletion, which can invalidate our iterator. 4446 // Use a WeakTrackingVH to hold onto it in case this happens. 4447 Value *CurValue = &*CurInstIterator; 4448 WeakTrackingVH IterHandle(CurValue); 4449 BasicBlock *BB = CurInstIterator->getParent(); 4450 4451 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 4452 4453 if (IterHandle != CurValue) { 4454 // If the iterator instruction was recursively deleted, start over at the 4455 // start of the block. 4456 CurInstIterator = BB->begin(); 4457 SunkAddrs.clear(); 4458 } 4459 } 4460 ++NumMemoryInsts; 4461 return true; 4462 } 4463 4464 /// If there are any memory operands, use OptimizeMemoryInst to sink their 4465 /// address computing into the block when possible / profitable. 4466 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 4467 bool MadeChange = false; 4468 4469 const TargetRegisterInfo *TRI = 4470 TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo(); 4471 TargetLowering::AsmOperandInfoVector TargetConstraints = 4472 TLI->ParseConstraints(*DL, TRI, CS); 4473 unsigned ArgNo = 0; 4474 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4475 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4476 4477 // Compute the constraint code and ConstraintType to use. 4478 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 4479 4480 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 4481 OpInfo.isIndirect) { 4482 Value *OpVal = CS->getArgOperand(ArgNo++); 4483 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 4484 } else if (OpInfo.Type == InlineAsm::isInput) 4485 ArgNo++; 4486 } 4487 4488 return MadeChange; 4489 } 4490 4491 /// \brief Check if all the uses of \p Val are equivalent (or free) zero or 4492 /// sign extensions. 
4493 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 4494 assert(!Val->use_empty() && "Input must have at least one use"); 4495 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 4496 bool IsSExt = isa<SExtInst>(FirstUser); 4497 Type *ExtTy = FirstUser->getType(); 4498 for (const User *U : Val->users()) { 4499 const Instruction *UI = cast<Instruction>(U); 4500 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 4501 return false; 4502 Type *CurTy = UI->getType(); 4503 // Same input and output types: Same instruction after CSE. 4504 if (CurTy == ExtTy) 4505 continue; 4506 4507 // If IsSExt is true, we are in this situation: 4508 // a = Val 4509 // b = sext ty1 a to ty2 4510 // c = sext ty1 a to ty3 4511 // Assuming ty2 is shorter than ty3, this could be turned into: 4512 // a = Val 4513 // b = sext ty1 a to ty2 4514 // c = sext ty2 b to ty3 4515 // However, the last sext is not free. 4516 if (IsSExt) 4517 return false; 4518 4519 // This is a ZExt, maybe this is free to extend from one type to another. 4520 // In that case, we would not account for a different use. 4521 Type *NarrowTy; 4522 Type *LargeTy; 4523 if (ExtTy->getScalarType()->getIntegerBitWidth() > 4524 CurTy->getScalarType()->getIntegerBitWidth()) { 4525 NarrowTy = CurTy; 4526 LargeTy = ExtTy; 4527 } else { 4528 NarrowTy = ExtTy; 4529 LargeTy = CurTy; 4530 } 4531 4532 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 4533 return false; 4534 } 4535 // All uses are the same or can be derived from one another for free. 4536 return true; 4537 } 4538 4539 /// \brief Try to speculatively promote extensions in \p Exts and continue 4540 /// promoting through newly promoted operands recursively as far as doing so is 4541 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 4542 /// When some promotion happened, \p TPT contains the proper state to revert 4543 /// them. 4544 /// 4545 /// \return true if some promotion happened, false otherwise. 4546 bool CodeGenPrepare::tryToPromoteExts( 4547 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 4548 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 4549 unsigned CreatedInstsCost) { 4550 bool Promoted = false; 4551 4552 // Iterate over all the extensions to try to promote them. 4553 for (auto I : Exts) { 4554 // Early check if we directly have ext(load). 4555 if (isa<LoadInst>(I->getOperand(0))) { 4556 ProfitablyMovedExts.push_back(I); 4557 continue; 4558 } 4559 4560 // Check whether or not we want to do any promotion. The reason we have 4561 // this check inside the for loop is to catch the case where an extension 4562 // is directly fed by a load because in such case the extension can be moved 4563 // up without any promotion on its operands. 4564 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) 4565 return false; 4566 4567 // Get the action to perform the promotion. 4568 TypePromotionHelper::Action TPH = 4569 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 4570 // Check if we can promote. 4571 if (!TPH) { 4572 // Save the current extension as we cannot move up through its operand. 4573 ProfitablyMovedExts.push_back(I); 4574 continue; 4575 } 4576 4577 // Save the current state. 4578 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4579 TPT.getRestorationPoint(); 4580 SmallVector<Instruction *, 4> NewExts; 4581 unsigned NewCreatedInstsCost = 0; 4582 unsigned ExtCost = !TLI->isExtFree(I); 4583 // Promote. 
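    // Apply the promotion action computed above: it rewrites the operand
    // chain of I through the transaction TPT and collects any newly created
    // extensions and their creation cost in NewExts / NewCreatedInstsCost.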
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We would be able to merge only one extension in a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
      // This promotion is not profitable. Roll back to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable, roll
    // back and save the current extension (I) as the last profitable
    // extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}

/// Merge redundant sexts when one dominates the other.
bool CodeGenPrepare::mergeSExts(Function &F) {
  DominatorTree DT(F);
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (DT.dominates(Inst, Pt)) {
          Pt->replaceAllUsesWith(Inst);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!DT.dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          continue;
        Inst->replaceAllUsesWith(Pt);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}

/// Return true if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  EVT VT = TLI->getValueType(*DL, Inst->getType());
  EVT LoadVT = TLI->getValueType(*DL, LI->getType());

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() && (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
      !TLI->isTruncateFree(Inst->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(Inst))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(Inst) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }

  return TLI->isLoadExtLegal(LType, VT, LoadVT);
}

/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used in memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  // ExtLoad formation and address type promotion infrastructure requires TLI
  // to be effective.
  if (!TLI)
    return false;

  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operation for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
4769 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( 4770 *Inst, AllowPromotionWithoutCommonHeader); 4771 TypePromotionTransaction TPT(RemovedInsts); 4772 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4773 TPT.getRestorationPoint(); 4774 SmallVector<Instruction *, 1> Exts; 4775 SmallVector<Instruction *, 2> SpeculativelyMovedExts; 4776 Exts.push_back(Inst); 4777 4778 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); 4779 4780 // Look for a load being extended. 4781 LoadInst *LI = nullptr; 4782 Instruction *ExtFedByLoad; 4783 4784 // Try to promote a chain of computation if it allows to form an extended 4785 // load. 4786 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { 4787 assert(LI && ExtFedByLoad && "Expect a valid load and extension"); 4788 TPT.commit(); 4789 // Move the extend into the same block as the load 4790 ExtFedByLoad->removeFromParent(); 4791 ExtFedByLoad->insertAfter(LI); 4792 // CGP does not check if the zext would be speculatively executed when moved 4793 // to the same basic block as the load. Preserving its original location 4794 // would pessimize the debugging experience, as well as negatively impact 4795 // the quality of sample pgo. We don't want to use "line 0" as that has a 4796 // size cost in the line-table section and logically the zext can be seen as 4797 // part of the load. Therefore we conservatively reuse the same debug 4798 // location for the load and the zext. 4799 ExtFedByLoad->setDebugLoc(LI->getDebugLoc()); 4800 ++NumExtsMoved; 4801 Inst = ExtFedByLoad; 4802 return true; 4803 } 4804 4805 // Continue promoting SExts if known as considerable depending on targets. 4806 if (ATPConsiderable && 4807 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, 4808 HasPromoted, TPT, SpeculativelyMovedExts)) 4809 return true; 4810 4811 TPT.rollback(LastKnownGood); 4812 return false; 4813 } 4814 4815 // Perform address type promotion if doing so is profitable. 4816 // If AllowPromotionWithoutCommonHeader == false, we should find other sext 4817 // instructions that sign extended the same initial value. However, if 4818 // AllowPromotionWithoutCommonHeader == true, we expect promoting the 4819 // extension is just profitable. 4820 bool CodeGenPrepare::performAddressTypePromotion( 4821 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 4822 bool HasPromoted, TypePromotionTransaction &TPT, 4823 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 4824 bool Promoted = false; 4825 SmallPtrSet<Instruction *, 1> UnhandledExts; 4826 bool AllSeenFirst = true; 4827 for (auto I : SpeculativelyMovedExts) { 4828 Value *HeadOfChain = I->getOperand(0); 4829 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 4830 SeenChainsForSExt.find(HeadOfChain); 4831 // If there is an unhandled SExt which has the same header, try to promote 4832 // it as well. 4833 if (AlreadySeen != SeenChainsForSExt.end()) { 4834 if (AlreadySeen->second != nullptr) 4835 UnhandledExts.insert(AlreadySeen->second); 4836 AllSeenFirst = false; 4837 } 4838 } 4839 4840 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 4841 SpeculativelyMovedExts.size() == 1)) { 4842 TPT.commit(); 4843 if (HasPromoted) 4844 Promoted = true; 4845 for (auto I : SpeculativelyMovedExts) { 4846 Value *HeadOfChain = I->getOperand(0); 4847 SeenChainsForSExt[HeadOfChain] = nullptr; 4848 ValToSExtendedUses[HeadOfChain].push_back(I); 4849 } 4850 // Update Inst as promotion happen. 
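    // Hand the last speculatively moved extension back to the caller as the
    // new current instruction.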
4851 Inst = SpeculativelyMovedExts.pop_back_val(); 4852 } else { 4853 // This is the first chain visited from the header, keep the current chain 4854 // as unhandled. Defer to promote this until we encounter another SExt 4855 // chain derived from the same header. 4856 for (auto I : SpeculativelyMovedExts) { 4857 Value *HeadOfChain = I->getOperand(0); 4858 SeenChainsForSExt[HeadOfChain] = Inst; 4859 } 4860 return false; 4861 } 4862 4863 if (!AllSeenFirst && !UnhandledExts.empty()) 4864 for (auto VisitedSExt : UnhandledExts) { 4865 if (RemovedInsts.count(VisitedSExt)) 4866 continue; 4867 TypePromotionTransaction TPT(RemovedInsts); 4868 SmallVector<Instruction *, 1> Exts; 4869 SmallVector<Instruction *, 2> Chains; 4870 Exts.push_back(VisitedSExt); 4871 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 4872 TPT.commit(); 4873 if (HasPromoted) 4874 Promoted = true; 4875 for (auto I : Chains) { 4876 Value *HeadOfChain = I->getOperand(0); 4877 // Mark this as handled. 4878 SeenChainsForSExt[HeadOfChain] = nullptr; 4879 ValToSExtendedUses[HeadOfChain].push_back(I); 4880 } 4881 } 4882 return Promoted; 4883 } 4884 4885 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 4886 BasicBlock *DefBB = I->getParent(); 4887 4888 // If the result of a {s|z}ext and its source are both live out, rewrite all 4889 // other uses of the source with result of extension. 4890 Value *Src = I->getOperand(0); 4891 if (Src->hasOneUse()) 4892 return false; 4893 4894 // Only do this xform if truncating is free. 4895 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 4896 return false; 4897 4898 // Only safe to perform the optimization if the source is also defined in 4899 // this block. 4900 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 4901 return false; 4902 4903 bool DefIsLiveOut = false; 4904 for (User *U : I->users()) { 4905 Instruction *UI = cast<Instruction>(U); 4906 4907 // Figure out which BB this ext is used in. 4908 BasicBlock *UserBB = UI->getParent(); 4909 if (UserBB == DefBB) continue; 4910 DefIsLiveOut = true; 4911 break; 4912 } 4913 if (!DefIsLiveOut) 4914 return false; 4915 4916 // Make sure none of the uses are PHI nodes. 4917 for (User *U : Src->users()) { 4918 Instruction *UI = cast<Instruction>(U); 4919 BasicBlock *UserBB = UI->getParent(); 4920 if (UserBB == DefBB) continue; 4921 // Be conservative. We don't want this xform to end up introducing 4922 // reloads just before load / store instructions. 4923 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 4924 return false; 4925 } 4926 4927 // InsertedTruncs - Only insert one trunc in each block once. 4928 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 4929 4930 bool MadeChange = false; 4931 for (Use &U : Src->uses()) { 4932 Instruction *User = cast<Instruction>(U.getUser()); 4933 4934 // Figure out which BB this ext is used in. 4935 BasicBlock *UserBB = User->getParent(); 4936 if (UserBB == DefBB) continue; 4937 4938 // Both src and def are live in this block. Rewrite the use. 4939 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 4940 4941 if (!InsertedTrunc) { 4942 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 4943 assert(InsertPt != UserBB->end()); 4944 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 4945 InsertedInsts.insert(InsertedTrunc); 4946 } 4947 4948 // Replace a use of the {s|z}ext source with a use of the result. 
4949 U = InsertedTrunc; 4950 ++NumExtUses; 4951 MadeChange = true; 4952 } 4953 4954 return MadeChange; 4955 } 4956 4957 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 4958 // just after the load if the target can fold this into one extload instruction, 4959 // with the hope of eliminating some of the other later "and" instructions using 4960 // the loaded value. "and"s that are made trivially redundant by the insertion 4961 // of the new "and" are removed by this function, while others (e.g. those whose 4962 // path from the load goes through a phi) are left for isel to potentially 4963 // remove. 4964 // 4965 // For example: 4966 // 4967 // b0: 4968 // x = load i32 4969 // ... 4970 // b1: 4971 // y = and x, 0xff 4972 // z = use y 4973 // 4974 // becomes: 4975 // 4976 // b0: 4977 // x = load i32 4978 // x' = and x, 0xff 4979 // ... 4980 // b1: 4981 // z = use x' 4982 // 4983 // whereas: 4984 // 4985 // b0: 4986 // x1 = load i32 4987 // ... 4988 // b1: 4989 // x2 = load i32 4990 // ... 4991 // b2: 4992 // x = phi x1, x2 4993 // y = and x, 0xff 4994 // 4995 // becomes (after a call to optimizeLoadExt for each load): 4996 // 4997 // b0: 4998 // x1 = load i32 4999 // x1' = and x1, 0xff 5000 // ... 5001 // b1: 5002 // x2 = load i32 5003 // x2' = and x2, 0xff 5004 // ... 5005 // b2: 5006 // x = phi x1', x2' 5007 // y = and x, 0xff 5008 // 5009 5010 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 5011 5012 if (!Load->isSimple() || 5013 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) 5014 return false; 5015 5016 // Skip loads we've already transformed. 5017 if (Load->hasOneUse() && 5018 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 5019 return false; 5020 5021 // Look at all uses of Load, looking through phis, to determine how many bits 5022 // of the loaded value are needed. 5023 SmallVector<Instruction *, 8> WorkList; 5024 SmallPtrSet<Instruction *, 16> Visited; 5025 SmallVector<Instruction *, 8> AndsToMaybeRemove; 5026 for (auto *U : Load->users()) 5027 WorkList.push_back(cast<Instruction>(U)); 5028 5029 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 5030 unsigned BitWidth = LoadResultVT.getSizeInBits(); 5031 APInt DemandBits(BitWidth, 0); 5032 APInt WidestAndBits(BitWidth, 0); 5033 5034 while (!WorkList.empty()) { 5035 Instruction *I = WorkList.back(); 5036 WorkList.pop_back(); 5037 5038 // Break use-def graph loops. 5039 if (!Visited.insert(I).second) 5040 continue; 5041 5042 // For a PHI node, push all of its users. 5043 if (auto *Phi = dyn_cast<PHINode>(I)) { 5044 for (auto *U : Phi->users()) 5045 WorkList.push_back(cast<Instruction>(U)); 5046 continue; 5047 } 5048 5049 switch (I->getOpcode()) { 5050 case llvm::Instruction::And: { 5051 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 5052 if (!AndC) 5053 return false; 5054 APInt AndBits = AndC->getValue(); 5055 DemandBits |= AndBits; 5056 // Keep track of the widest and mask we see. 
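      // (We later insert the new "and" only if this widest mask equals
      // DemandBits exactly; those are the only "and"s that become redundant.)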
5057 if (AndBits.ugt(WidestAndBits)) 5058 WidestAndBits = AndBits; 5059 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 5060 AndsToMaybeRemove.push_back(I); 5061 break; 5062 } 5063 5064 case llvm::Instruction::Shl: { 5065 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 5066 if (!ShlC) 5067 return false; 5068 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 5069 DemandBits.setLowBits(BitWidth - ShiftAmt); 5070 break; 5071 } 5072 5073 case llvm::Instruction::Trunc: { 5074 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 5075 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 5076 DemandBits.setLowBits(TruncBitWidth); 5077 break; 5078 } 5079 5080 default: 5081 return false; 5082 } 5083 } 5084 5085 uint32_t ActiveBits = DemandBits.getActiveBits(); 5086 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 5087 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 5088 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 5089 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 5090 // followed by an AND. 5091 // TODO: Look into removing this restriction by fixing backends to either 5092 // return false for isLoadExtLegal for i1 or have them select this pattern to 5093 // a single instruction. 5094 // 5095 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 5096 // mask, since these are the only ands that will be removed by isel. 5097 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 5098 WidestAndBits != DemandBits) 5099 return false; 5100 5101 LLVMContext &Ctx = Load->getType()->getContext(); 5102 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 5103 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 5104 5105 // Reject cases that won't be matched as extloads. 5106 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 5107 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 5108 return false; 5109 5110 IRBuilder<> Builder(Load->getNextNode()); 5111 auto *NewAnd = dyn_cast<Instruction>( 5112 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 5113 // Mark this instruction as "inserted by CGP", so that other 5114 // optimizations don't touch it. 5115 InsertedInsts.insert(NewAnd); 5116 5117 // Replace all uses of load with new and (except for the use of load in the 5118 // new and itself). 5119 Load->replaceAllUsesWith(NewAnd); 5120 NewAnd->setOperand(0, Load); 5121 5122 // Remove any and instructions that are now redundant. 5123 for (auto *And : AndsToMaybeRemove) 5124 // Check that the and mask is the same as the one we decided to put on the 5125 // new and. 5126 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 5127 And->replaceAllUsesWith(NewAnd); 5128 if (&*CurInstIterator == And) 5129 CurInstIterator = std::next(And->getIterator()); 5130 And->eraseFromParent(); 5131 ++NumAndUses; 5132 } 5133 5134 ++NumAndsAdded; 5135 return true; 5136 } 5137 5138 /// Check if V (an operand of a select instruction) is an expensive instruction 5139 /// that is only used once. 5140 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 5141 auto *I = dyn_cast<Instruction>(V); 5142 // If it's safe to speculatively execute, then it should not have side 5143 // effects; therefore, it's safe to sink and possibly *not* execute. 
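  // A typical candidate is a floating-point division feeding only the select:
  // it is safe to speculate, yet usually costed as TCC_Expensive or worse.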
5144 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 5145 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; 5146 } 5147 5148 /// Returns true if a SelectInst should be turned into an explicit branch. 5149 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 5150 const TargetLowering *TLI, 5151 SelectInst *SI) { 5152 // If even a predictable select is cheap, then a branch can't be cheaper. 5153 if (!TLI->isPredictableSelectExpensive()) 5154 return false; 5155 5156 // FIXME: This should use the same heuristics as IfConversion to determine 5157 // whether a select is better represented as a branch. 5158 5159 // If metadata tells us that the select condition is obviously predictable, 5160 // then we want to replace the select with a branch. 5161 uint64_t TrueWeight, FalseWeight; 5162 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 5163 uint64_t Max = std::max(TrueWeight, FalseWeight); 5164 uint64_t Sum = TrueWeight + FalseWeight; 5165 if (Sum != 0) { 5166 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 5167 if (Probability > TLI->getPredictableBranchThreshold()) 5168 return true; 5169 } 5170 } 5171 5172 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 5173 5174 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 5175 // comparison condition. If the compare has more than one use, there's 5176 // probably another cmov or setcc around, so it's not worth emitting a branch. 5177 if (!Cmp || !Cmp->hasOneUse()) 5178 return false; 5179 5180 // If either operand of the select is expensive and only needed on one side 5181 // of the select, we should form a branch. 5182 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 5183 sinkSelectOperand(TTI, SI->getFalseValue())) 5184 return true; 5185 5186 return false; 5187 } 5188 5189 /// If \p isTrue is true, return the true value of \p SI, otherwise return 5190 /// false value of \p SI. If the true/false value of \p SI is defined by any 5191 /// select instructions in \p Selects, look through the defining select 5192 /// instruction until the true/false value is not defined in \p Selects. 5193 static Value *getTrueOrFalseValue( 5194 SelectInst *SI, bool isTrue, 5195 const SmallPtrSet<const Instruction *, 2> &Selects) { 5196 Value *V; 5197 5198 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 5199 DefSI = dyn_cast<SelectInst>(V)) { 5200 assert(DefSI->getCondition() == SI->getCondition() && 5201 "The condition of DefSI does not match with SI"); 5202 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); 5203 } 5204 return V; 5205 } 5206 5207 /// If we have a SelectInst that will likely profit from branch prediction, 5208 /// turn it into a branch. 5209 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 5210 // Find all consecutive select instructions that share the same condition. 5211 SmallVector<SelectInst *, 2> ASI; 5212 ASI.push_back(SI); 5213 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 5214 It != SI->getParent()->end(); ++It) { 5215 SelectInst *I = dyn_cast<SelectInst>(&*It); 5216 if (I && SI->getCondition() == I->getCondition()) { 5217 ASI.push_back(I); 5218 } else { 5219 break; 5220 } 5221 } 5222 5223 SelectInst *LastSI = ASI.back(); 5224 // Increment the current iterator to skip all the rest of select instructions 5225 // because they will be either "not lowered" or "all lowered" to branch. 
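  // For illustration, a group such as the following (both selects share %cmp)
  // is lowered as a unit:
  //   %cmp = icmp ult i32 %a, %b
  //   %s1  = select i1 %cmp, i32 %x, i32 %y
  //   %s2  = select i1 %cmp, i32 %p, i32 %q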
5226 CurInstIterator = std::next(LastSI->getIterator()); 5227 5228 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 5229 5230 // Can we convert the 'select' to CF ? 5231 if (DisableSelectToBranch || OptSize || !TLI || VectorCond || 5232 SI->getMetadata(LLVMContext::MD_unpredictable)) 5233 return false; 5234 5235 TargetLowering::SelectSupportKind SelectKind; 5236 if (VectorCond) 5237 SelectKind = TargetLowering::VectorMaskSelect; 5238 else if (SI->getType()->isVectorTy()) 5239 SelectKind = TargetLowering::ScalarCondVectorVal; 5240 else 5241 SelectKind = TargetLowering::ScalarValSelect; 5242 5243 if (TLI->isSelectSupported(SelectKind) && 5244 !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) 5245 return false; 5246 5247 ModifiedDT = true; 5248 5249 // Transform a sequence like this: 5250 // start: 5251 // %cmp = cmp uge i32 %a, %b 5252 // %sel = select i1 %cmp, i32 %c, i32 %d 5253 // 5254 // Into: 5255 // start: 5256 // %cmp = cmp uge i32 %a, %b 5257 // br i1 %cmp, label %select.true, label %select.false 5258 // select.true: 5259 // br label %select.end 5260 // select.false: 5261 // br label %select.end 5262 // select.end: 5263 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 5264 // 5265 // In addition, we may sink instructions that produce %c or %d from 5266 // the entry block into the destination(s) of the new branch. 5267 // If the true or false blocks do not contain a sunken instruction, that 5268 // block and its branch may be optimized away. In that case, one side of the 5269 // first branch will point directly to select.end, and the corresponding PHI 5270 // predecessor block will be the start block. 5271 5272 // First, we split the block containing the select into 2 blocks. 5273 BasicBlock *StartBlock = SI->getParent(); 5274 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 5275 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 5276 5277 // Delete the unconditional branch that was just created by the split. 5278 StartBlock->getTerminator()->eraseFromParent(); 5279 5280 // These are the new basic blocks for the conditional branch. 5281 // At least one will become an actual new basic block. 5282 BasicBlock *TrueBlock = nullptr; 5283 BasicBlock *FalseBlock = nullptr; 5284 BranchInst *TrueBranch = nullptr; 5285 BranchInst *FalseBranch = nullptr; 5286 5287 // Sink expensive instructions into the conditional blocks to avoid executing 5288 // them speculatively. 5289 for (SelectInst *SI : ASI) { 5290 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 5291 if (TrueBlock == nullptr) { 5292 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 5293 EndBlock->getParent(), EndBlock); 5294 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 5295 } 5296 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 5297 TrueInst->moveBefore(TrueBranch); 5298 } 5299 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 5300 if (FalseBlock == nullptr) { 5301 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 5302 EndBlock->getParent(), EndBlock); 5303 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5304 } 5305 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 5306 FalseInst->moveBefore(FalseBranch); 5307 } 5308 } 5309 5310 // If there was nothing to sink, then arbitrarily choose the 'false' side 5311 // for a new input value to the PHI. 
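  // TrueBlock == FalseBlock is only possible when both are still null, i.e.
  // nothing was sunk on either side above.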
5312 if (TrueBlock == FalseBlock) { 5313 assert(TrueBlock == nullptr && 5314 "Unexpected basic block transform while optimizing select"); 5315 5316 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 5317 EndBlock->getParent(), EndBlock); 5318 BranchInst::Create(EndBlock, FalseBlock); 5319 } 5320 5321 // Insert the real conditional branch based on the original condition. 5322 // If we did not create a new block for one of the 'true' or 'false' paths 5323 // of the condition, it means that side of the branch goes to the end block 5324 // directly and the path originates from the start block from the point of 5325 // view of the new PHI. 5326 BasicBlock *TT, *FT; 5327 if (TrueBlock == nullptr) { 5328 TT = EndBlock; 5329 FT = FalseBlock; 5330 TrueBlock = StartBlock; 5331 } else if (FalseBlock == nullptr) { 5332 TT = TrueBlock; 5333 FT = EndBlock; 5334 FalseBlock = StartBlock; 5335 } else { 5336 TT = TrueBlock; 5337 FT = FalseBlock; 5338 } 5339 IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); 5340 5341 SmallPtrSet<const Instruction *, 2> INS; 5342 INS.insert(ASI.begin(), ASI.end()); 5343 // Use reverse iterator because later select may use the value of the 5344 // earlier select, and we need to propagate value through earlier select 5345 // to get the PHI operand. 5346 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 5347 SelectInst *SI = *It; 5348 // The select itself is replaced with a PHI Node. 5349 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 5350 PN->takeName(SI); 5351 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 5352 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 5353 5354 SI->replaceAllUsesWith(PN); 5355 SI->eraseFromParent(); 5356 INS.erase(SI); 5357 ++NumSelectsExpanded; 5358 } 5359 5360 // Instruct OptimizeBlock to skip to the next block. 5361 CurInstIterator = StartBlock->end(); 5362 return true; 5363 } 5364 5365 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 5366 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 5367 int SplatElem = -1; 5368 for (unsigned i = 0; i < Mask.size(); ++i) { 5369 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 5370 return false; 5371 SplatElem = Mask[i]; 5372 } 5373 5374 return true; 5375 } 5376 5377 /// Some targets have expensive vector shifts if the lanes aren't all the same 5378 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 5379 /// it's often worth sinking a shufflevector splat down to its use so that 5380 /// codegen can spot all lanes are identical. 5381 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 5382 BasicBlock *DefBB = SVI->getParent(); 5383 5384 // Only do this xform if variable vector shifts are particularly expensive. 5385 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 5386 return false; 5387 5388 // We only expect better codegen by sinking a shuffle if we can recognise a 5389 // constant splat. 5390 if (!isBroadcastShuffle(SVI)) 5391 return false; 5392 5393 // InsertedShuffles - Only insert a shuffle in each block once. 5394 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 5395 5396 bool MadeChange = false; 5397 for (User *U : SVI->users()) { 5398 Instruction *UI = cast<Instruction>(U); 5399 5400 // Figure out which BB this ext is used in. 5401 BasicBlock *UserBB = UI->getParent(); 5402 if (UserBB == DefBB) continue; 5403 5404 // For now only apply this when the splat is used by a shift instruction. 
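    // E.g. a vector shl whose shift-amount operand is this splat; keeping the
    // splat next to the shift lets ISel recognise a uniform (scalar) shift
    // amount.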
5405 if (!UI->isShift()) continue; 5406 5407 // Everything checks out, sink the shuffle if the user's block doesn't 5408 // already have a copy. 5409 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 5410 5411 if (!InsertedShuffle) { 5412 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 5413 assert(InsertPt != UserBB->end()); 5414 InsertedShuffle = 5415 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 5416 SVI->getOperand(2), "", &*InsertPt); 5417 } 5418 5419 UI->replaceUsesOfWith(SVI, InsertedShuffle); 5420 MadeChange = true; 5421 } 5422 5423 // If we removed all uses, nuke the shuffle. 5424 if (SVI->use_empty()) { 5425 SVI->eraseFromParent(); 5426 MadeChange = true; 5427 } 5428 5429 return MadeChange; 5430 } 5431 5432 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 5433 if (!TLI || !DL) 5434 return false; 5435 5436 Value *Cond = SI->getCondition(); 5437 Type *OldType = Cond->getType(); 5438 LLVMContext &Context = Cond->getContext(); 5439 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 5440 unsigned RegWidth = RegType.getSizeInBits(); 5441 5442 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 5443 return false; 5444 5445 // If the register width is greater than the type width, expand the condition 5446 // of the switch instruction and each case constant to the width of the 5447 // register. By widening the type of the switch condition, subsequent 5448 // comparisons (for case comparisons) will not need to be extended to the 5449 // preferred register width, so we will potentially eliminate N-1 extends, 5450 // where N is the number of cases in the switch. 5451 auto *NewType = Type::getIntNTy(Context, RegWidth); 5452 5453 // Zero-extend the switch condition and case constants unless the switch 5454 // condition is a function argument that is already being sign-extended. 5455 // In that case, we can avoid an unnecessary mask/extension by sign-extending 5456 // everything instead. 5457 Instruction::CastOps ExtType = Instruction::ZExt; 5458 if (auto *Arg = dyn_cast<Argument>(Cond)) 5459 if (Arg->hasSExtAttr()) 5460 ExtType = Instruction::SExt; 5461 5462 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 5463 ExtInst->insertBefore(SI); 5464 SI->setCondition(ExtInst); 5465 for (auto Case : SI->cases()) { 5466 APInt NarrowConst = Case.getCaseValue()->getValue(); 5467 APInt WideConst = (ExtType == Instruction::ZExt) ? 5468 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); 5469 Case.setValue(ConstantInt::get(Context, WideConst)); 5470 } 5471 5472 return true; 5473 } 5474 5475 namespace { 5476 /// \brief Helper class to promote a scalar operation to a vector one. 5477 /// This class is used to move downward extractelement transition. 5478 /// E.g., 5479 /// a = vector_op <2 x i32> 5480 /// b = extractelement <2 x i32> a, i32 0 5481 /// c = scalar_op b 5482 /// store c 5483 /// 5484 /// => 5485 /// a = vector_op <2 x i32> 5486 /// c = vector_op a (equivalent to scalar_op on the related lane) 5487 /// * d = extractelement <2 x i32> c, i32 0 5488 /// * store d 5489 /// Assuming both extractelement and store can be combine, we get rid of the 5490 /// transition. 5491 class VectorPromoteHelper { 5492 /// DataLayout associated with the current module. 5493 const DataLayout &DL; 5494 5495 /// Used to perform some checks on the legality of vector operations. 5496 const TargetLowering &TLI; 5497 5498 /// Used to estimated the cost of the promoted chain. 
5499 const TargetTransformInfo &TTI; 5500 5501 /// The transition being moved downwards. 5502 Instruction *Transition; 5503 /// The sequence of instructions to be promoted. 5504 SmallVector<Instruction *, 4> InstsToBePromoted; 5505 /// Cost of combining a store and an extract. 5506 unsigned StoreExtractCombineCost; 5507 /// Instruction that will be combined with the transition. 5508 Instruction *CombineInst; 5509 5510 /// \brief The instruction that represents the current end of the transition. 5511 /// Since we are faking the promotion until we reach the end of the chain 5512 /// of computation, we need a way to get the current end of the transition. 5513 Instruction *getEndOfTransition() const { 5514 if (InstsToBePromoted.empty()) 5515 return Transition; 5516 return InstsToBePromoted.back(); 5517 } 5518 5519 /// \brief Return the index of the original value in the transition. 5520 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 5521 /// c, is at index 0. 5522 unsigned getTransitionOriginalValueIdx() const { 5523 assert(isa<ExtractElementInst>(Transition) && 5524 "Other kind of transitions are not supported yet"); 5525 return 0; 5526 } 5527 5528 /// \brief Return the index of the index in the transition. 5529 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 5530 /// is at index 1. 5531 unsigned getTransitionIdx() const { 5532 assert(isa<ExtractElementInst>(Transition) && 5533 "Other kind of transitions are not supported yet"); 5534 return 1; 5535 } 5536 5537 /// \brief Get the type of the transition. 5538 /// This is the type of the original value. 5539 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 5540 /// transition is <2 x i32>. 5541 Type *getTransitionType() const { 5542 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 5543 } 5544 5545 /// \brief Promote \p ToBePromoted by moving \p Def downward through. 5546 /// I.e., we have the following sequence: 5547 /// Def = Transition <ty1> a to <ty2> 5548 /// b = ToBePromoted <ty2> Def, ... 5549 /// => 5550 /// b = ToBePromoted <ty1> a, ... 5551 /// Def = Transition <ty1> ToBePromoted to <ty2> 5552 void promoteImpl(Instruction *ToBePromoted); 5553 5554 /// \brief Check whether or not it is profitable to promote all the 5555 /// instructions enqueued to be promoted. 5556 bool isProfitableToPromote() { 5557 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 5558 unsigned Index = isa<ConstantInt>(ValIdx) 5559 ? cast<ConstantInt>(ValIdx)->getZExtValue() 5560 : -1; 5561 Type *PromotedType = getTransitionType(); 5562 5563 StoreInst *ST = cast<StoreInst>(CombineInst); 5564 unsigned AS = ST->getPointerAddressSpace(); 5565 unsigned Align = ST->getAlignment(); 5566 // Check if this store is supported. 5567 if (!TLI.allowsMisalignedMemoryAccesses( 5568 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 5569 Align)) { 5570 // If this is not supported, there is no way we can combine 5571 // the extract with the store. 5572 return false; 5573 } 5574 5575 // The scalar chain of computation has to pay for the transition 5576 // scalar to vector. 5577 // The vector chain has to account for the combining cost. 5578 uint64_t ScalarCost = 5579 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 5580 uint64_t VectorCost = StoreExtractCombineCost; 5581 for (const auto &Inst : InstsToBePromoted) { 5582 // Compute the cost. 5583 // By construction, all instructions being promoted are arithmetic ones. 
5584 // Moreover, one argument is a constant that can be viewed as a splat 5585 // constant. 5586 Value *Arg0 = Inst->getOperand(0); 5587 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 5588 isa<ConstantFP>(Arg0); 5589 TargetTransformInfo::OperandValueKind Arg0OVK = 5590 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 5591 : TargetTransformInfo::OK_AnyValue; 5592 TargetTransformInfo::OperandValueKind Arg1OVK = 5593 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 5594 : TargetTransformInfo::OK_AnyValue; 5595 ScalarCost += TTI.getArithmeticInstrCost( 5596 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); 5597 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 5598 Arg0OVK, Arg1OVK); 5599 } 5600 DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 5601 << ScalarCost << "\nVector: " << VectorCost << '\n'); 5602 return ScalarCost > VectorCost; 5603 } 5604 5605 /// \brief Generate a constant vector with \p Val with the same 5606 /// number of elements as the transition. 5607 /// \p UseSplat defines whether or not \p Val should be replicated 5608 /// across the whole vector. 5609 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 5610 /// otherwise we generate a vector with as many undef as possible: 5611 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 5612 /// used at the index of the extract. 5613 Value *getConstantVector(Constant *Val, bool UseSplat) const { 5614 unsigned ExtractIdx = UINT_MAX; 5615 if (!UseSplat) { 5616 // If we cannot determine where the constant must be, we have to 5617 // use a splat constant. 5618 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 5619 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 5620 ExtractIdx = CstVal->getSExtValue(); 5621 else 5622 UseSplat = true; 5623 } 5624 5625 unsigned End = getTransitionType()->getVectorNumElements(); 5626 if (UseSplat) 5627 return ConstantVector::getSplat(End, Val); 5628 5629 SmallVector<Constant *, 4> ConstVec; 5630 UndefValue *UndefVal = UndefValue::get(Val->getType()); 5631 for (unsigned Idx = 0; Idx != End; ++Idx) { 5632 if (Idx == ExtractIdx) 5633 ConstVec.push_back(Val); 5634 else 5635 ConstVec.push_back(UndefVal); 5636 } 5637 return ConstantVector::get(ConstVec); 5638 } 5639 5640 /// \brief Check if promoting to a vector type an operand at \p OperandIdx 5641 /// in \p Use can trigger undefined behavior. 5642 static bool canCauseUndefinedBehavior(const Instruction *Use, 5643 unsigned OperandIdx) { 5644 // This is not safe to introduce undef when the operand is on 5645 // the right hand side of a division-like instruction. 5646 if (OperandIdx != 1) 5647 return false; 5648 switch (Use->getOpcode()) { 5649 default: 5650 return false; 5651 case Instruction::SDiv: 5652 case Instruction::UDiv: 5653 case Instruction::SRem: 5654 case Instruction::URem: 5655 return true; 5656 case Instruction::FDiv: 5657 case Instruction::FRem: 5658 return !Use->hasNoNaNs(); 5659 } 5660 llvm_unreachable(nullptr); 5661 } 5662 5663 public: 5664 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 5665 const TargetTransformInfo &TTI, Instruction *Transition, 5666 unsigned CombineCost) 5667 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 5668 StoreExtractCombineCost(CombineCost), CombineInst(nullptr) { 5669 assert(Transition && "Do not know how to promote null"); 5670 } 5671 5672 /// \brief Check if we can promote \p ToBePromoted to \p Type. 
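  /// Today this is limited to binary operators (e.g. add, or, mul); see the
  /// note below about possibly supporting casts as well.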
5673 bool canPromote(const Instruction *ToBePromoted) const { 5674 // We could support CastInst too. 5675 return isa<BinaryOperator>(ToBePromoted); 5676 } 5677 5678 /// \brief Check if it is profitable to promote \p ToBePromoted 5679 /// by moving downward the transition through. 5680 bool shouldPromote(const Instruction *ToBePromoted) const { 5681 // Promote only if all the operands can be statically expanded. 5682 // Indeed, we do not want to introduce any new kind of transitions. 5683 for (const Use &U : ToBePromoted->operands()) { 5684 const Value *Val = U.get(); 5685 if (Val == getEndOfTransition()) { 5686 // If the use is a division and the transition is on the rhs, 5687 // we cannot promote the operation, otherwise we may create a 5688 // division by zero. 5689 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 5690 return false; 5691 continue; 5692 } 5693 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 5694 !isa<ConstantFP>(Val)) 5695 return false; 5696 } 5697 // Check that the resulting operation is legal. 5698 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 5699 if (!ISDOpcode) 5700 return false; 5701 return StressStoreExtract || 5702 TLI.isOperationLegalOrCustom( 5703 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); 5704 } 5705 5706 /// \brief Check whether or not \p Use can be combined 5707 /// with the transition. 5708 /// I.e., is it possible to do Use(Transition) => AnotherUse? 5709 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } 5710 5711 /// \brief Record \p ToBePromoted as part of the chain to be promoted. 5712 void enqueueForPromotion(Instruction *ToBePromoted) { 5713 InstsToBePromoted.push_back(ToBePromoted); 5714 } 5715 5716 /// \brief Set the instruction that will be combined with the transition. 5717 void recordCombineInstruction(Instruction *ToBeCombined) { 5718 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); 5719 CombineInst = ToBeCombined; 5720 } 5721 5722 /// \brief Promote all the instructions enqueued for promotion if it is 5723 /// is profitable. 5724 /// \return True if the promotion happened, false otherwise. 5725 bool promote() { 5726 // Check if there is something to promote. 5727 // Right now, if we do not have anything to combine with, 5728 // we assume the promotion is not profitable. 5729 if (InstsToBePromoted.empty() || !CombineInst) 5730 return false; 5731 5732 // Check cost. 5733 if (!StressStoreExtract && !isProfitableToPromote()) 5734 return false; 5735 5736 // Promote. 5737 for (auto &ToBePromoted : InstsToBePromoted) 5738 promoteImpl(ToBePromoted); 5739 InstsToBePromoted.clear(); 5740 return true; 5741 } 5742 }; 5743 } // End of anonymous namespace. 5744 5745 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { 5746 // At this point, we know that all the operands of ToBePromoted but Def 5747 // can be statically promoted. 5748 // For Def, we need to use its parameter in ToBePromoted: 5749 // b = ToBePromoted ty1 a 5750 // Def = Transition ty1 b to ty2 5751 // Move the transition down. 5752 // 1. Replace all uses of the promoted operation by the transition. 5753 // = ... b => = ... Def. 5754 assert(ToBePromoted->getType() == Transition->getType() && 5755 "The type of the result of the transition does not match " 5756 "the final type"); 5757 ToBePromoted->replaceAllUsesWith(Transition); 5758 // 2. Update the type of the uses. 5759 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. 
5760 Type *TransitionTy = getTransitionType(); 5761 ToBePromoted->mutateType(TransitionTy); 5762 // 3. Update all the operands of the promoted operation with promoted 5763 // operands. 5764 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. 5765 for (Use &U : ToBePromoted->operands()) { 5766 Value *Val = U.get(); 5767 Value *NewVal = nullptr; 5768 if (Val == Transition) 5769 NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); 5770 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || 5771 isa<ConstantFP>(Val)) { 5772 // Use a splat constant if it is not safe to use undef. 5773 NewVal = getConstantVector( 5774 cast<Constant>(Val), 5775 isa<UndefValue>(Val) || 5776 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); 5777 } else 5778 llvm_unreachable("Did you modified shouldPromote and forgot to update " 5779 "this?"); 5780 ToBePromoted->setOperand(U.getOperandNo(), NewVal); 5781 } 5782 Transition->removeFromParent(); 5783 Transition->insertAfter(ToBePromoted); 5784 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); 5785 } 5786 5787 /// Some targets can do store(extractelement) with one instruction. 5788 /// Try to push the extractelement towards the stores when the target 5789 /// has this feature and this is profitable. 5790 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { 5791 unsigned CombineCost = UINT_MAX; 5792 if (DisableStoreExtract || !TLI || 5793 (!StressStoreExtract && 5794 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 5795 Inst->getOperand(1), CombineCost))) 5796 return false; 5797 5798 // At this point we know that Inst is a vector to scalar transition. 5799 // Try to move it down the def-use chain, until: 5800 // - We can combine the transition with its single use 5801 // => we got rid of the transition. 5802 // - We escape the current basic block 5803 // => we would need to check that we are moving it at a cheaper place and 5804 // we do not do that for now. 5805 BasicBlock *Parent = Inst->getParent(); 5806 DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 5807 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); 5808 // If the transition has more than one use, assume this is not going to be 5809 // beneficial. 5810 while (Inst->hasOneUse()) { 5811 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 5812 DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 5813 5814 if (ToBePromoted->getParent() != Parent) { 5815 DEBUG(dbgs() << "Instruction to promote is in a different block (" 5816 << ToBePromoted->getParent()->getName() 5817 << ") than the transition (" << Parent->getName() << ").\n"); 5818 return false; 5819 } 5820 5821 if (VPH.canCombine(ToBePromoted)) { 5822 DEBUG(dbgs() << "Assume " << *Inst << '\n' 5823 << "will be combined with: " << *ToBePromoted << '\n'); 5824 VPH.recordCombineInstruction(ToBePromoted); 5825 bool Changed = VPH.promote(); 5826 NumStoreExtractExposed += Changed; 5827 return Changed; 5828 } 5829 5830 DEBUG(dbgs() << "Try promoting.\n"); 5831 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 5832 return false; 5833 5834 DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 5835 5836 VPH.enqueueForPromotion(ToBePromoted); 5837 Inst = ToBePromoted; 5838 } 5839 return false; 5840 } 5841 5842 /// For the instruction sequence of store below, F and I values 5843 /// are bundled together as an i64 value before being stored into memory. 
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
///            (shl (zext I to i64), 32)), addr)  -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into
/// hoo:
///    void goo(const std::pair<int, float> &);
///    hoo() {
///      ...
///      goo(std::make_pair(tmp, ftmp));
///      ...
///    }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch the cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();
  if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
      DL.getTypeSizeInBits(SplitStoreType))
    return false;

  // Match the following patterns:
  //   (store (or (zext LValue to i64),
  //              (shl (zext HValue to i64), HalfValBitSize)), addr)
  // or
  //   (store (or (shl (zext HValue to i64), HalfValBitSize),
  //              (zext LValue to i64)), addr)
  // Expect both operands of the OR and the first operand of the SHL to have
  // only one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers no wider than HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of the target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split the store.
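  // Illustrative shape of the result for an i64 store of
  // ((zext %lo to i64) | ((zext %hi to i64) << 32)) to %addr
  // (value names are illustrative only):
  //   store i32 %lo, i32* <addr cast to i32*>
  //   store i32 %hi, i32* <gep i32, 1 into the same pointer>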
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so it may be merged with the split stores by the DAG combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = Builder.CreateBitCast(
        SI.getOperand(1),
        SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
    if (Upper)
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
    Builder.CreateAlignedStore(
        V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI, TLI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      bool Modified = optimizeLoadExt(LI);
      unsigned AS = LI->getPointerAddressSpace();
      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
      return Modified;
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
      EnableAndCmpSinking && TLI)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction *, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If an llvm.dbg.value is far away from the value it describes, ISel may not
// be able to handle it properly. ISel will drop the llvm.dbg.value if it
// cannot find a node corresponding to the value.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block to use the first condition directly in
    // the branch instruction and remove the no-longer-needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to replace either the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes from
    // the newly generated BB (TmpBB). In the other successor we need to add
    // one incoming edge to the PHI nodes, because both branch instructions now
    // target the same successor.
    // Depending on the original branch condition (and/or) we have to swap the
    // successors (TrueDest, FalseDest), so that we perform the correct update
    // for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This
      // choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 is equal to TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This
      // choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
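      // As an illustrative check of this choice (example values, not from the
      // original comment): with A = 3 and B = 5, BB1 gets weights
      // {2A+B, B} = {11, 5} and TmpBB gets {2A, B} = {6, 5}, so
      //   FalseProb for BB1 + TrueProb for BB1 * FalseProb for TmpBB
      //     = 5/16 + (11/16) * (5/11) = 10/16 = B/(A+B),
      // which matches FalseProb for the original BB.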
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}