//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool>
    EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
                             cl::desc("Enable merging of redundant sexts when "
                                      "one is dominating the other."),
                             cl::init(true));

namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
typedef PointerIntPair<Type *, 1, bool> TypeIsSExt;
typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy;
typedef SmallVector<Instruction *, 16> SExts;
typedef DenseMap<Value *, SExts> ValueToSExts;
class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address.
  ValueMap<Value*, Value*> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;
  /// Keeps track of the type of each related instruction before its
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT);
  bool optimizeInst(Instruction *I, bool& ModifiedDT);
  bool optimizeMemoryInst(Instruction *I, Value *Addr,
                          Type *AccessTy, unsigned AS);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool& ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *I);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool optimizeSwitchInst(SwitchInst *CI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
  bool splitIndirectCriticalEdges(Function &F);
};
}

char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
                         "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_TM_PASS_END(CodeGenPrepare, "codegenprepare",
                       "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();
  BFI.reset();
  BPI.reset();

  ModifiedDT = false;
  if (TM) {
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  OptSize = F.optForSize();

  if (ProfileGuidedSectionPrefix) {
    ProfileSummaryInfo *PSI =
        getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    if (PSI->isFunctionHotInCallGraph(&F))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F))
      F.setSectionPrefix(".cold");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
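  // For illustration (a hypothetical example, not from the original
  // comments): on a target where 64-bit division is much slower than 32-bit
  // division, bypassSlowDivision can rewrite
  //   %q = udiv i64 %a, %b
  // into a runtime check of whether both operands fit in 32 bits, taking a
  // 32-bit divide on the fast path and the original wide divide otherwise.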
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock* BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock* Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then isel may not be able
  // to handle it properly. isel will drop llvm.dbg.value if it cannot
  // find a node corresponding to the value.
  EverMadeChange |= placeDbgValues(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= splitIndirectCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      delete I;

    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
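/// For illustration (hypothetical IR, not taken from a test case):
///   bb1:
///     br label %bb2
///   bb2:                                 ; preds = %bb1 (only)
///     ret void
/// becomes a single block in which the body of %bb2 follows directly from
/// the instructions of %bb1.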
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB->getIterator();
    }
  }
  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

// Return the unique indirectbr predecessor of a block. This may return null
// even if such a predecessor exists, if it's not useful for splitting.
// If a predecessor is found, OtherPreds will contain all other (non-indirectbr)
// predecessors of BB.
static BasicBlock *
findIBRPredecessor(BasicBlock *BB, SmallVectorImpl<BasicBlock *> &OtherPreds) {
  // If the block doesn't have any PHIs, we don't care about it, since there's
  // no point in splitting it.
  PHINode *PN = dyn_cast<PHINode>(BB->begin());
  if (!PN)
    return nullptr;

  // Verify we have exactly one IBR predecessor.
  // Conservatively bail out if one of the other predecessors is not a
  // "regular" terminator (that is, not a switch or a br).
  BasicBlock *IBB = nullptr;
  for (unsigned Pred = 0, E = PN->getNumIncomingValues(); Pred != E; ++Pred) {
    BasicBlock *PredBB = PN->getIncomingBlock(Pred);
    TerminatorInst *PredTerm = PredBB->getTerminator();
    switch (PredTerm->getOpcode()) {
    case Instruction::IndirectBr:
      if (IBB)
        return nullptr;
      IBB = PredBB;
      break;
    case Instruction::Br:
    case Instruction::Switch:
      OtherPreds.push_back(PredBB);
      continue;
    default:
      return nullptr;
    }
  }

  return IBB;
}

// Split critical edges where the source of the edge is an indirectbr
// instruction. This isn't always possible, but we can handle some easy cases.
// This is useful because MI is unable to split such critical edges,
// which means it will not be able to sink instructions along those edges.
// This is especially painful for indirect branches with many successors, where
// we end up having to prepare all outgoing values in the origin block.
//
// Our normal algorithm for splitting critical edges requires us to update
// the outgoing edges of the edge origin block, but for an indirectbr this
// is hard, since it would require finding and updating the block addresses
// the indirect branch uses. But if a block only has a single indirectbr
// predecessor, with the others being regular branches, we can do it in a
// different way.
// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
// We can split D into D0 and D1, where D0 contains only the PHIs from D,
// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
// create the following structure:
// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) {
  // Check whether the function has any indirectbrs, and collect which blocks
  // they may jump to. Since most functions don't have indirect branches,
  // this lowers the common case's overhead to O(Blocks) instead of O(Edges).
  SmallSetVector<BasicBlock *, 16> Targets;
  for (auto &BB : F) {
    auto *IBI = dyn_cast<IndirectBrInst>(BB.getTerminator());
    if (!IBI)
      continue;

    for (unsigned Succ = 0, E = IBI->getNumSuccessors(); Succ != E; ++Succ)
      Targets.insert(IBI->getSuccessor(Succ));
  }

  if (Targets.empty())
    return false;

  bool Changed = false;
  for (BasicBlock *Target : Targets) {
    SmallVector<BasicBlock *, 16> OtherPreds;
    BasicBlock *IBRPred = findIBRPredecessor(Target, OtherPreds);
    // If we did not find an indirectbr, or the indirectbr is the only
    // incoming edge, this isn't the kind of edge we're looking for.
    if (!IBRPred || OtherPreds.empty())
      continue;

    // Don't even think about ehpads/landingpads.
    Instruction *FirstNonPHI = Target->getFirstNonPHI();
    if (FirstNonPHI->isEHPad() || Target->isLandingPad())
      continue;

    BasicBlock *BodyBlock = Target->splitBasicBlock(FirstNonPHI, ".split");
    // It's possible Target was its own successor through an indirectbr.
    // In this case, the indirectbr now comes from BodyBlock.
    if (IBRPred == Target)
      IBRPred = BodyBlock;

    // At this point Target only has PHIs, and BodyBlock has the rest of the
    // block's body. Create a copy of Target that will be used by the "direct"
    // preds.
    ValueToValueMapTy VMap;
    BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F);

    for (BasicBlock *Pred : OtherPreds) {
      // If the target is a loop to itself, then the terminator of the split
      // block needs to be updated.
      if (Pred == Target)
        BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
      else
        Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
    }

    // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that
    // they are clones, so the number of PHIs is the same.
    // (a) Remove the edge coming from IBRPred from the "Direct" PHI
    // (b) Leave that as the only edge in the "Indirect" PHI.
    // (c) Merge the two in the body block.
    BasicBlock::iterator Indirect = Target->begin(),
                         End = Target->getFirstNonPHI()->getIterator();
    BasicBlock::iterator Direct = DirectSucc->begin();
    BasicBlock::iterator MergeInsert = BodyBlock->getFirstInsertionPt();

    assert(&*End == Target->getTerminator() &&
           "Block was expected to only contain PHIs");

    while (Indirect != End) {
      PHINode *DirPHI = cast<PHINode>(Direct);
      PHINode *IndPHI = cast<PHINode>(Indirect);

      // Now, clean up - the direct block shouldn't get the indirect value,
      // and vice versa.
      DirPHI->removeIncomingValue(IBRPred);
      Direct++;

      // Advance the pointer here, to avoid invalidation issues when the old
      // PHI is erased.
      Indirect++;

      PHINode *NewIndPHI = PHINode::Create(IndPHI->getType(), 1, "ind", IndPHI);
      NewIndPHI->addIncoming(IndPHI->getIncomingValueForBlock(IBRPred),
                             IBRPred);

      // Create a PHI in the body block, to merge the direct and indirect
      // predecessors.
      PHINode *MergePHI =
          PHINode::Create(IndPHI->getType(), 2, "merge", &*MergeInsert);
      MergePHI->addIncoming(NewIndPHI, Target);
      MergePHI->addIncoming(DirPHI, DirectSucc);

      IndPHI->replaceAllUsesWith(MergePHI);
      IndPHI->eraseFromParent();
    }

    Changed = true;
  }

  return Changed;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHI())
    return true;

  // We use a simple cost heuristic which determines that skipping merging is
  // profitable if the cost of skipping merging is less than the cost of
  // merging: Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify it to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
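  //
  // As a hypothetical worked example (numbers not taken from real profile
  // data): with Freq(Pred) = 80 and Freq(BB) = 20, the ratio 80 / 20 = 4
  // exceeds the default FreqRatioToSkipMerge of 2, so merging is skipped;
  // with Freq(Pred) = 30 and Freq(BB) = 20, the ratio 1.5 is below the
  // threshold and the empty block is merged.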

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    bool HasAllSameValue = true;
    BasicBlock::const_iterator DestBBI = DestBB->begin();
    while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) {
      if (DestPN->getIncomingValueForBlock(BB) !=
          DestPN->getIncomingValueForBlock(DestBBPred)) {
        HasAllSameValue = false;
        break;
      }
    }
    if (HasAllSameValue)
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In this case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  if (!BFI) {
    Function &F = *BB->getParent();
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// Eliminate a basic block that has only PHIs and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants.
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However, it would be too expensive to check
      // dominance for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast whether or not there is already one. In this way, we can handle
    // all cases, and the extra bitcast should be optimized away in later
    // passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// SinkCast - Sink the specified cast instruction into its user blocks.
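/// For illustration (hypothetical IR, not taken from a test case), given:
///   bb0:
///     %c = bitcast i8* %p to i32*
///     br label %bb1
///   bb1:
///     %v = load i32, i32* %c
/// the cast is re-created in %bb1 next to its use, and the original is erased
/// once all uses are rewritten, avoiding a cross-block virtual register copy.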
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
                                      CI->getType(), "", &*InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  // than sinking only nop casts, but is helpful on some platforms.
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
    if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(),
                                  ASC->getDestAddressSpace()))
      return false;
  }

  // Check whether this is a noop copy.
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // If this is an fp<->int conversion, it is not a noop copy.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if
/// possible.
///
/// Return true if any changes were made.
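/// For illustration (hypothetical IR, not taken from a test case), a pattern
/// such as:
///   %add = add i64 %a, %b
///   %cmp = icmp ult i64 %add, %a      ; unsigned-overflow check
/// is rewritten to:
///   %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
///   %sum = extractvalue { i64, i1 } %uadd, 0
///   %ovf = extractvalue { i64, i1 } %uadd, 1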
static bool CombineUAddWithOverflow(CmpInst *CI) {
  Value *A, *B;
  Instruction *AddI;
  if (!match(CI,
             m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
    return false;

  Type *Ty = AddI->getType();
  if (!isa<IntegerType>(Ty))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp:

  if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
    return false;

#ifndef NDEBUG
  // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  // for now:
  if (AddI->hasOneUse())
    assert(*AddI->user_begin() == CI && "expected!");
#endif

  Module *M = CI->getModule();
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);

  auto *InsertPt = AddI->hasOneUse() ? CI : AddI;

  auto *UAddWithOverflow =
      CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
  auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
  auto *Overflow =
      ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);

  CI->replaceAllUsesWith(Overflow);
  AddI->replaceAllUsesWith(UAdd);
  CI->eraseFromParent();
  AddI->eraseFromParent();
  return true;
}

/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  BasicBlock *DefBB = CI->getParent();

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
                          CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(CI->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  if (SinkCmpExpression(CI, TLI))
    return true;

  if (CombineUAddWithOverflow(CI))
    return true;

  return false;
}

/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI,
                                  const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void) InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase
  // register pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink for and mask feeding icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same
    // block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd =
        BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
                               AndI->getOperand(1), "", InsertPt);
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    ++NumAndUses;
    DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}

/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. An 'and' instruction whose immediate is a mask of the low bits:
///    imm & (imm+1) == 0
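/// For instance (hypothetical values, not taken from a test case), 0x00FF is
/// such a mask since 0xFF & 0x100 == 0, while 0xF0 is not since
/// 0xF0 & 0xF1 == 0xF0.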
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// Sink both shift and truncate instruction to the use of truncate's BB.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {
    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", &*TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction to generate a
/// BitExtract instruction. It will only be applied if the architecture
/// supports a BitExtract instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if not
      // legal. In this case, we would like to sink both shift and truncate
      // instructions to the BB of TruncUse.
      // For example:
      // BB1:
      //  i64 shift.result = lshr i64 opnd, imm
      //  trunc.result = trunc shift.result to i16
      //
      // BB2:
      //  ----> We will have an implicit truncate here if the architecture does
      //  not have i16 compare.
      //   cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal &&
          // If the truncate's type is legal, no extra truncate will be
          // introduced in other basic blocks.
          !TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
  if (ShiftI->use_empty())
    ShiftI->eraseFromParent();

  return MadeChange;
}

// Translate a masked load intrinsic like
//   <16 x i32> @llvm.masked.load(<16 x i32>* %addr, i32 align,
//                                <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks, loading the elements one by one if the
// appropriate mask bit is set.
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  %3 = icmp eq i1 %2, true
//  br i1 %3, label %cond.load, label %else
//
// cond.load:                                        ; preds = %0
//  %4 = getelementptr i32* %1, i32 0
//  %5 = load i32* %4
//  %6 = insertelement <16 x i32> undef, i32 %5, i32 0
//  br label %else
//
// else:                                             ; preds = %0, %cond.load
//  %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ]
//  %7 = extractelement <16 x i1> %mask, i32 1
//  %8 = icmp eq i1 %7, true
//  br i1 %8, label %cond.load1, label %else2
//
// cond.load1:                                       ; preds = %else
//  %9 = getelementptr i32* %1, i32 1
//  %10 = load i32* %9
//  %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1
//  br label %else2
//
// else2:                                            ; preds = %else, %cond.load1
//  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
//  %12 = extractelement <16 x i1> %mask, i32 2
//  %13 = icmp eq i1 %12, true
//  br i1 %13, label %cond.load4, label %else5
//
static void scalarizeMaskedLoad(CallInst *CI) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Alignment = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  Value *Src0 = CI->getArgOperand(3);

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
  assert(VecType && "Unexpected return type of masked load intrinsic");

  Type *EltTy = CI->getType()->getVectorElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  BasicBlock *CondBlock = nullptr;
  BasicBlock *PrevIfBlock = CI->getParent();

  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
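  // For example (illustrative IR in the style of the comment above), a call
  //   %r = call <4 x i32> @llvm.masked.load(<4 x i32>* %p, i32 4,
  //                                         <4 x i1> <i1 1, i1 1, i1 1, i1 1>,
  //                                         <4 x i32> undef)
  // degenerates to a plain 'load <4 x i32>, <4 x i32>* %p, align 4'.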
  bool IsAllOnesMask = isa<Constant>(Mask) &&
    cast<Constant>(Mask)->isAllOnesValue();

  if (IsAllOnesMask) {
    Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
    CI->replaceAllUsesWith(NewI);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
  AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
  // Bitcast %addr from i8* to EltTy*.
  Type *NewPtrType =
    EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  unsigned VectorWidth = VecType->getNumElements();

  Value *UndefVal = UndefValue::get(VecType);

  // The result vector.
  Value *VResult = UndefVal;

  if (isa<ConstantVector>(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
        continue;
      Value *Gep =
          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
      LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
      VResult = Builder.CreateInsertElement(VResult, Load,
                                            Builder.getInt32(Idx));
    }
    Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
    CI->replaceAllUsesWith(NewI);
    CI->eraseFromParent();
    return;
  }

  PHINode *Phi = nullptr;
  Value *PrevPhi = UndefVal;

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block created in the previous iteration:
    //
    //  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_load = icmp eq i1 %mask_1, true
    //  br i1 %to_load, label %cond.load, label %else
    //
    if (Idx > 0) {
      Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
      Phi->addIncoming(VResult, CondBlock);
      Phi->addIncoming(PrevPhi, PrevIfBlock);
      PrevPhi = Phi;
      VResult = Phi;
    }

    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create the "cond" block:
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load");
    Builder.SetInsertPoint(InsertPt);

    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
    VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));

    // Create the "else" block; fill it in the next iteration.
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;
  }

  Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
  Phi->addIncoming(VResult, CondBlock);
  Phi->addIncoming(PrevPhi, PrevIfBlock);
  Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  CI->replaceAllUsesWith(NewI);
  CI->eraseFromParent();
}

// Translate a masked store intrinsic, like
//   void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
//                           <16 x i1> %mask)
// to a chain of basic blocks, storing the elements one by one if the
// appropriate mask bit is set.
//
//   %1 = bitcast i8* %addr to i32*
//   %2 = extractelement <16 x i1> %mask, i32 0
//   %3 = icmp eq i1 %2, true
//   br i1 %3, label %cond.store, label %else
//
// cond.store:                                       ; preds = %0
//   %4 = extractelement <16 x i32> %val, i32 0
//   %5 = getelementptr i32* %1, i32 0
//   store i32 %4, i32* %5
//   br label %else
//
// else:                                             ; preds = %0, %cond.store
//   %6 = extractelement <16 x i1> %mask, i32 1
//   %7 = icmp eq i1 %6, true
//   br i1 %7, label %cond.store1, label %else2
//
// cond.store1:                                      ; preds = %else
//   %8 = extractelement <16 x i32> %val, i32 1
//   %9 = getelementptr i32* %1, i32 1
//   store i32 %8, i32* %9
//   br label %else2
//   . . .
static void scalarizeMaskedStore(CallInst *CI) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Alignment = CI->getArgOperand(2);
  Value *Mask = CI->getArgOperand(3);

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
  assert(VecType && "Unexpected data type in masked store intrinsic");

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  bool IsAllOnesMask = isa<Constant>(Mask) &&
    cast<Constant>(Mask)->isAllOnesValue();

  if (IsAllOnesMask) {
    Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
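  // A scalar element access cannot claim more alignment than its own size:
  // for a <16 x i32> store declared with alignment 64, each i32 access gets
  // min(64, 32/8) = 4, while an under-aligned vector (alignment 1) keeps 1.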
  AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
  // Bitcast %addr from i8* to EltTy*.
  Type *NewPtrType =
    EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  unsigned VectorWidth = VecType->getNumElements();

  if (isa<ConstantVector>(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
      Value *Gep =
          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
      Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
    }
    CI->eraseFromParent();
    return;
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block created in the previous iteration:
    //
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_store = icmp eq i1 %mask_1, true
    //  br i1 %to_store, label %cond.store, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create the "cond" block:
    //
    //  %OneElt = extractelement <16 x i32> %Src, i32 Idx
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  store i32 %OneElt, i32* %EltAddr
    //
    BasicBlock *CondBlock =
        IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    Builder.CreateAlignedStore(OneElt, Gep, AlignVal);

    // Create the "else" block; fill it in the next iteration.
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }
  CI->eraseFromParent();
}

// Translate a masked gather intrinsic like
//   <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %Ptrs, i32 4,
//                                         <16 x i1> %Mask, <16 x i32> %Src)
// to a chain of basic blocks, loading the elements one by one if the
// appropriate mask bit is set.
//
// %Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
// %Mask0 = extractelement <16 x i1> %Mask, i32 0
// %ToLoad0 = icmp eq i1 %Mask0, true
// br i1 %ToLoad0, label %cond.load, label %else
//
// cond.load:
// %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
// %Load0 = load i32, i32* %Ptr0, align 4
// %Res0 = insertelement <16 x i32> undef, i32 %Load0, i32 0
// br label %else
//
// else:
// %res.phi.else = phi <16 x i32> [ %Res0, %cond.load ], [ undef, %0 ]
// %Mask1 = extractelement <16 x i1> %Mask, i32 1
// %ToLoad1 = icmp eq i1 %Mask1, true
// br i1 %ToLoad1, label %cond.load1, label %else2
//
// cond.load1:
// %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// %Load1 = load i32, i32* %Ptr1, align 4
// %Res1 = insertelement <16 x i32> %res.phi.else, i32 %Load1, i32 1
// br label %else2
//   . . .
// %Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src
// ret <16 x i32> %Result
static void scalarizeMaskedGather(CallInst *CI) {
  Value *Ptrs = CI->getArgOperand(0);
  Value *Alignment = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  Value *Src0 = CI->getArgOperand(3);

  VectorType *VecType = dyn_cast<VectorType>(CI->getType());

  assert(VecType && "Unexpected return type of masked gather intrinsic");

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  BasicBlock *CondBlock = nullptr;
  BasicBlock *PrevIfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  Value *UndefVal = UndefValue::get(VecType);

  // The result vector.
  Value *VResult = UndefVal;
  unsigned VectorWidth = VecType->getNumElements();

  // Take the short path if the mask is a vector of constants.
  bool IsConstMask = isa<ConstantVector>(Mask);

  if (IsConstMask) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                                "Ptr" + Twine(Idx));
      LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
                                                 "Load" + Twine(Idx));
      VResult = Builder.CreateInsertElement(VResult, Load,
                                            Builder.getInt32(Idx),
                                            "Res" + Twine(Idx));
    }
    Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
    CI->replaceAllUsesWith(NewI);
    CI->eraseFromParent();
    return;
  }

  PHINode *Phi = nullptr;
  Value *PrevPhi = UndefVal;

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block created in the previous iteration:
    //
    //  %Mask1 = extractelement <16 x i1> %Mask, i32 1
    //  %ToLoad1 = icmp eq i1 %Mask1, true
    //  br i1 %ToLoad1, label %cond.load, label %else
    //
    if (Idx > 0) {
      Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
      Phi->addIncoming(VResult, CondBlock);
      Phi->addIncoming(PrevPhi, PrevIfBlock);
      PrevPhi = Phi;
      VResult = Phi;
    }

    Value *Predicate = Builder.CreateExtractElement(Mask,
                                                    Builder.getInt32(Idx),
                                                    "Mask" + Twine(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1),
                                    "ToLoad" + Twine(Idx));

    // Create the "cond" block:
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
    Builder.SetInsertPoint(InsertPt);

    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                              "Ptr" + Twine(Idx));
    LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
                                               "Load" + Twine(Idx));
    VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx),
                                          "Res" + Twine(Idx));

    // Create the "else" block; fill it in the next iteration.
    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
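    // The earlier split left IfBlock ending in an unconditional branch; the
    // conditional branch on Cmp created above supersedes it.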
    OldBr->eraseFromParent();
    PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;
  }

  Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
  Phi->addIncoming(VResult, CondBlock);
  Phi->addIncoming(PrevPhi, PrevIfBlock);
  Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  CI->replaceAllUsesWith(NewI);
  CI->eraseFromParent();
}

// Translate a masked scatter intrinsic, like
//   void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*> %Ptrs, i32 4,
//                                    <16 x i1> %Mask)
// to a chain of basic blocks, storing the elements one by one if the
// appropriate mask bit is set.
//
// %Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind
// %Mask0 = extractelement <16 x i1> %Mask, i32 0
// %ToStore0 = icmp eq i1 %Mask0, true
// br i1 %ToStore0, label %cond.store, label %else
//
// cond.store:
// %Elt0 = extractelement <16 x i32> %Src, i32 0
// %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
// store i32 %Elt0, i32* %Ptr0, align 4
// br label %else
//
// else:
// %Mask1 = extractelement <16 x i1> %Mask, i32 1
// %ToStore1 = icmp eq i1 %Mask1, true
// br i1 %ToStore1, label %cond.store1, label %else2
//
// cond.store1:
// %Elt1 = extractelement <16 x i32> %Src, i32 1
// %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// store i32 %Elt1, i32* %Ptr1, align 4
// br label %else2
//   . . .
static void scalarizeMaskedScatter(CallInst *CI) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptrs = CI->getArgOperand(1);
  Value *Alignment = CI->getArgOperand(2);
  Value *Mask = CI->getArgOperand(3);

  assert(isa<VectorType>(Src->getType()) &&
         "Unexpected data type in masked scatter intrinsic");
  assert(isa<VectorType>(Ptrs->getType()) &&
         isa<PointerType>(Ptrs->getType()->getVectorElementType()) &&
         "Vector of pointers is expected in masked scatter intrinsic");

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  unsigned VectorWidth = Src->getType()->getVectorNumElements();

  // Take the short path if the mask is a vector of constants.
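  // For instance, a literal mask of <i1 1, i1 0, i1 1, i1 0> emits plain
  // stores for elements 0 and 2 only, with no control flow at all.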
  bool IsConstMask = isa<ConstantVector>(Mask);

  if (IsConstMask) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
                                                   "Elt" + Twine(Idx));
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                                "Ptr" + Twine(Idx));
      Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
    }
    CI->eraseFromParent();
    return;
  }
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block created in the previous iteration:
    //
    //  %Mask1 = extractelement <16 x i1> %Mask, i32 Idx
    //  %ToStore = icmp eq i1 %Mask1, true
    //  br i1 %ToStore, label %cond.store, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask,
                                                    Builder.getInt32(Idx),
                                                    "Mask" + Twine(Idx));
    Value *Cmp =
        Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                           ConstantInt::get(Predicate->getType(), 1),
                           "ToStore" + Twine(Idx));

    // Create the "cond" block:
    //
    //  %Elt1 = extractelement <16 x i32> %Src, i32 1
    //  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
    //  store i32 %Elt1, i32* %Ptr1
    //
    BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
                                                 "Elt" + Twine(Idx));
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                              "Ptr" + Twine(Idx));
    Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);

    // Create the "else" block; fill it in the next iteration.
    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }
  CI->eraseFromParent();
}

/// If counting leading or trailing zeros is an expensive operation and a zero
/// input is defined, add a check for zero to avoid calling the intrinsic.
///
/// We want to transform:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
///
/// into:
///   entry:
///     %cmpz = icmp eq i64 %A, 0
///     br i1 %cmpz, label %cond.end, label %cond.false
///   cond.false:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///     br label %cond.end
///   cond.end:
///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                  const TargetLowering *TLI,
                                  const DataLayout *DL,
                                  bool &ModifiedDT) {
  if (!TLI || !DL)
    return false;

  // If a zero input is undefined, it doesn't make sense to despeculate that.
  if (match(CountZeros->getOperand(1), m_One()))
    return false;

  // If it's cheap to speculate, there's nothing to do.
  auto IntrinsicID = CountZeros->getIntrinsicID();
  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
    return false;

  // Only handle legal scalar cases. Anything else requires too much work.
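  // e.g. with 64-bit registers, an i128 cttz, or any vector cttz/ctlz, is
  // skipped here and left to the generic lowering.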
  Type *Ty = CountZeros->getType();
  unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
    return false;

  // The intrinsic will be sunk behind a compare against zero and branch.
  BasicBlock *StartBlock = CountZeros->getParent();
  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");

  // Create another block after the count zero intrinsic. A PHI will be added
  // in this block to select the result of the intrinsic or the bit-width
  // constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(&EndBlock->front());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  CountZeros->replaceAllUsesWith(PN);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = true;
  return true;
}

bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize, PrefAlign;
  if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->arg_operands()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
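      // For example, with PrefAlign = 16 and MinSize = 32, a GEP at constant
      // offset 8 is skipped (8 is not a multiple of 16), while an alloca
      // reached at offset 16 is only realigned if its allocation size is at
      // least 48 bytes.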
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getPointerSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if ((Offset2 & (PrefAlign - 1)) != 0)
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(PrefAlign);
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
        GV->setAlignment(PrefAlign);
    }
    // If this is a memcpy (or similar), then we may be able to improve the
    // alignment.
    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
      unsigned Align = getKnownAlignment(MI->getDest(), *DL);
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
        Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
      if (Align > MI->getAlignment())
        MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
    }
  }

  // If we have a cold call site, try to sink addressing computation into the
  // cold block. This interacts with our handling for loads and stores to
  // ensure that we can fold all uses of a potential addressing computation
  // into their uses. TODO: generalize this to work over profiling data.
  if (!OptSize && CI->hasFnAttr(Attribute::Cold))
    for (auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
    }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::objectsize: {
      // Lower all uses of llvm.objectsize.*
      ConstantInt *RetVal =
          lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true);
      // Substituting this can cause recursive simplifications, which can
      // invalidate our iterator. Use a WeakVH to hold onto it in case this
      // happens.
      Value *CurValue = &*CurInstIterator;
      WeakVH IterHandle(CurValue);

      replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);

      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      if (IterHandle != CurValue) {
        CurInstIterator = BB->begin();
        SunkAddrs.clear();
      }
      return true;
    }
    case Intrinsic::masked_load: {
      // Scalarize unsupported vector masked loads.
      if (!TTI->isLegalMaskedLoad(CI->getType())) {
        scalarizeMaskedLoad(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    case Intrinsic::masked_store: {
      if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType())) {
        scalarizeMaskedStore(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    case Intrinsic::masked_gather: {
      if (!TTI->isLegalMaskedGather(CI->getType())) {
        scalarizeMaskedGather(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    case Intrinsic::masked_scatter: {
      if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) {
        scalarizeMaskedScatter(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    case Intrinsic::aarch64_stlxr:
    case Intrinsic::aarch64_stxr: {
      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
      if (!ExtVal || !ExtVal->hasOneUse() ||
          ExtVal->getParent() == CI->getParent())
        return false;
      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
      ExtVal->moveBefore(CI);
      // Mark this instruction as "inserted by CGP", so that other
      // optimizations don't touch it.
      InsertedInsts.insert(ExtVal);
      return true;
    }
    case Intrinsic::invariant_group_barrier:
      II->replaceAllUsesWith(II->getArgOperand(0));
      II->eraseFromParent();
      return true;

    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      // If counting zeros is expensive, try to avoid it.
      return despeculateCountZeros(II, TLI, DL, ModifiedDT);
    }

    if (TLI) {
      SmallVector<Value *, 2> PtrOps;
      Type *AccessTy;
      if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
        while (!PtrOps.empty()) {
          Value *PtrVal = PtrOps.pop_back_val();
          unsigned AS = PtrVal->getType()->getPointerAddressSpace();
          if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
            return true;
        }
    }
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction())
    return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize. Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  if (Value *V = Simplifier.optimizeCall(CI)) {
    CI->replaceAllUsesWith(V);
    CI->eraseFromParent();
    return true;
  }
  return false;
}

/// Look for opportunities to duplicate return instructions to the predecessor
/// to enable tail call optimizations.
/// The case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RetI)
    return false;

  PHINode *PN = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RetI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RetI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RetI)
      return false;
  }

  // Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  // call.
  const Function *F = BB->getParent();
  SmallVector<CallInst *, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock *, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI).second)
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
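    // For example, if the caller's return value is marked zeroext but the
    // call's result is not (or vice versa), duplicating the ret could drop an
    // extension the ABI relies on, so such a pair is skipped.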
    AttributeList CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex)
            .removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(F->getAttributes(), AttributeList::ReturnIndex)
            .removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode &O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "")
       << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

/// \brief This class provides transaction-based operations on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
class TypePromotionTransaction {

  /// \brief This represents the common interface of the individual transaction
  /// actions. Each subclass implements the logic for doing one specific
  /// modification on the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// \brief Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() {}

    /// \brief Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact
    /// same state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// \brief Commit the change made by this action.
    /// When the action's effect on the IR is to be kept, this method must be
    /// called; otherwise internal bookkeeping may be retained forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// \brief Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either the instruction:
    /// - is the first in its basic block: BB is used.
    /// - has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;
    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// \brief Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst->getIterator();
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = &*--It;
      else
        Point.BB = Inst->getParent();
    }

    /// \brief Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = &*Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };

  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// \brief Set the operand of an instruction to a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;
    /// Index of the modified operand.
    unsigned Idx;

  public:
    /// \brief Set operand \p Idx of \p Inst to \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the operand.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// \brief Hide the operands of an instruction.
  /// Behave as if this instruction were not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result:
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result:
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result:
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// \brief Replace the uses of an instruction with another value.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction that uses the replaced instruction.
      Instruction *Inst;
      /// The operand index at which the replaced instruction was used.
      unsigned Idx;
      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;

  public:
    /// \brief Replace all the uses of \p Inst with \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst back to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };

  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this makes it appear as if the instruction had been removed.
    OperandsHider Hider;
    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer;
    /// Keep track of instructions removed.
    SetOfInstrs &RemovedInsts;

  public:
    /// \brief Remove all references to \p Inst and optionally replace all its
    /// uses with \p New.
    /// \p RemovedInsts Keep track of the instructions removed by this Action.
    /// \pre If !Inst->use_empty(), then New != nullptr.
    InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
                       Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          Replacer(nullptr), RemovedInsts(RemovedInsts) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      RemovedInsts.insert(Inst);
      // The instructions removed here will be freed after completing
      // optimizeBlock() for all blocks, as we need to keep track of the
      // removed instructions during promotion.
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when this action was built.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
      RemovedInsts.erase(Inst);
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  typedef const TypePromotionAction *ConstRestorationPt;

  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
      : RemovedInsts(RemovedInsts) {}

  /// Commit all the changes made in this transaction.
  void commit();
  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);
  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);
  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);
  /// Same as IRBuilder::CreateTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);
  /// Same as IRBuilder::CreateSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as IRBuilder::CreateZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
  typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator
      CommitPt;
  SetOfInstrs &RemovedInsts;
};

void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(make_unique<TypePromotionTransaction::InstructionRemover>(
      Inst, RemovedInsts, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst,
                                                                   Before));
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}

void TypePromotionTransaction::commit() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
       ++It)
    (*It)->commit();
  Actions.clear();
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}

/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction *> &AddrModeInsts;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const DataLayout &DL;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  unsigned AddrSpace;
  Instruction *MemoryInst;

  /// This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching.
  ExtAddrMode &AddrMode;

  /// The instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedInsts;
  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;
  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
                        const TargetLowering &TLI,
                        const TargetRegisterInfo &TRI,
                        Type *AT, unsigned AS,
                        Instruction *MI, ExtAddrMode &AM,
                        const SetOfInstrs &InsertedInsts,
                        InstrToOrigTy &PromotedInsts,
                        TypePromotionTransaction &TPT)
      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
        DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
        MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
        PromotedInsts(PromotedInsts), TPT(TPT) {
    IgnoreProfitability = false;
  }

public:
  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction *> &AddrModeInsts,
                           const TargetLowering &TLI,
                           const TargetRegisterInfo &TRI,
                           const SetOfInstrs &InsertedInsts,
                           InstrToOrigTy &PromotedInsts,
                           TypePromotionTransaction &TPT) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI,
                                         AccessTy, AS,
                                         MemoryInst, Result, InsertedInsts,
                                         PromotedInsts, TPT).matchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }

private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *V, unsigned Depth);
  bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};

/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that matchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}

/// \brief Helper class to perform type promotion.
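/// For instance (an illustrative IR sketch), promoting a sext through an
/// nsw add turns
///   %add = add nsw i32 %a, 1
///   %ext = sext i32 %add to i64
/// into
///   %exta = sext i32 %a to i64
///   %add  = add nsw i64 %exta, 1
/// so that the remaining extension can, e.g., sink toward a load or move out
/// of an addressing computation.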
class TypePromotionHelper {
  /// \brief Utility function to check whether or not a sign or zero extension
  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  /// either using the operands of \p Inst or promoting \p Inst.
  /// The type of the extension is defined by \p IsSExt.
  /// In other words, check if:
  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  /// #1 Promotion applies:
  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  /// #2 Operand reuses:
  /// ext opnd1 to ConsideredExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
                            const InstrToOrigTy &PromotedInsts, bool IsSExt);

  /// \brief Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    return !(isa<SelectInst>(Inst) && OpIdx == 0);
  }

  /// \brief Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc or sext or zext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForTruncAndAnyExt(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);

  /// \brief Utility function to promote the operand of \p Ext when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all the instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForOther(Instruction *Ext,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInstsCost,
                                       SmallVectorImpl<Instruction *> *Exts,
                                       SmallVectorImpl<Instruction *> *Truncs,
                                       const TargetLowering &TLI, bool IsSExt);

  /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }

  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
                           InstrToOrigTy &PromotedInsts,
                           unsigned &CreatedInstsCost,
                           SmallVectorImpl<Instruction *> *Exts,
                           SmallVectorImpl<Instruction *> *Truncs,
                           const TargetLowering &TLI);
  /// \brief Given a sign/zero extend instruction \p Ext, return the
  /// appropriate action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations.  This information is important
  /// because we do not want to promote these instructions, as CodeGenPrepare
  /// will reinsert them later, thus creating an infinite create/remove loop.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator, if it is legal.  In other words, the
  // binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
       (IsSExt && BinOp->hasNoSignedWrap())))
    return true;

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check whether the source of the truncate is narrow enough, i.e., that the
  // trunc just drops extended bits of the same kind as the extension.
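  // E.g. (an illustrative sketch, with IsSExt == true):
  //   %o = sext i16 %v to i32
  //   %t = trunc i32 %o to i24
  //   sext i24 %t to i64
  // Here the trunc only drops bits that are copies of the sign bit of %v, so
  // the outer sext can safely look through it.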
  // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType;
  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
  if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
    OpndType = It->second.getPointer();
  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
    OpndType = Opnd->getOperand(0)->getType();
  else
    return false;

  // #2 check that the truncate just drops extended bits.
  return Inst->getType()->getIntegerBitWidth() >=
         OpndType->getIntegerBitWidth();
}

TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check whether we can get through it.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt, ZExt, or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}

Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    llvm::Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction.  Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
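  // E.g. (illustrative): rewriting 'sext (trunc i64 %v to i32) to i64' above
  // leaves an extension whose operand and result are both i64.  Such an
  // identity extension is useless; the code below redirects its uses to %v
  // and erases it.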
  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
    if (ExtInst) {
      if (Exts)
        Exts->push_back(ExtInst);
      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
    }
    return ExtVal;
  }

  // At this point we have: ext ty opnd to ty.
  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  Value *NextVal = ExtInst->getOperand(0);
  TPT.eraseInstruction(ExtInst, NextVal);
  return NextVal;
}

Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
  // By construction, the operand of Ext is an instruction.  Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  CreatedInstsCost = 0;
  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, except Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
      ITrunc->removeFromParent();
      // Insert it just after the definition.
      ITrunc->insertAfter(ExtOpnd);
      if (Truncs)
        Truncs->push_back(ITrunc);
    }

    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous call
    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
    TPT.setOperand(Ext, 0, ExtOpnd);
  }

  // Get through the Instruction:
  // 1. Update its type.
  // 2. Replace the uses of Ext by Inst.
  // 3. Extend each operand that needs to be extended.

  // Remember the original type of the instruction before promotion.
  // This is useful for knowing whether the high bits are sign- or
  // zero-extended bits.
  PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
      ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
  // Step #1.
  TPT.mutateType(ExtOpnd, Ext->getType());
  // Step #2.
  TPT.replaceAllUsesWith(Ext, ExtOpnd);
  // Step #3.
  Instruction *ExtForOpnd = Ext;

  DEBUG(dbgs() << "Propagate Ext to operands\n");
  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
       ++OpIdx) {
    DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
        !shouldExtOperand(ExtOpnd, OpIdx)) {
      DEBUG(dbgs() << "No need to propagate\n");
      continue;
    }
    // Check if we can statically extend the operand.
    Value *Opnd = ExtOpnd->getOperand(OpIdx);
    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
                            : Cst->getValue().zext(BitWidth);
      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
      continue;
    }
    // UndefValues are typed, so we have to statically sign extend them.
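    // E.g. (illustrative, assuming promotion to i64): an 'i32 undef' operand
    // is simply replaced by an 'i64 undef'; no extension instruction is
    // needed.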
    if (isa<UndefValue>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    // Check if Ext was reused to extend an operand; if so, create a new
    // extension.
    if (!ExtForOpnd) {
      DEBUG(dbgs() << "More operands to ext\n");
      Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
                                    : TPT.createZExt(Ext, Opnd, Ext->getType());
      if (!isa<Instruction>(ValForExtOpnd)) {
        TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
        continue;
      }
      ExtForOpnd = cast<Instruction>(ValForExtOpnd);
    }
    if (Exts)
      Exts->push_back(ExtForOpnd);
    TPT.setOperand(ExtForOpnd, 0, Opnd);

    // Move the sign extension before the insertion point.
    TPT.moveBefore(ExtForOpnd, ExtOpnd);
    TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
    CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more sexts are required, new instructions will have to be created.
    ExtForOpnd = nullptr;
  }
  if (ExtForOpnd == Ext) {
    DEBUG(dbgs() << "Extension is useless now\n");
    TPT.eraseInstruction(Ext);
  }
  return ExtOpnd;
}

/// Check whether or not promoting an instruction to a wider type is
/// profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode with the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}

/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode.  If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it is set to true when \p AddrInst has been
/// moved away rather than folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away; thus AddrInst must not be added to
/// the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS
      = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
        matchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
        matchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS)
      return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We check whether it contains constant offsets and at most
    // one variable offset.
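    // For instance (an illustrative IR sketch):
    //   getelementptr { i32, [40 x i8] }, { i32, [40 x i8] }* %p,
    //                 i64 0, i32 1, i64 %i
    // contributes a constant offset of 4 (the field offset) plus one variable
    // index %i at scale 1 (the i8 element size).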
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
        // Check to see if we can fold the base pointer in too.
        if (matchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone.  Anyway, we must not fold it into the addressing mode at this
    // point.
    // E.g.,
    // op = add opnd, 1
    // idx = ext op
    // addr = gep base, idx
    // is now:
    // promotedOpnd = ext opnd            <- no match here
    // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    // addr = gep base, op                <- match
    if (MovedAway)
      *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  }
  return false;
}

/// If we can, try to add the value of 'Addr' into the current addressing mode.
/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
/// unmodified.  This assumes that Addr is either a pointer type or intptr_t
/// for the target.
///
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will rollback if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away.  If so, there is nothing
      // to check here.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.
      // We use a simple cost model to avoid increasing register pressure too
      // much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }
  // Couldn't match.
  TPT.rollback(LastKnownGood);
  return false;
}

/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands.  If so, return true, otherwise return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getParent()->getParent();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI,
                           ImmutableCallSite(CI));

  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I,
    SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts,
    const TargetLowering &TLI, const TargetRegisterInfo &TRI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  const bool OptSize = I->getFunction()->optForSize();

  // Loop over all the uses, recursively processing them.
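  // E.g. (illustrative): if I is an add feeding a GEP whose result is loaded
  // twice, we walk through the GEP and record both loads in MemoryUses;
  // storing the address itself (rather than storing *to* it) makes us bail.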
  for (Use &U : I->uses()) {
    Instruction *UserI = cast<Instruction>(U.getUser());

    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(RMW, opNo));
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(CmpX, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      // If this is a cold call, we can sink the addressing calculation into
      // the cold path.  See optimizeCallInst.
      if (!OptSize && CI->hasFnAttr(Attribute::Cold))
        continue;

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI))
      return true;
  }

  return false;
}

/// Return true if Val is already known to be live at the use site that we're
/// folding it into.  If so, there is no cost to include it in the addressing
/// mode.  KnownLive1 and KnownLive2 are two values that we know are live at
/// the instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live; this is
  // because it is just a reference to the stack/frame pointer, which is live
  // for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// It is possible for the addressing mode of the machine to fold the specified
/// instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1"
/// to be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase
/// the number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst).  In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction.  The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fastpath.  For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonably small and fast way
  // to compute an effective address (e.g., LEA on x86).
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
    if (!AddrTy)
      return false;
    Type *AddressAccessTy = AddrTy->getElementType();
    unsigned AS = AddrTy->getAddressSpace();

    // Do a match against the root of this address, ignoring profitability.
    // This will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI,
                                  AddressAccessTy, AS,
                                  MemoryInst, Result, InsertedInsts,
                                  PromotedInsts, TPT);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // The match was to check the profitability, the changes made are not
    // part of the original matcher.  Therefore, they should be dropped;
    // otherwise the original matcher will not present the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (!is_contained(MatchedAddrModeInsts, I))
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}

} // end anonymous namespace

/// Return true if the specified values are defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// Sink addressing mode computation immediately before MemoryInst if doing so
/// can be done without increasing register pressure.  The need for the
/// register pressure constraint means this can end up being an all-or-nothing
/// decision for all uses of the same addressing computation.
///
/// Load and Store Instructions often have addressing modes that can do
/// significant amounts of computation.  As such, instruction selection will
/// try to get the load or store to do as much computation as possible for the
/// program.  The problem is that isel can only see within a single block.  As
/// such, we sink as much legal addressing mode work into the block as
/// possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.  It's also used to sink addressing computations feeding into cold
/// call sites into their (cold) basic block.
///
/// The motivation for handling sinking into cold blocks is that doing so can
/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes.  This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
  Value *Consensus = nullptr;
  unsigned NumUsesConsensus = 0;
  bool IsNumUsesConsensusValid = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(V).second) {
      Consensus = nullptr;
      break;
    }

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : P->incoming_values())
        worklist.push_back(IncValue);
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.  Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TLI, *TRI,
        InsertedInsts, PromotedInsts, TPT);

    // This check is broken into two cases with very similar code to avoid
    // using getNumUses() as much as possible.  Some values have a lot of uses,
    // so calling getNumUses() unconditionally caused a significant
    // compile-time regression.
    if (!Consensus) {
      Consensus = V;
      AddrMode = NewAddrMode;
      AddrModeInsts = NewAddrModeInsts;
      continue;
    } else if (NewAddrMode == AddrMode) {
      if (!IsNumUsesConsensusValid) {
        NumUsesConsensus = Consensus->getNumUses();
        IsNumUsesConsensusValid = true;
      }

      // Ensure that the obtained addressing mode is equivalent to that
      // obtained for all other roots of the PHI traversal.  Also, when
      // choosing one such root as representative, select the one with the
      // most uses in order to keep the cost modeling heuristics in
      // AddressingModeMatcher applicable.
      unsigned NumUses = V->getNumUses();
      if (NumUses > NumUsesConsensus) {
        Consensus = V;
        NumUsesConsensus = NumUses;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = nullptr;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  TPT.commit();

  // If all the instructions matched are already in this BB, don't do anything.
  if (none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression that we want to use
  // and know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
  } else if (AddrSinkUsingGEPs ||
             (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
              SubtargetInfo->useAA())) {
    // By default, we use the GEP-based method when AA is used later.  This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return false;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    if (AddrMode.BaseGV) {
      if (ResultPtr)
        return false;

      ResultPtr = AddrMode.BaseGV;
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value.  In that case,
    // use it here.
    if (!ResultPtr && AddrMode.BaseReg) {
      ResultPtr =
          Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
      AddrMode.BaseReg = nullptr;
    } else if (!ResultPtr && AddrMode.Scale == 1) {
      ResultPtr =
          Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
                                 "sunkaddr");
      AddrMode.Scale = 0;
    }

    if (!ResultPtr &&
        !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return false;
    } else {
      Type *I8PtrTy =
          Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
      Type *I8Ty = Builder.getInt8Ty();

      // Start with the base register.  Do this first so that subsequent
      // address matching finds it last, which will prevent it from trying to
      // match it as the scaled value in case it happens to be a mul.  That
      // would be problematic if we've sunk a different mul for the scale,
      // because then we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                   cast<IntegerType>(V->getType())->getBitWidth()) {
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        } else {
          // It is only safe to sign extend the BaseReg if we know that the
          // math required to create it did not overflow before we extend it.
          // Since the original IR value was tossed in favor of a constant
          // back when the AddrMode was created, we need to bail out gracefully
          // if widths do not match instead of extending it.
          Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
          if (I && (ResultIndex != AddrMode.BaseReg))
            I->eraseFromParent();
          return false;
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex,
                                        "sunkaddr");
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
      }

      if (SunkAddr->getType() != Addr->getType())
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register.  Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul.  That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it.  Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created, we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return false;
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakVH to hold onto it in case this happens.
    Value *CurValue = &*CurInstIterator;
    WeakVH IterHandle(CurValue);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);

    if (IterHandle != CurValue) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// If there are any memory operands, use OptimizeMemoryInst to sink their
/// address computation into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// \brief Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
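/// For instance (an illustrative sketch): if every user of \p Val is a
/// 'zext i32 %val to i64', the uses are equivalent; a mix of sext and zext
/// users is not, and zexts to different widths are only acceptable when the
/// wider zext is free for the target.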
4492 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
4493   assert(!Val->use_empty() && "Input must have at least one use");
4494   const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
4495   bool IsSExt = isa<SExtInst>(FirstUser);
4496   Type *ExtTy = FirstUser->getType();
4497   for (const User *U : Val->users()) {
4498     const Instruction *UI = cast<Instruction>(U);
4499     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
4500       return false;
4501     Type *CurTy = UI->getType();
4502     // Same input and output types: Same instruction after CSE.
4503     if (CurTy == ExtTy)
4504       continue;
4505 
4506     // If IsSExt is true, we are in this situation:
4507     // a = Val
4508     // b = sext ty1 a to ty2
4509     // c = sext ty1 a to ty3
4510     // Assuming ty2 is shorter than ty3, this could be turned into:
4511     // a = Val
4512     // b = sext ty1 a to ty2
4513     // c = sext ty2 b to ty3
4514     // However, the last sext is not free.
4515     if (IsSExt)
4516       return false;
4517 
4518     // This is a ZExt; extending from one type to the other may be free.
4519     // In that case, we would not account for a different use.
4520     Type *NarrowTy;
4521     Type *LargeTy;
4522     if (ExtTy->getScalarType()->getIntegerBitWidth() >
4523         CurTy->getScalarType()->getIntegerBitWidth()) {
4524       NarrowTy = CurTy;
4525       LargeTy = ExtTy;
4526     } else {
4527       NarrowTy = ExtTy;
4528       LargeTy = CurTy;
4529     }
4530 
4531     if (!TLI.isZExtFree(NarrowTy, LargeTy))
4532       return false;
4533   }
4534   // All uses are the same or can be derived from one another for free.
4535   return true;
4536 }
4537 
4538 /// \brief Try to speculatively promote extensions in \p Exts and continue
4539 /// promoting through newly promoted operands recursively as far as doing so is
4540 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
4541 /// When some promotion happened, \p TPT contains the proper state to revert
4542 /// them.
4543 ///
4544 /// \return true if some promotion happened, false otherwise.
4545 bool CodeGenPrepare::tryToPromoteExts(
4546     TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
4547     SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
4548     unsigned CreatedInstsCost) {
4549   bool Promoted = false;
4550 
4551   // Iterate over all the extensions to try to promote them.
4552   for (auto I : Exts) {
4553     // Early check if we directly have ext(load).
4554     if (isa<LoadInst>(I->getOperand(0))) {
4555       ProfitablyMovedExts.push_back(I);
4556       continue;
4557     }
4558 
4559     // Check whether or not we want to do any promotion. The reason we have
4560     // this check inside the for loop is to catch the case where an extension
4561     // is directly fed by a load because in such a case the extension can be
4562     // moved up without any promotion on its operands.
4563     if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
4564       return false;
4565 
4566     // Get the action to perform the promotion.
4567     TypePromotionHelper::Action TPH =
4568         TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
4569     // Check if we can promote.
4570     if (!TPH) {
4571       // Save the current extension as we cannot move up through its operand.
4572       ProfitablyMovedExts.push_back(I);
4573       continue;
4574     }
4575 
4576     // Save the current state.
4577     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4578         TPT.getRestorationPoint();
4579     SmallVector<Instruction *, 4> NewExts;
4580     unsigned NewCreatedInstsCost = 0;
4581     unsigned ExtCost = !TLI->isExtFree(I);
4582     // Promote.
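    // TPH is the promotion action selected above; invoking it rewrites the IR
    // through TPT (so the change can be rolled back) and reports the cost of
    // the instructions it materialized in NewCreatedInstsCost.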
4583     Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
4584                              &NewExts, nullptr, *TLI);
4585     assert(PromotedVal &&
4586            "TypePromotionHelper should have filtered out those cases");
4587 
4588     // Only one extension can be merged into a load. Therefore, if we have
4589     // more than one new extension we heuristically cut this search path,
4590     // because it means we would degrade the code quality.
4591     // With exactly 2, the transformation is neutral, because we will merge
4592     // one extension but leave one. However, we optimistically keep going,
4593     // because the new extension may be removed too.
4594     long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
4595     // FIXME: It would be possible to propagate a negative value instead of
4596     // conservatively ceiling it to 0.
4597     TotalCreatedInstsCost =
4598         std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
4599     if (!StressExtLdPromotion &&
4600         (TotalCreatedInstsCost > 1 ||
4601          !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
4602       // This promotion is not profitable; roll back to the previous state, and
4603       // save the current extension in ProfitablyMovedExts as the latest
4604       // speculative promotion turned out to be unprofitable.
4605       TPT.rollback(LastKnownGood);
4606       ProfitablyMovedExts.push_back(I);
4607       continue;
4608     }
4609     // Continue promoting NewExts as far as doing so is profitable.
4610     SmallVector<Instruction *, 2> NewlyMovedExts;
4611     (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
4612     bool NewPromoted = false;
4613     for (auto ExtInst : NewlyMovedExts) {
4614       Instruction *MovedExt = cast<Instruction>(ExtInst);
4615       Value *ExtOperand = MovedExt->getOperand(0);
4616       // If we have reached a load, we need this extra profitability check
4617       // as it could potentially be merged into an ext(load).
4618       if (isa<LoadInst>(ExtOperand) &&
4619           !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
4620             (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
4621         continue;
4622 
4623       ProfitablyMovedExts.push_back(MovedExt);
4624       NewPromoted = true;
4625     }
4626 
4627     // If none of the speculative promotions for NewExts is profitable, rollback
4628     // and save the current extension (I) as the last profitable extension.
4629     if (!NewPromoted) {
4630       TPT.rollback(LastKnownGood);
4631       ProfitablyMovedExts.push_back(I);
4632       continue;
4633     }
4634     // The promotion is profitable.
4635     Promoted = true;
4636   }
4637   return Promoted;
4638 }
4639 
4640 /// Merge redundant sexts when one dominates the other.
4641 bool CodeGenPrepare::mergeSExts(Function &F) {
4642   DominatorTree DT(F);
4643   bool Changed = false;
4644   for (auto &Entry : ValToSExtendedUses) {
4645     SExts &Insts = Entry.second;
4646     SExts CurPts;
4647     for (Instruction *Inst : Insts) {
4648       if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
4649           Inst->getOperand(0) != Entry.first)
4650         continue;
4651       bool inserted = false;
4652       for (auto &Pt : CurPts) {
4653         if (DT.dominates(Inst, Pt)) {
4654           Pt->replaceAllUsesWith(Inst);
4655           RemovedInsts.insert(Pt);
4656           Pt->removeFromParent();
4657           Pt = Inst;
4658           inserted = true;
4659           Changed = true;
4660           break;
4661         }
4662         if (!DT.dominates(Pt, Inst))
4663           // Give up if we need to merge in a common dominator as the
4664           // experiments show it is not profitable.
4665           continue;
4666         Inst->replaceAllUsesWith(Pt);
4667         RemovedInsts.insert(Inst);
4668         Inst->removeFromParent();
4669         inserted = true;
4670         Changed = true;
4671         break;
4672       }
4673       if (!inserted)
4674         CurPts.push_back(Inst);
4675     }
4676   }
4677   return Changed;
4678 }
4679 
4680 /// Return true if an ext(load) can be formed from an extension in
4681 /// \p MovedExts.
4682 bool CodeGenPrepare::canFormExtLd(
4683     const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
4684     Instruction *&Inst, bool HasPromoted) {
4685   for (auto *MovedExtInst : MovedExts) {
4686     if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
4687       LI = cast<LoadInst>(MovedExtInst->getOperand(0));
4688       Inst = MovedExtInst;
4689       break;
4690     }
4691   }
4692   if (!LI)
4693     return false;
4694 
4695   // If they're already in the same block, there's nothing to do.
4696   // Make the cheap checks first if we did not promote.
4697   // If we promoted, we need to check if it is indeed profitable.
4698   if (!HasPromoted && LI->getParent() == Inst->getParent())
4699     return false;
4700 
4701   EVT VT = TLI->getValueType(*DL, Inst->getType());
4702   EVT LoadVT = TLI->getValueType(*DL, LI->getType());
4703 
4704   // If the load has other users and the truncate is not free, this probably
4705   // isn't worthwhile.
4706   if (!LI->hasOneUse() && (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
4707       !TLI->isTruncateFree(Inst->getType(), LI->getType()))
4708     return false;
4709 
4710   // Check whether the target supports casts folded into loads.
4711   unsigned LType;
4712   if (isa<ZExtInst>(Inst))
4713     LType = ISD::ZEXTLOAD;
4714   else {
4715     assert(isa<SExtInst>(Inst) && "Unexpected ext type!");
4716     LType = ISD::SEXTLOAD;
4717   }
4718 
4719   return TLI->isLoadExtLegal(LType, VT, LoadVT);
4720 }
4721 
4722 /// Move a zext or sext fed by a load into the same basic block as the load,
4723 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
4724 /// extend into the load.
4725 ///
4726 /// E.g.,
4727 /// \code
4728 /// %ld = load i32* %addr
4729 /// %add = add nuw i32 %ld, 4
4730 /// %zext = zext i32 %add to i64
4731 /// \endcode
4732 /// =>
4733 /// \code
4734 /// %ld = load i32* %addr
4735 /// %zext = zext i32 %ld to i64
4736 /// %add = add nuw i64 %zext, 4
4737 /// \endcode
4738 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
4739 /// allows us to match zext(load i32*) to i64.
4740 ///
4741 /// Also, try to promote the computations used to obtain a sign extended
4742 /// value used in memory accesses.
4743 /// E.g.,
4744 /// \code
4745 /// a = add nsw i32 b, 3
4746 /// d = sext i32 a to i64
4747 /// e = getelementptr ..., i64 d
4748 /// \endcode
4749 /// =>
4750 /// \code
4751 /// f = sext i32 b to i64
4752 /// a = add nsw i64 f, 3
4753 /// e = getelementptr ..., i64 a
4754 /// \endcode
4755 ///
4756 /// \p Inst[in/out] the extension may be modified during the process if some
4757 /// promotions apply.
4758 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
4759   // The ExtLoad formation and address type promotion infrastructure requires
4760   // TLI to be effective.
4761   if (!TLI)
4762     return false;
4763 
4764   bool AllowPromotionWithoutCommonHeader = false;
4765   /// See if it is an interesting sext operation for the address type
4766   /// promotion before trying to promote it, e.g., the ones with the right
4767   /// type and used in memory accesses.
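  // shouldConsiderAddressTypePromotion also sets
  // AllowPromotionWithoutCommonHeader through its by-reference parameter.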
4768   bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
4769       *Inst, AllowPromotionWithoutCommonHeader);
4770   TypePromotionTransaction TPT(RemovedInsts);
4771   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4772       TPT.getRestorationPoint();
4773   SmallVector<Instruction *, 1> Exts;
4774   SmallVector<Instruction *, 2> SpeculativelyMovedExts;
4775   Exts.push_back(Inst);
4776 
4777   bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
4778 
4779   // Look for a load being extended.
4780   LoadInst *LI = nullptr;
4781   Instruction *ExtFedByLoad;
4782 
4783   // Try to promote a chain of computation if it allows forming an extended
4784   // load.
4785   if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
4786     assert(LI && ExtFedByLoad && "Expect a valid load and extension");
4787     TPT.commit();
4788     // Move the extend into the same block as the load.
4789     ExtFedByLoad->removeFromParent();
4790     ExtFedByLoad->insertAfter(LI);
4791     // CGP does not check if the zext would be speculatively executed when moved
4792     // to the same basic block as the load. Preserving its original location
4793     // would pessimize the debugging experience, as well as negatively impact
4794     // the quality of sample PGO. We don't want to use "line 0" as that has a
4795     // size cost in the line-table section and logically the zext can be seen as
4796     // part of the load. Therefore we conservatively reuse the same debug
4797     // location for the load and the zext.
4798     ExtFedByLoad->setDebugLoc(LI->getDebugLoc());
4799     ++NumExtsMoved;
4800     Inst = ExtFedByLoad;
4801     return true;
4802   }
4803 
4804   // Continue promoting SExts if the target reported them as considerable.
4805   if (ATPConsiderable &&
4806       performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
4807                                   HasPromoted, TPT, SpeculativelyMovedExts))
4808     return true;
4809 
4810   TPT.rollback(LastKnownGood);
4811   return false;
4812 }
4813 
4814 // Perform address type promotion if doing so is profitable.
4815 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
4816 // instructions that sign extended the same initial value. However, if
4817 // AllowPromotionWithoutCommonHeader == true, we expect promoting the
4818 // extension to be profitable on its own.
4819 bool CodeGenPrepare::performAddressTypePromotion(
4820     Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
4821     bool HasPromoted, TypePromotionTransaction &TPT,
4822     SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
4823   bool Promoted = false;
4824   SmallPtrSet<Instruction *, 1> UnhandledExts;
4825   bool AllSeenFirst = true;
4826   for (auto I : SpeculativelyMovedExts) {
4827     Value *HeadOfChain = I->getOperand(0);
4828     DenseMap<Value *, Instruction *>::iterator AlreadySeen =
4829         SeenChainsForSExt.find(HeadOfChain);
4830     // If there is an unhandled SExt which has the same header, try to promote
4831     // it as well.
4832     if (AlreadySeen != SeenChainsForSExt.end()) {
4833       if (AlreadySeen->second != nullptr)
4834         UnhandledExts.insert(AlreadySeen->second);
4835       AllSeenFirst = false;
4836     }
4837   }
4838 
4839   if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
4840                         SpeculativelyMovedExts.size() == 1)) {
4841     TPT.commit();
4842     if (HasPromoted)
4843       Promoted = true;
4844     for (auto I : SpeculativelyMovedExts) {
4845       Value *HeadOfChain = I->getOperand(0);
4846       SeenChainsForSExt[HeadOfChain] = nullptr;
4847       ValToSExtendedUses[HeadOfChain].push_back(I);
4848     }
4849     // Update Inst as promotion happened.
4850     Inst = SpeculativelyMovedExts.pop_back_val();
4851   } else {
4852     // This is the first chain visited from the header; keep the current chain
4853     // as unhandled. Defer promoting this until we encounter another SExt
4854     // chain derived from the same header.
4855     for (auto I : SpeculativelyMovedExts) {
4856       Value *HeadOfChain = I->getOperand(0);
4857       SeenChainsForSExt[HeadOfChain] = Inst;
4858     }
4859     return false;
4860   }
4861 
4862   if (!AllSeenFirst && !UnhandledExts.empty())
4863     for (auto VisitedSExt : UnhandledExts) {
4864       if (RemovedInsts.count(VisitedSExt))
4865         continue;
4866       TypePromotionTransaction TPT(RemovedInsts);
4867       SmallVector<Instruction *, 1> Exts;
4868       SmallVector<Instruction *, 2> Chains;
4869       Exts.push_back(VisitedSExt);
4870       bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
4871       TPT.commit();
4872       if (HasPromoted)
4873         Promoted = true;
4874       for (auto I : Chains) {
4875         Value *HeadOfChain = I->getOperand(0);
4876         // Mark this as handled.
4877         SeenChainsForSExt[HeadOfChain] = nullptr;
4878         ValToSExtendedUses[HeadOfChain].push_back(I);
4879       }
4880     }
4881   return Promoted;
4882 }
4883 
4884 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
4885   BasicBlock *DefBB = I->getParent();
4886 
4887   // If the result of a {s|z}ext and its source are both live out, rewrite all
4888   // other uses of the source with the result of the extension.
4889   Value *Src = I->getOperand(0);
4890   if (Src->hasOneUse())
4891     return false;
4892 
4893   // Only do this xform if truncating is free.
4894   if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
4895     return false;
4896 
4897   // Only safe to perform the optimization if the source is also defined in
4898   // this block.
4899   if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
4900     return false;
4901 
4902   bool DefIsLiveOut = false;
4903   for (User *U : I->users()) {
4904     Instruction *UI = cast<Instruction>(U);
4905 
4906     // Figure out which BB this ext is used in.
4907     BasicBlock *UserBB = UI->getParent();
4908     if (UserBB == DefBB) continue;
4909     DefIsLiveOut = true;
4910     break;
4911   }
4912   if (!DefIsLiveOut)
4913     return false;
4914 
4915   // Make sure none of the uses are PHI nodes.
4916   for (User *U : Src->users()) {
4917     Instruction *UI = cast<Instruction>(U);
4918     BasicBlock *UserBB = UI->getParent();
4919     if (UserBB == DefBB) continue;
4920     // Be conservative. We don't want this xform to end up introducing
4921     // reloads just before load / store instructions.
4922     if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
4923       return false;
4924   }
4925 
4926   // InsertedTruncs - Only insert one trunc in each block.
4927   DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
4928 
4929   bool MadeChange = false;
4930   for (Use &U : Src->uses()) {
4931     Instruction *User = cast<Instruction>(U.getUser());
4932 
4933     // Figure out which BB this ext is used in.
4934     BasicBlock *UserBB = User->getParent();
4935     if (UserBB == DefBB) continue;
4936 
4937     // Both src and def are live in this block. Rewrite the use.
4938     Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
4939 
4940     if (!InsertedTrunc) {
4941       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
4942       assert(InsertPt != UserBB->end());
4943       InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
4944       InsertedInsts.insert(InsertedTrunc);
4945     }
4946 
4947     // Replace a use of the {s|z}ext source with a use of the result.
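    // This is sound because truncating the extended value yields exactly the
    // original source value.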
4948 U = InsertedTrunc; 4949 ++NumExtUses; 4950 MadeChange = true; 4951 } 4952 4953 return MadeChange; 4954 } 4955 4956 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 4957 // just after the load if the target can fold this into one extload instruction, 4958 // with the hope of eliminating some of the other later "and" instructions using 4959 // the loaded value. "and"s that are made trivially redundant by the insertion 4960 // of the new "and" are removed by this function, while others (e.g. those whose 4961 // path from the load goes through a phi) are left for isel to potentially 4962 // remove. 4963 // 4964 // For example: 4965 // 4966 // b0: 4967 // x = load i32 4968 // ... 4969 // b1: 4970 // y = and x, 0xff 4971 // z = use y 4972 // 4973 // becomes: 4974 // 4975 // b0: 4976 // x = load i32 4977 // x' = and x, 0xff 4978 // ... 4979 // b1: 4980 // z = use x' 4981 // 4982 // whereas: 4983 // 4984 // b0: 4985 // x1 = load i32 4986 // ... 4987 // b1: 4988 // x2 = load i32 4989 // ... 4990 // b2: 4991 // x = phi x1, x2 4992 // y = and x, 0xff 4993 // 4994 // becomes (after a call to optimizeLoadExt for each load): 4995 // 4996 // b0: 4997 // x1 = load i32 4998 // x1' = and x1, 0xff 4999 // ... 5000 // b1: 5001 // x2 = load i32 5002 // x2' = and x2, 0xff 5003 // ... 5004 // b2: 5005 // x = phi x1', x2' 5006 // y = and x, 0xff 5007 // 5008 5009 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 5010 5011 if (!Load->isSimple() || 5012 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) 5013 return false; 5014 5015 // Skip loads we've already transformed. 5016 if (Load->hasOneUse() && 5017 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 5018 return false; 5019 5020 // Look at all uses of Load, looking through phis, to determine how many bits 5021 // of the loaded value are needed. 5022 SmallVector<Instruction *, 8> WorkList; 5023 SmallPtrSet<Instruction *, 16> Visited; 5024 SmallVector<Instruction *, 8> AndsToMaybeRemove; 5025 for (auto *U : Load->users()) 5026 WorkList.push_back(cast<Instruction>(U)); 5027 5028 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 5029 unsigned BitWidth = LoadResultVT.getSizeInBits(); 5030 APInt DemandBits(BitWidth, 0); 5031 APInt WidestAndBits(BitWidth, 0); 5032 5033 while (!WorkList.empty()) { 5034 Instruction *I = WorkList.back(); 5035 WorkList.pop_back(); 5036 5037 // Break use-def graph loops. 5038 if (!Visited.insert(I).second) 5039 continue; 5040 5041 // For a PHI node, push all of its users. 5042 if (auto *Phi = dyn_cast<PHINode>(I)) { 5043 for (auto *U : Phi->users()) 5044 WorkList.push_back(cast<Instruction>(U)); 5045 continue; 5046 } 5047 5048 switch (I->getOpcode()) { 5049 case llvm::Instruction::And: { 5050 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 5051 if (!AndC) 5052 return false; 5053 APInt AndBits = AndC->getValue(); 5054 DemandBits |= AndBits; 5055 // Keep track of the widest and mask we see. 
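    // Only an "and" of the load itself whose mask equals the widest mask seen
    // is recorded as a candidate for removal below.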
5056 if (AndBits.ugt(WidestAndBits)) 5057 WidestAndBits = AndBits; 5058 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 5059 AndsToMaybeRemove.push_back(I); 5060 break; 5061 } 5062 5063 case llvm::Instruction::Shl: { 5064 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 5065 if (!ShlC) 5066 return false; 5067 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 5068 DemandBits.setLowBits(BitWidth - ShiftAmt); 5069 break; 5070 } 5071 5072 case llvm::Instruction::Trunc: { 5073 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 5074 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 5075 DemandBits.setLowBits(TruncBitWidth); 5076 break; 5077 } 5078 5079 default: 5080 return false; 5081 } 5082 } 5083 5084 uint32_t ActiveBits = DemandBits.getActiveBits(); 5085 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 5086 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 5087 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 5088 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 5089 // followed by an AND. 5090 // TODO: Look into removing this restriction by fixing backends to either 5091 // return false for isLoadExtLegal for i1 or have them select this pattern to 5092 // a single instruction. 5093 // 5094 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 5095 // mask, since these are the only ands that will be removed by isel. 5096 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 5097 WidestAndBits != DemandBits) 5098 return false; 5099 5100 LLVMContext &Ctx = Load->getType()->getContext(); 5101 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 5102 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 5103 5104 // Reject cases that won't be matched as extloads. 5105 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 5106 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 5107 return false; 5108 5109 IRBuilder<> Builder(Load->getNextNode()); 5110 auto *NewAnd = dyn_cast<Instruction>( 5111 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 5112 // Mark this instruction as "inserted by CGP", so that other 5113 // optimizations don't touch it. 5114 InsertedInsts.insert(NewAnd); 5115 5116 // Replace all uses of load with new and (except for the use of load in the 5117 // new and itself). 5118 Load->replaceAllUsesWith(NewAnd); 5119 NewAnd->setOperand(0, Load); 5120 5121 // Remove any and instructions that are now redundant. 5122 for (auto *And : AndsToMaybeRemove) 5123 // Check that the and mask is the same as the one we decided to put on the 5124 // new and. 5125 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 5126 And->replaceAllUsesWith(NewAnd); 5127 if (&*CurInstIterator == And) 5128 CurInstIterator = std::next(And->getIterator()); 5129 And->eraseFromParent(); 5130 ++NumAndUses; 5131 } 5132 5133 ++NumAndsAdded; 5134 return true; 5135 } 5136 5137 /// Check if V (an operand of a select instruction) is an expensive instruction 5138 /// that is only used once. 5139 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 5140 auto *I = dyn_cast<Instruction>(V); 5141 // If it's safe to speculatively execute, then it should not have side 5142 // effects; therefore, it's safe to sink and possibly *not* execute. 
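  // Require a single use so sinking does not duplicate work, and a cost of
  // at least TCC_Expensive so the new branch is likely to pay for itself.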
5143   return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
5144          TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive;
5145 }
5146 
5147 /// Returns true if a SelectInst should be turned into an explicit branch.
5148 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
5149                                                 const TargetLowering *TLI,
5150                                                 SelectInst *SI) {
5151   // If even a predictable select is cheap, then a branch can't be cheaper.
5152   if (!TLI->isPredictableSelectExpensive())
5153     return false;
5154 
5155   // FIXME: This should use the same heuristics as IfConversion to determine
5156   // whether a select is better represented as a branch.
5157 
5158   // If metadata tells us that the select condition is obviously predictable,
5159   // then we want to replace the select with a branch.
5160   uint64_t TrueWeight, FalseWeight;
5161   if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
5162     uint64_t Max = std::max(TrueWeight, FalseWeight);
5163     uint64_t Sum = TrueWeight + FalseWeight;
5164     if (Sum != 0) {
5165       auto Probability = BranchProbability::getBranchProbability(Max, Sum);
5166       if (Probability > TLI->getPredictableBranchThreshold())
5167         return true;
5168     }
5169   }
5170 
5171   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
5172 
5173   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
5174   // comparison condition. If the compare has more than one use, there's
5175   // probably another cmov or setcc around, so it's not worth emitting a branch.
5176   if (!Cmp || !Cmp->hasOneUse())
5177     return false;
5178 
5179   // If either operand of the select is expensive and only needed on one side
5180   // of the select, we should form a branch.
5181   if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
5182       sinkSelectOperand(TTI, SI->getFalseValue()))
5183     return true;
5184 
5185   return false;
5186 }
5187 
5188 /// If \p isTrue is true, return the true value of \p SI, otherwise return the
5189 /// false value of \p SI. If the true/false value of \p SI is defined by any
5190 /// select instructions in \p Selects, look through the defining select
5191 /// instruction until the true/false value is not defined in \p Selects.
5192 static Value *getTrueOrFalseValue(
5193     SelectInst *SI, bool isTrue,
5194     const SmallPtrSet<const Instruction *, 2> &Selects) {
5195   Value *V;
5196 
5197   for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
5198        DefSI = dyn_cast<SelectInst>(V)) {
5199     assert(DefSI->getCondition() == SI->getCondition() &&
5200            "The condition of DefSI does not match with SI");
5201     V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
5202   }
5203   return V;
5204 }
5205 
5206 /// If we have a SelectInst that will likely profit from branch prediction,
5207 /// turn it into a branch.
5208 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
5209   // Find all consecutive select instructions that share the same condition.
5210   SmallVector<SelectInst *, 2> ASI;
5211   ASI.push_back(SI);
5212   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
5213        It != SI->getParent()->end(); ++It) {
5214     SelectInst *I = dyn_cast<SelectInst>(&*It);
5215     if (I && SI->getCondition() == I->getCondition()) {
5216       ASI.push_back(I);
5217     } else {
5218       break;
5219     }
5220   }
5221 
5222   SelectInst *LastSI = ASI.back();
5223   // Increment the current iterator to skip the remaining select instructions,
5224   // because they will either all be lowered to branches or none will.
5225 CurInstIterator = std::next(LastSI->getIterator()); 5226 5227 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 5228 5229 // Can we convert the 'select' to CF ? 5230 if (DisableSelectToBranch || OptSize || !TLI || VectorCond || 5231 SI->getMetadata(LLVMContext::MD_unpredictable)) 5232 return false; 5233 5234 TargetLowering::SelectSupportKind SelectKind; 5235 if (VectorCond) 5236 SelectKind = TargetLowering::VectorMaskSelect; 5237 else if (SI->getType()->isVectorTy()) 5238 SelectKind = TargetLowering::ScalarCondVectorVal; 5239 else 5240 SelectKind = TargetLowering::ScalarValSelect; 5241 5242 if (TLI->isSelectSupported(SelectKind) && 5243 !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) 5244 return false; 5245 5246 ModifiedDT = true; 5247 5248 // Transform a sequence like this: 5249 // start: 5250 // %cmp = cmp uge i32 %a, %b 5251 // %sel = select i1 %cmp, i32 %c, i32 %d 5252 // 5253 // Into: 5254 // start: 5255 // %cmp = cmp uge i32 %a, %b 5256 // br i1 %cmp, label %select.true, label %select.false 5257 // select.true: 5258 // br label %select.end 5259 // select.false: 5260 // br label %select.end 5261 // select.end: 5262 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 5263 // 5264 // In addition, we may sink instructions that produce %c or %d from 5265 // the entry block into the destination(s) of the new branch. 5266 // If the true or false blocks do not contain a sunken instruction, that 5267 // block and its branch may be optimized away. In that case, one side of the 5268 // first branch will point directly to select.end, and the corresponding PHI 5269 // predecessor block will be the start block. 5270 5271 // First, we split the block containing the select into 2 blocks. 5272 BasicBlock *StartBlock = SI->getParent(); 5273 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 5274 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 5275 5276 // Delete the unconditional branch that was just created by the split. 5277 StartBlock->getTerminator()->eraseFromParent(); 5278 5279 // These are the new basic blocks for the conditional branch. 5280 // At least one will become an actual new basic block. 5281 BasicBlock *TrueBlock = nullptr; 5282 BasicBlock *FalseBlock = nullptr; 5283 BranchInst *TrueBranch = nullptr; 5284 BranchInst *FalseBranch = nullptr; 5285 5286 // Sink expensive instructions into the conditional blocks to avoid executing 5287 // them speculatively. 5288 for (SelectInst *SI : ASI) { 5289 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 5290 if (TrueBlock == nullptr) { 5291 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 5292 EndBlock->getParent(), EndBlock); 5293 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 5294 } 5295 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 5296 TrueInst->moveBefore(TrueBranch); 5297 } 5298 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 5299 if (FalseBlock == nullptr) { 5300 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 5301 EndBlock->getParent(), EndBlock); 5302 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5303 } 5304 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 5305 FalseInst->moveBefore(FalseBranch); 5306 } 5307 } 5308 5309 // If there was nothing to sink, then arbitrarily choose the 'false' side 5310 // for a new input value to the PHI. 
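  // (TrueBlock == FalseBlock can only hold here when both are still null.)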
5311   if (TrueBlock == FalseBlock) {
5312     assert(TrueBlock == nullptr &&
5313            "Unexpected basic block transform while optimizing select");
5314 
5315     FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
5316                                     EndBlock->getParent(), EndBlock);
5317     BranchInst::Create(EndBlock, FalseBlock);
5318   }
5319 
5320   // Insert the real conditional branch based on the original condition.
5321   // If we did not create a new block for one of the 'true' or 'false' paths
5322   // of the condition, it means that side of the branch goes to the end block
5323   // directly and the path originates from the start block from the point of
5324   // view of the new PHI.
5325   BasicBlock *TT, *FT;
5326   if (TrueBlock == nullptr) {
5327     TT = EndBlock;
5328     FT = FalseBlock;
5329     TrueBlock = StartBlock;
5330   } else if (FalseBlock == nullptr) {
5331     TT = TrueBlock;
5332     FT = EndBlock;
5333     FalseBlock = StartBlock;
5334   } else {
5335     TT = TrueBlock;
5336     FT = FalseBlock;
5337   }
5338   IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI);
5339 
5340   SmallPtrSet<const Instruction *, 2> INS;
5341   INS.insert(ASI.begin(), ASI.end());
5342   // Use a reverse iterator because a later select may use the value of an
5343   // earlier select, and we need to propagate the value through the earlier
5344   // select to get the PHI operand.
5345   for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
5346     SelectInst *SI = *It;
5347     // The select itself is replaced with a PHI Node.
5348     PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
5349     PN->takeName(SI);
5350     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
5351     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
5352 
5353     SI->replaceAllUsesWith(PN);
5354     SI->eraseFromParent();
5355     INS.erase(SI);
5356     ++NumSelectsExpanded;
5357   }
5358 
5359   // Instruct OptimizeBlock to skip to the next block.
5360   CurInstIterator = StartBlock->end();
5361   return true;
5362 }
5363 
5364 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
5365   SmallVector<int, 16> Mask(SVI->getShuffleMask());
5366   int SplatElem = -1;
5367   for (unsigned i = 0; i < Mask.size(); ++i) {
5368     if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
5369       return false;
5370     SplatElem = Mask[i];
5371   }
5372 
5373   return true;
5374 }
5375 
5376 /// Some targets have expensive vector shifts if the lanes aren't all the same
5377 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
5378 /// it's often worth sinking a shufflevector splat down to its use so that
5379 /// codegen can spot all lanes are identical.
5380 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
5381   BasicBlock *DefBB = SVI->getParent();
5382 
5383   // Only do this xform if variable vector shifts are particularly expensive.
5384   if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
5385     return false;
5386 
5387   // We only expect better codegen by sinking a shuffle if we can recognise a
5388   // constant splat.
5389   if (!isBroadcastShuffle(SVI))
5390     return false;
5391 
5392   // InsertedShuffles - Only insert a shuffle in each block once.
5393   DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
5394 
5395   bool MadeChange = false;
5396   for (User *U : SVI->users()) {
5397     Instruction *UI = cast<Instruction>(U);
5398 
5399     // Figure out which BB this shuffle is used in.
5400     BasicBlock *UserBB = UI->getParent();
5401     if (UserBB == DefBB) continue;
5402 
5403     // For now only apply this when the splat is used by a shift instruction.
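    // Shifts by a splatted amount are exactly what isVectorShiftByScalarCheap
    // told us the target handles cheaply.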
5404     if (!UI->isShift()) continue;
5405 
5406     // Everything checks out, sink the shuffle if the user's block doesn't
5407     // already have a copy.
5408     Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
5409 
5410     if (!InsertedShuffle) {
5411       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
5412       assert(InsertPt != UserBB->end());
5413       InsertedShuffle =
5414           new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
5415                                 SVI->getOperand(2), "", &*InsertPt);
5416     }
5417 
5418     UI->replaceUsesOfWith(SVI, InsertedShuffle);
5419     MadeChange = true;
5420   }
5421 
5422   // If we removed all uses, nuke the shuffle.
5423   if (SVI->use_empty()) {
5424     SVI->eraseFromParent();
5425     MadeChange = true;
5426   }
5427 
5428   return MadeChange;
5429 }
5430 
5431 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
5432   if (!TLI || !DL)
5433     return false;
5434 
5435   Value *Cond = SI->getCondition();
5436   Type *OldType = Cond->getType();
5437   LLVMContext &Context = Cond->getContext();
5438   MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
5439   unsigned RegWidth = RegType.getSizeInBits();
5440 
5441   if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
5442     return false;
5443 
5444   // If the register width is greater than the type width, expand the condition
5445   // of the switch instruction and each case constant to the width of the
5446   // register. By widening the type of the switch condition, subsequent
5447   // comparisons (for case comparisons) will not need to be extended to the
5448   // preferred register width, so we will potentially eliminate N-1 extends,
5449   // where N is the number of cases in the switch.
5450   auto *NewType = Type::getIntNTy(Context, RegWidth);
5451 
5452   // Zero-extend the switch condition and case constants unless the switch
5453   // condition is a function argument that is already being sign-extended.
5454   // In that case, we can avoid an unnecessary mask/extension by sign-extending
5455   // everything instead.
5456   Instruction::CastOps ExtType = Instruction::ZExt;
5457   if (auto *Arg = dyn_cast<Argument>(Cond))
5458     if (Arg->hasSExtAttr())
5459       ExtType = Instruction::SExt;
5460 
5461   auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
5462   ExtInst->insertBefore(SI);
5463   SI->setCondition(ExtInst);
5464   for (auto Case : SI->cases()) {
5465     APInt NarrowConst = Case.getCaseValue()->getValue();
5466     APInt WideConst = (ExtType == Instruction::ZExt) ?
5467                       NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
5468     Case.setValue(ConstantInt::get(Context, WideConst));
5469   }
5470 
5471   return true;
5472 }
5473 
5474 namespace {
5475 /// \brief Helper class to promote a scalar operation to a vector one.
5476 /// This class is used to move an extractelement transition downward.
5477 /// E.g.,
5478 /// a = vector_op <2 x i32>
5479 /// b = extractelement <2 x i32> a, i32 0
5480 /// c = scalar_op b
5481 /// store c
5482 ///
5483 /// =>
5484 /// a = vector_op <2 x i32>
5485 /// c = vector_op a (equivalent to scalar_op on the related lane)
5486 /// * d = extractelement <2 x i32> c, i32 0
5487 /// * store d
5488 /// Assuming both extractelement and store can be combined, we get rid of the
5489 /// transition.
5490 class VectorPromoteHelper {
5491   /// DataLayout associated with the current module.
5492   const DataLayout &DL;
5493 
5494   /// Used to perform some checks on the legality of vector operations.
5495   const TargetLowering &TLI;
5496 
5497   /// Used to estimate the cost of the promoted chain.
5498 const TargetTransformInfo &TTI; 5499 5500 /// The transition being moved downwards. 5501 Instruction *Transition; 5502 /// The sequence of instructions to be promoted. 5503 SmallVector<Instruction *, 4> InstsToBePromoted; 5504 /// Cost of combining a store and an extract. 5505 unsigned StoreExtractCombineCost; 5506 /// Instruction that will be combined with the transition. 5507 Instruction *CombineInst; 5508 5509 /// \brief The instruction that represents the current end of the transition. 5510 /// Since we are faking the promotion until we reach the end of the chain 5511 /// of computation, we need a way to get the current end of the transition. 5512 Instruction *getEndOfTransition() const { 5513 if (InstsToBePromoted.empty()) 5514 return Transition; 5515 return InstsToBePromoted.back(); 5516 } 5517 5518 /// \brief Return the index of the original value in the transition. 5519 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 5520 /// c, is at index 0. 5521 unsigned getTransitionOriginalValueIdx() const { 5522 assert(isa<ExtractElementInst>(Transition) && 5523 "Other kind of transitions are not supported yet"); 5524 return 0; 5525 } 5526 5527 /// \brief Return the index of the index in the transition. 5528 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 5529 /// is at index 1. 5530 unsigned getTransitionIdx() const { 5531 assert(isa<ExtractElementInst>(Transition) && 5532 "Other kind of transitions are not supported yet"); 5533 return 1; 5534 } 5535 5536 /// \brief Get the type of the transition. 5537 /// This is the type of the original value. 5538 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 5539 /// transition is <2 x i32>. 5540 Type *getTransitionType() const { 5541 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 5542 } 5543 5544 /// \brief Promote \p ToBePromoted by moving \p Def downward through. 5545 /// I.e., we have the following sequence: 5546 /// Def = Transition <ty1> a to <ty2> 5547 /// b = ToBePromoted <ty2> Def, ... 5548 /// => 5549 /// b = ToBePromoted <ty1> a, ... 5550 /// Def = Transition <ty1> ToBePromoted to <ty2> 5551 void promoteImpl(Instruction *ToBePromoted); 5552 5553 /// \brief Check whether or not it is profitable to promote all the 5554 /// instructions enqueued to be promoted. 5555 bool isProfitableToPromote() { 5556 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 5557 unsigned Index = isa<ConstantInt>(ValIdx) 5558 ? cast<ConstantInt>(ValIdx)->getZExtValue() 5559 : -1; 5560 Type *PromotedType = getTransitionType(); 5561 5562 StoreInst *ST = cast<StoreInst>(CombineInst); 5563 unsigned AS = ST->getPointerAddressSpace(); 5564 unsigned Align = ST->getAlignment(); 5565 // Check if this store is supported. 5566 if (!TLI.allowsMisalignedMemoryAccesses( 5567 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 5568 Align)) { 5569 // If this is not supported, there is no way we can combine 5570 // the extract with the store. 5571 return false; 5572 } 5573 5574 // The scalar chain of computation has to pay for the transition 5575 // scalar to vector. 5576 // The vector chain has to account for the combining cost. 5577 uint64_t ScalarCost = 5578 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 5579 uint64_t VectorCost = StoreExtractCombineCost; 5580 for (const auto &Inst : InstsToBePromoted) { 5581 // Compute the cost. 5582 // By construction, all instructions being promoted are arithmetic ones. 
5583 // Moreover, one argument is a constant that can be viewed as a splat 5584 // constant. 5585 Value *Arg0 = Inst->getOperand(0); 5586 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 5587 isa<ConstantFP>(Arg0); 5588 TargetTransformInfo::OperandValueKind Arg0OVK = 5589 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 5590 : TargetTransformInfo::OK_AnyValue; 5591 TargetTransformInfo::OperandValueKind Arg1OVK = 5592 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 5593 : TargetTransformInfo::OK_AnyValue; 5594 ScalarCost += TTI.getArithmeticInstrCost( 5595 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); 5596 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 5597 Arg0OVK, Arg1OVK); 5598 } 5599 DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 5600 << ScalarCost << "\nVector: " << VectorCost << '\n'); 5601 return ScalarCost > VectorCost; 5602 } 5603 5604 /// \brief Generate a constant vector with \p Val with the same 5605 /// number of elements as the transition. 5606 /// \p UseSplat defines whether or not \p Val should be replicated 5607 /// across the whole vector. 5608 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 5609 /// otherwise we generate a vector with as many undef as possible: 5610 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 5611 /// used at the index of the extract. 5612 Value *getConstantVector(Constant *Val, bool UseSplat) const { 5613 unsigned ExtractIdx = UINT_MAX; 5614 if (!UseSplat) { 5615 // If we cannot determine where the constant must be, we have to 5616 // use a splat constant. 5617 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 5618 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 5619 ExtractIdx = CstVal->getSExtValue(); 5620 else 5621 UseSplat = true; 5622 } 5623 5624 unsigned End = getTransitionType()->getVectorNumElements(); 5625 if (UseSplat) 5626 return ConstantVector::getSplat(End, Val); 5627 5628 SmallVector<Constant *, 4> ConstVec; 5629 UndefValue *UndefVal = UndefValue::get(Val->getType()); 5630 for (unsigned Idx = 0; Idx != End; ++Idx) { 5631 if (Idx == ExtractIdx) 5632 ConstVec.push_back(Val); 5633 else 5634 ConstVec.push_back(UndefVal); 5635 } 5636 return ConstantVector::get(ConstVec); 5637 } 5638 5639 /// \brief Check if promoting to a vector type an operand at \p OperandIdx 5640 /// in \p Use can trigger undefined behavior. 5641 static bool canCauseUndefinedBehavior(const Instruction *Use, 5642 unsigned OperandIdx) { 5643 // This is not safe to introduce undef when the operand is on 5644 // the right hand side of a division-like instruction. 5645 if (OperandIdx != 1) 5646 return false; 5647 switch (Use->getOpcode()) { 5648 default: 5649 return false; 5650 case Instruction::SDiv: 5651 case Instruction::UDiv: 5652 case Instruction::SRem: 5653 case Instruction::URem: 5654 return true; 5655 case Instruction::FDiv: 5656 case Instruction::FRem: 5657 return !Use->hasNoNaNs(); 5658 } 5659 llvm_unreachable(nullptr); 5660 } 5661 5662 public: 5663 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 5664 const TargetTransformInfo &TTI, Instruction *Transition, 5665 unsigned CombineCost) 5666 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 5667 StoreExtractCombineCost(CombineCost), CombineInst(nullptr) { 5668 assert(Transition && "Do not know how to promote null"); 5669 } 5670 5671 /// \brief Check if we can promote \p ToBePromoted to \p Type. 
5672   bool canPromote(const Instruction *ToBePromoted) const {
5673     // We could support CastInst too.
5674     return isa<BinaryOperator>(ToBePromoted);
5675   }
5676 
5677   /// \brief Check if it is profitable to promote \p ToBePromoted
5678   /// by moving the transition downward through it.
5679   bool shouldPromote(const Instruction *ToBePromoted) const {
5680     // Promote only if all the operands can be statically expanded.
5681     // Indeed, we do not want to introduce any new kind of transitions.
5682     for (const Use &U : ToBePromoted->operands()) {
5683       const Value *Val = U.get();
5684       if (Val == getEndOfTransition()) {
5685         // If the use is a division and the transition is on the rhs,
5686         // we cannot promote the operation, otherwise we may create a
5687         // division by zero.
5688         if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
5689           return false;
5690         continue;
5691       }
5692       if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
5693           !isa<ConstantFP>(Val))
5694         return false;
5695     }
5696     // Check that the resulting operation is legal.
5697     int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
5698     if (!ISDOpcode)
5699       return false;
5700     return StressStoreExtract ||
5701            TLI.isOperationLegalOrCustom(
5702                ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
5703   }
5704 
5705   /// \brief Check whether or not \p Use can be combined
5706   /// with the transition.
5707   /// I.e., is it possible to do Use(Transition) => AnotherUse?
5708   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
5709 
5710   /// \brief Record \p ToBePromoted as part of the chain to be promoted.
5711   void enqueueForPromotion(Instruction *ToBePromoted) {
5712     InstsToBePromoted.push_back(ToBePromoted);
5713   }
5714 
5715   /// \brief Set the instruction that will be combined with the transition.
5716   void recordCombineInstruction(Instruction *ToBeCombined) {
5717     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
5718     CombineInst = ToBeCombined;
5719   }
5720 
5721   /// \brief Promote all the instructions enqueued for promotion if it is
5722   /// profitable.
5723   /// \return True if the promotion happened, false otherwise.
5724   bool promote() {
5725     // Check if there is something to promote.
5726     // Right now, if we do not have anything to combine with,
5727     // we assume the promotion is not profitable.
5728     if (InstsToBePromoted.empty() || !CombineInst)
5729       return false;
5730 
5731     // Check cost.
5732     if (!StressStoreExtract && !isProfitableToPromote())
5733       return false;
5734 
5735     // Promote.
5736     for (auto &ToBePromoted : InstsToBePromoted)
5737       promoteImpl(ToBePromoted);
5738     InstsToBePromoted.clear();
5739     return true;
5740   }
5741 };
5742 } // End of anonymous namespace.
5743 
5744 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
5745   // At this point, we know that all the operands of ToBePromoted but Def
5746   // can be statically promoted.
5747   // For Def, we need to use its parameter in ToBePromoted:
5748   // b = ToBePromoted ty1 a
5749   // Def = Transition ty1 b to ty2
5750   // Move the transition down.
5751   // 1. Replace all uses of the promoted operation by the transition.
5752   // = ... b => = ... Def.
5753   assert(ToBePromoted->getType() == Transition->getType() &&
5754          "The type of the result of the transition does not match "
5755          "the final type");
5756   ToBePromoted->replaceAllUsesWith(Transition);
5757   // 2. Update the type of the uses.
5758   // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
5759   Type *TransitionTy = getTransitionType();
5760   ToBePromoted->mutateType(TransitionTy);
5761   // 3. Update all the operands of the promoted operation with promoted
5762   // operands.
5763   // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
5764   for (Use &U : ToBePromoted->operands()) {
5765     Value *Val = U.get();
5766     Value *NewVal = nullptr;
5767     if (Val == Transition)
5768       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
5769     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
5770              isa<ConstantFP>(Val)) {
5771       // Use a splat constant if it is not safe to use undef.
5772       NewVal = getConstantVector(
5773           cast<Constant>(Val),
5774           isa<UndefValue>(Val) ||
5775               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
5776     } else
5777       llvm_unreachable("Did you modify shouldPromote and forget to update "
5778                        "this?");
5779     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
5780   }
5781   Transition->removeFromParent();
5782   Transition->insertAfter(ToBePromoted);
5783   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
5784 }
5785 
5786 /// Some targets can do store(extractelement) with one instruction.
5787 /// Try to push the extractelement towards the stores when the target
5788 /// has this feature and this is profitable.
5789 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
5790   unsigned CombineCost = UINT_MAX;
5791   if (DisableStoreExtract || !TLI ||
5792       (!StressStoreExtract &&
5793        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
5794                                        Inst->getOperand(1), CombineCost)))
5795     return false;
5796 
5797   // At this point we know that Inst is a vector to scalar transition.
5798   // Try to move it down the def-use chain, until:
5799   // - We can combine the transition with its single use
5800   //   => we got rid of the transition.
5801   // - We escape the current basic block
5802   //   => we would need to check that we are moving it at a cheaper place and
5803   //      we do not do that for now.
5804   BasicBlock *Parent = Inst->getParent();
5805   DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
5806   VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
5807   // If the transition has more than one use, assume this is not going to be
5808   // beneficial.
5809   while (Inst->hasOneUse()) {
5810     Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
5811     DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
5812 
5813     if (ToBePromoted->getParent() != Parent) {
5814       DEBUG(dbgs() << "Instruction to promote is in a different block ("
5815                    << ToBePromoted->getParent()->getName()
5816                    << ") than the transition (" << Parent->getName() << ").\n");
5817       return false;
5818     }
5819 
5820     if (VPH.canCombine(ToBePromoted)) {
5821       DEBUG(dbgs() << "Assume " << *Inst << '\n'
5822                    << "will be combined with: " << *ToBePromoted << '\n');
5823       VPH.recordCombineInstruction(ToBePromoted);
5824       bool Changed = VPH.promote();
5825       NumStoreExtractExposed += Changed;
5826       return Changed;
5827     }
5828 
5829     DEBUG(dbgs() << "Try promoting.\n");
5830     if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
5831       return false;
5832 
5833     DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
5834 
5835     VPH.enqueueForPromotion(ToBePromoted);
5836     Inst = ToBePromoted;
5837   }
5838   return false;
5839 }
5840 
5841 /// For the instruction sequence of store below, F and I values
5842 /// are bundled together as an i64 value before being stored into memory.
5843 /// Sometimes it is more efficient to generate separate stores for F and I,
5844 /// which can remove the bitwise instructions or sink them to colder places.
5845 ///
5846 /// (store (or (zext (bitcast F to i32) to i64),
5847 ///            (shl (zext I to i64), 32)), addr)  -->
5848 /// (store F, addr) and (store I, addr+4)
5849 ///
5850 /// Similarly, splitting for other merged stores can also be beneficial, like:
5851 /// For pair of {i32, i32}, i64 store --> two i32 stores.
5852 /// For pair of {i32, i16}, i64 store --> two i32 stores.
5853 /// For pair of {i16, i16}, i32 store --> two i16 stores.
5854 /// For pair of {i16, i8},  i32 store --> two i16 stores.
5855 /// For pair of {i8, i8},   i16 store --> two i8 stores.
5856 ///
5857 /// We allow each target to determine specifically which kind of splitting is
5858 /// supported.
5859 ///
5860 /// The store patterns are commonly seen from the simple code snippet below
5861 /// when only std::make_pair(...) is SROA-transformed before being inlined into hoo():
5862 /// void goo(const std::pair<int, float> &);
5863 /// hoo() {
5864 ///   ...
5865 ///   goo(std::make_pair(tmp, ftmp));
5866 ///   ...
5867 /// }
5868 ///
5869 /// Although we already have similar splitting in DAG Combine, we duplicate
5870 /// it in CodeGenPrepare to catch the case in which the pattern spans
5871 /// multiple BBs. The logic in DAG Combine is kept to catch cases generated
5872 /// during code expansion.
5873 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
5874                                 const TargetLowering &TLI) {
5875   // Handle simple but common cases only.
5876   Type *StoreType = SI.getValueOperand()->getType();
5877   if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
5878       DL.getTypeSizeInBits(StoreType) == 0)
5879     return false;
5880 
5881   unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
5882   Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
5883   if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
5884       DL.getTypeSizeInBits(SplitStoreType))
5885     return false;
5886 
5887   // Match the following patterns:
5888   // (store (or (zext LValue to i64),
5889   //            (shl (zext HValue to i64), 32)), HalfValBitSize)
5890   // or
5891   // (store (or (shl (zext HValue to i64), 32),
5892   //            (zext LValue to i64)), HalfValBitSize)
5893   // Expect both operands of OR and the first operand of SHL to have only
5894   // one use.
5895   Value *LValue, *HValue;
5896   if (!match(SI.getValueOperand(),
5897              m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
5898                     m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
5899                                    m_SpecificInt(HalfValBitSize))))))
5900     return false;
5901 
5902   // Check that LValue and HValue are ints no wider than HalfValBitSize.
5903   if (!LValue->getType()->isIntegerTy() ||
5904       DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
5905       !HValue->getType()->isIntegerTy() ||
5906       DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
5907     return false;
5908 
5909   // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
5910   // as the input of target query.
5911   auto *LBC = dyn_cast<BitCastInst>(LValue);
5912   auto *HBC = dyn_cast<BitCastInst>(HValue);
5913   EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
5914                   : EVT::getEVT(LValue->getType());
5915   EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
5916                    : EVT::getEVT(HValue->getType());
5917   if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
5918     return false;
5919 
5920   // Start to split the store.
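  // For example (an illustrative sketch; %lo, %hi, and %addr are hypothetical
  // names for LValue, HValue, and the store address): an i64 store of
  //   or (zext i32 %lo to i64), (shl (zext i32 %hi to i64), 32)
  // to %addr becomes
  //   store i32 %lo, i32* %addr
  //   store i32 %hi, i32* (gep i32, i32* %addr, 1)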
5921   IRBuilder<> Builder(SI.getContext());
5922   Builder.SetInsertPoint(&SI);
5923 
5924   // If LValue/HValue is a bitcast in another BB, create a new one in the
5925   // current BB so it may be merged with the split stores by the DAG combiner.
5926   if (LBC && LBC->getParent() != SI.getParent())
5927     LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
5928   if (HBC && HBC->getParent() != SI.getParent())
5929     HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
5930 
5931   auto CreateSplitStore = [&](Value *V, bool Upper) {
5932     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
5933     Value *Addr = Builder.CreateBitCast(
5934         SI.getOperand(1),
5935         SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
5936     if (Upper)
5937       Addr = Builder.CreateGEP(
5938           SplitStoreType, Addr,
5939           ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
5940     Builder.CreateAlignedStore(
5941         V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
5942   };
5943 
5944   CreateSplitStore(LValue, false);
5945   CreateSplitStore(HValue, true);
5946 
5947   // Delete the old store.
5948   SI.eraseFromParent();
5949   return true;
5950 }
5951 
5952 bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
5953   // Bail out if we inserted the instruction to prevent optimizations from
5954   // stepping on each other's toes.
5955   if (InsertedInsts.count(I))
5956     return false;
5957 
5958   if (PHINode *P = dyn_cast<PHINode>(I)) {
5959     // It is possible for very late stage optimizations (such as SimplifyCFG)
5960     // to introduce PHI nodes too late to be cleaned up. If we detect such a
5961     // trivial PHI, go ahead and zap it here.
5962     if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
5963       P->replaceAllUsesWith(V);
5964       P->eraseFromParent();
5965       ++NumPHIsElim;
5966       return true;
5967     }
5968     return false;
5969   }
5970 
5971   if (CastInst *CI = dyn_cast<CastInst>(I)) {
5972     // If the source of the cast is a constant, then this should have
5973     // already been constant folded. The only reason NOT to constant fold
5974     // it is if something (e.g. LSR) was careful to place the constant
5975     // evaluation in a block other than the one that uses it (e.g. to hoist
5976     // the address of globals out of a loop). If this is the case, we don't
5977     // want to forward-subst the cast.

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      /// Sink a zext or sext into its user blocks if the target type doesn't
      /// fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI, TLI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      bool Modified = optimizeLoadExt(LI);
      unsigned AS = LI->getPointerAddressSpace();
      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
      return Modified;
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
      EnableAndCmpSinking && TLI)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    return false;
  }
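
  // For example (hypothetical IR), a GEP with all-zero indices computes the
  // same address as its pointer operand, so the transform above rewrites
  //
  //   %q = getelementptr { i32, i32 }, { i32, i32 }* %p, i64 0, i32 0
  //
  // as
  //
  //   %q = bitcast { i32, i32 }* %p to i32*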

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}
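
// For illustration, a hypothetical shift-and-mask chain of the kind
// recognizeBSwapOrBitReverseIdiom can match, starting with the nibble swap
// (the remaining 2-bit and 1-bit swap stages are elided here):
//
//   %hi = shl i8 %x, 4
//   %lo = lshr i8 %x, 4
//   %t  = or i8 %hi, %lo
//   ...
//
// When the full chain is recognized, it collapses to a single intrinsic:
//
//   %rev = call i8 @llvm.bitreverse.i8(i8 %x)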

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then iSel may not be able to
// handle it properly. iSel will drop llvm.dbg.value if it cannot find a node
// corresponding to the value.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}
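
// For example (hypothetical IR), a dbg.value separated from the instruction
// that defines its value is moved to sit right after that instruction:
//
//   %v = add i32 %a, %b
//   ...                                       ; many intervening instructions
//   call void @llvm.dbg.value(metadata i32 %v, ...)
//
// becomes
//
//   %v = add i32 %a, %b
//   call void @llvm.dbg.value(metadata i32 %v, ...)
//   ...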

/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}
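
// For example, with NewTrue = 2^33 and NewFalse = 2^32, Scale is
// (2^33 / (2^32 - 1)) + 1 = 3, so the weights become 2^33 / 3 and 2^32 / 3,
// both of which fit into uint32_t while roughly preserving the 2:1 ratio.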

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block to use the first condition directly in
    // the branch instruction and remove the no longer needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes from
    // the newly generated BB (TmpBB). In the other successor we need to add
    // one incoming edge to the PHI nodes, because both branch instructions now
    // target the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This
      // choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     == FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This
      // choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}