//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
"llvm/Transforms/Utils/SimplifyLibCalls.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <limits> 95 #include <memory> 96 #include <utility> 97 #include <vector> 98 99 using namespace llvm; 100 using namespace llvm::PatternMatch; 101 102 #define DEBUG_TYPE "codegenprepare" 103 104 STATISTIC(NumBlocksElim, "Number of blocks eliminated"); 105 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); 106 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); 107 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " 108 "sunken Cmps"); 109 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " 110 "of sunken Casts"); 111 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " 112 "computations were sunk"); 113 STATISTIC(NumMemoryInstsPhiCreated, 114 "Number of phis created when address " 115 "computations were sunk to memory instructions"); 116 STATISTIC(NumMemoryInstsSelectCreated, 117 "Number of select created when address " 118 "computations were sunk to memory instructions"); 119 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); 120 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); 121 STATISTIC(NumAndsAdded, 122 "Number of and mask instructions added to form ext loads"); 123 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); 124 STATISTIC(NumRetsDup, "Number of return instructions duplicated"); 125 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); 126 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); 127 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); 128 129 static cl::opt<bool> DisableBranchOpts( 130 "disable-cgp-branch-opts", cl::Hidden, cl::init(false), 131 cl::desc("Disable branch optimizations in CodeGenPrepare")); 132 133 static cl::opt<bool> 134 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), 135 cl::desc("Disable GC optimizations in CodeGenPrepare")); 136 137 static cl::opt<bool> DisableSelectToBranch( 138 "disable-cgp-select2branch", cl::Hidden, cl::init(false), 139 cl::desc("Disable select to branch conversion.")); 140 141 static cl::opt<bool> AddrSinkUsingGEPs( 142 "addr-sink-using-gep", cl::Hidden, cl::init(true), 143 cl::desc("Address sinking in CGP using GEPs.")); 144 145 static cl::opt<bool> EnableAndCmpSinking( 146 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 147 cl::desc("Enable sinkinig and/cmp into branches.")); 148 149 static cl::opt<bool> DisableStoreExtract( 150 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 151 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 152 153 static cl::opt<bool> StressStoreExtract( 154 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 155 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 156 157 static cl::opt<bool> DisableExtLdPromotion( 158 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 159 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 160 "CodeGenPrepare")); 161 162 static cl::opt<bool> StressExtLdPromotion( 163 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 164 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 165 "optimization in CodeGenPrepare")); 166 167 static cl::opt<bool> DisablePreheaderProtect( 168 "disable-preheader-prot", cl::Hidden, cl::init(false), 169 cl::desc("Disable protection 

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool>
    AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
                       cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

namespace {

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 1, bool>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value*, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *I, Value *Addr,
                          Type *AccessTy, unsigned AS);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *I);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool optimizeSwitchInst(SwitchInst *CI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    TM = &TPC->getTM<TargetMachine>();
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  OptSize = F.optForSize();

  ProfileSummaryInfo *PSI =
      getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  if (ProfileGuidedSectionPrefix) {
    if (PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI))
      F.setSectionPrefix(".unlikely");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI &&
      TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock* BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock* Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then ISel may not be able
  // to handle it properly. ISel will drop llvm.dbg.value if it cannot
  // find a node corresponding to the value.
  EverMadeChange |= placeDbgValues(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= SplitIndirectBrCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);

    // Really free removed instructions during promotion.
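    // (During promotion they were only unlinked so the transform could be
    // rolled back; at this point it is safe to actually reclaim their memory.)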
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
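///
/// For illustration (block names are invented for this comment):
///   bb1:
///     br label %bb2   ; bb1's sole successor
///   bb2:              ; bb2's sole predecessor is bb1
///     ...
/// is collapsed into a single basic block.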
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB->getIterator();
    }
  }
  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHI())
    return true;

  // We use a simple cost heuristic which determines that skipping merging is
  // profitable if the cost of skipping merging is less than the cost of
  // merging: Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
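  // For example (illustrative numbers): with FreqRatioToSkipMerge == 2,
  // Freq(Pred) == 30 and Freq(BB) == 10 give 30 > 10 * 2, so the merge is
  // skipped; if instead Freq(BB) == 20, then 30 <= 20 * 2 and BB is merged.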

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred. In
  // this case, there is no reason to skip merging because COPYs are expected
  // to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only phi nodes and an unconditional branch
/// in it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN.addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
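// For illustration (operand indices invented for this comment): given
// relocates keyed by (BasePtrIndex, DerivedPtrIndex), a base relocate with
// key (4, 4) and a derived relocate with key (4, 5) from the same statepoint
// yield the entry RelocateInstMap[relocate(4, 4)] = {relocate(4, 5)}.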
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants.
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of the derived pointer is defined after
  // the relocation of the base pointer. If we find a relocation corresponding
  // to the base that is defined earlier than the relocation of the base, then
  // we move the relocation of the base right before the found relocation. We
  // consider only relocations in the same basic block as the relocation of the
  // base. Relocations from other basic blocks are skipped by this optimization
  // and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we can not find the bitcast any more. So we insert a new
    // bitcast whether or not one already exists. This way we can handle all
    // cases, and the extra bitcast should be optimized away in later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase,
        makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// SinkCast - Sink the specified cast instruction into its user blocks
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
                                      CI->getType(), "", &*InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    salvageDebugInfo(*CI);
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
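///
/// For illustration (target-dependent; types are invented for this comment):
/// on a target where i32 is promoted to i64, a "trunc i64 %x to i32" becomes
/// a noop copy once both types are promoted to i64, and is then sunk just
/// like a no-op pointer cast would be.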
1020 CastInst *&InsertedCast = InsertedCasts[UserBB]; 1021 1022 if (!InsertedCast) { 1023 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1024 assert(InsertPt != UserBB->end()); 1025 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 1026 CI->getType(), "", &*InsertPt); 1027 } 1028 1029 // Replace a use of the cast with a use of the new cast. 1030 TheUse = InsertedCast; 1031 MadeChange = true; 1032 ++NumCastUses; 1033 } 1034 1035 // If we removed all uses, nuke the cast. 1036 if (CI->use_empty()) { 1037 salvageDebugInfo(*CI); 1038 CI->eraseFromParent(); 1039 MadeChange = true; 1040 } 1041 1042 return MadeChange; 1043 } 1044 1045 /// If the specified cast instruction is a noop copy (e.g. it's casting from 1046 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 1047 /// reduce the number of virtual registers that must be created and coalesced. 1048 /// 1049 /// Return true if any changes are made. 1050 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 1051 const DataLayout &DL) { 1052 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition 1053 // than sinking only nop casts, but is helpful on some platforms. 1054 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { 1055 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), 1056 ASC->getDestAddressSpace())) 1057 return false; 1058 } 1059 1060 // If this is a noop copy, 1061 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 1062 EVT DstVT = TLI.getValueType(DL, CI->getType()); 1063 1064 // This is an fp<->int conversion? 1065 if (SrcVT.isInteger() != DstVT.isInteger()) 1066 return false; 1067 1068 // If this is an extension, it will be a zero or sign extension, which 1069 // isn't a noop. 1070 if (SrcVT.bitsLT(DstVT)) return false; 1071 1072 // If these values will be promoted, find out what they will be promoted 1073 // to. This helps us consider truncates on PPC as noop copies when they 1074 // are. 1075 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 1076 TargetLowering::TypePromoteInteger) 1077 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 1078 if (TLI.getTypeAction(CI->getContext(), DstVT) == 1079 TargetLowering::TypePromoteInteger) 1080 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 1081 1082 // If, after promotion, these are the same types, this is a noop copy. 1083 if (SrcVT != DstVT) 1084 return false; 1085 1086 return SinkCast(CI); 1087 } 1088 1089 /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if 1090 /// possible. 1091 /// 1092 /// Return true if any changes were made. 
static bool CombineUAddWithOverflow(CmpInst *CI) {
  Value *A, *B;
  Instruction *AddI;
  if (!match(CI,
             m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
    return false;

  Type *Ty = AddI->getType();
  if (!isa<IntegerType>(Ty))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp:

  if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
    return false;

#ifndef NDEBUG
  // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  // for now:
  if (AddI->hasOneUse())
    assert(*AddI->user_begin() == CI && "expected!");
#endif

  Module *M = CI->getModule();
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);

  auto *InsertPt = AddI->hasOneUse() ? CI : AddI;

  auto *UAddWithOverflow =
      CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
  auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
  auto *Overflow =
      ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);

  CI->replaceAllUsesWith(Overflow);
  AddI->replaceAllUsesWith(UAdd);
  CI->eraseFromParent();
  AddI->eraseFromParent();
  return true;
}

/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  BasicBlock *DefBB = CI->getParent();

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
                          CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(CI->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  if (SinkCmpExpression(CI, TLI))
    return true;

  if (CombineUAddWithOverflow(CI))
    return true;

  return false;
}

/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI,
                                  const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void) InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase
  // register pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink for an and mask feeding an icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same
    // block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd =
        BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
                               AndI->getOperand(1), "", InsertPt);
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    ++NumAndUses;
    DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}

/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction, where the imm is a mask of the low bits:
///    imm & (imm+1) == 0
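/// For example (illustrative values): imm == 0x0f is accepted, since
/// 0x0f & 0x10 == 0, while imm == 0x70 is rejected, since 0x70 & 0x71 != 0.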
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// Sink both shift and truncate instructions to the BB of the truncate's user.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {
    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", &*TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction and generate a
/// BitExtract instruction. It will only be applied if the architecture
/// supports a BitExtract instruction. Here is an example:
///     BB1:
///       %x.extract.shift = lshr i64 %arg1, 32
///     BB2:
///       %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
///     BB2:
///       %x.extract.shift.1 = lshr i64 %arg1, 32
///       %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if it is
      // not legal. In this case, we would like to sink both the shift and the
      // truncate instructions to the BB of TruncUse.
      // For example:
      //     BB1:
      //       i64 shift.result = lshr i64 opnd, imm
      //       trunc.result = trunc shift.result to i16
      //
      //     BB2:
      //       ----> We will have an implicit truncate here if the architecture
      //       does not have an i16 compare.
      //       cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
  if (ShiftI->use_empty())
    ShiftI->eraseFromParent();

  return MadeChange;
}
/// If counting leading or trailing zeros is an expensive operation and a zero
/// input is defined, add a check for zero to avoid calling the intrinsic.
///
/// We want to transform:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
///
/// into:
///   entry:
///     %cmpz = icmp eq i64 %A, 0
///     br i1 %cmpz, label %cond.end, label %cond.false
///   cond.false:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///     br label %cond.end
///   cond.end:
///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                  const TargetLowering *TLI,
                                  const DataLayout *DL,
                                  bool &ModifiedDT) {
  if (!TLI || !DL)
    return false;

  // If a zero input is undefined, it doesn't make sense to despeculate that.
  if (match(CountZeros->getOperand(1), m_One()))
    return false;

  // If it's cheap to speculate, there's nothing to do.
  auto IntrinsicID = CountZeros->getIntrinsicID();
  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
    return false;

  // Only handle legal scalar cases. Anything else requires too much work.
  Type *Ty = CountZeros->getType();
  unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
    return false;

  // The intrinsic will be sunk behind a compare against zero and branch.
  BasicBlock *StartBlock = CountZeros->getParent();
  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");

  // Create another block after the count zero intrinsic. A PHI will be added
  // in this block to select the result of the intrinsic or the bit-width
  // constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(&EndBlock->front());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  CountZeros->replaceAllUsesWith(PN);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = true;
  return true;
}
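// Background note (an assumption, not stated in this file): the classic case
// is x86 without TZCNT/LZCNT, where BSF/BSR leave the destination undefined
// for a zero input, so a cttz/ctlz that must be defined at zero is lowered to
// an expensive compare-and-select sequence; the explicit zero check above
// replaces that with a branch and lets the intrinsic use the cheap
// zero-undef form.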
bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize, PrefAlign;
  if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->arg_operands()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getIndexSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if ((Offset2 & (PrefAlign - 1)) != 0)
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(PrefAlign);
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
        GV->setAlignment(PrefAlign);
    }
    // If this is a memcpy (or similar) then we may be able to improve the
    // alignment.
    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
      unsigned DestAlign = getKnownAlignment(MI->getDest(), *DL);
      if (DestAlign > MI->getDestAlignment())
        MI->setDestAlignment(DestAlign);
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        unsigned SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
        if (SrcAlign > MTI->getSourceAlignment())
          MTI->setSourceAlignment(SrcAlign);
      }
    }
  }
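  // Worked example for the offset check above (illustrative numbers only):
  // with PrefAlign = 16, an argument at constant offset 32 from an alloca can
  // be handled, since 32 & 15 == 0, while an argument at offset 8 is skipped,
  // because over-aligning the alloca would not make the +8 address
  // 16-byte aligned.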
  // If we have a cold call site, try to sink addressing computation into the
  // cold block. This interacts with our handling for loads and stores to
  // ensure that we can fold all uses of a potential addressing computation
  // into their uses. TODO: generalize this to work over profiling data.
  if (!OptSize && CI->hasFnAttr(Attribute::Cold))
    for (auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
    }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::objectsize: {
      // Lower all uses of llvm.objectsize.*
      ConstantInt *RetVal =
          lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true);
      // Substituting this can cause recursive simplifications, which can
      // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case
      // this happens.
      Value *CurValue = &*CurInstIterator;
      WeakTrackingVH IterHandle(CurValue);

      replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);

      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      if (IterHandle != CurValue) {
        CurInstIterator = BB->begin();
        SunkAddrs.clear();
      }
      return true;
    }
    case Intrinsic::aarch64_stlxr:
    case Intrinsic::aarch64_stxr: {
      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
      if (!ExtVal || !ExtVal->hasOneUse() ||
          ExtVal->getParent() == CI->getParent())
        return false;
      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
      ExtVal->moveBefore(CI);
      // Mark this instruction as "inserted by CGP", so that other
      // optimizations don't touch it.
      InsertedInsts.insert(ExtVal);
      return true;
    }
    case Intrinsic::invariant_group_barrier:
      II->replaceAllUsesWith(II->getArgOperand(0));
      II->eraseFromParent();
      return true;

    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      // If counting zeros is expensive, try to avoid it.
      return despeculateCountZeros(II, TLI, DL, ModifiedDT);
    }

    if (TLI) {
      SmallVector<Value *, 2> PtrOps;
      Type *AccessTy;
      if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
        while (!PtrOps.empty()) {
          Value *PtrVal = PtrOps.pop_back_val();
          unsigned AS = PtrVal->getType()->getPointerAddressSpace();
          if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
            return true;
        }
    }
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction()) return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize. Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  if (Value *V = Simplifier.optimizeCall(CI)) {
    CI->replaceAllUsesWith(V);
    CI->eraseFromParent();
    return true;
  }

  return false;
}
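// For example (a sketch, not from the source): a fortified call such as
//   %r = call i8* @__memcpy_chk(i8* %dst, i8* %src, i64 %n, i64 -1)
// carries the "unknown object size" sentinel (-1), so the simplifier above
// can rewrite it to a plain memcpy of %n bytes.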
/// Look for opportunities to duplicate return instructions to the predecessor
/// to enable tail call optimizations. The case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RetI)
    return false;

  PHINode *PN = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RetI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RetI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RetI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  const Function *F = BB->getParent();
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI).second)
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI))
        TailCalls.push_back(CI);
    }
  }
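  // As an illustration of the attribute check below (hypothetical example):
  // if the caller is declared "define zeroext i8 @f()" but a candidate call
  // returns a plain (non-zeroext) i8, the return-value attributes disagree
  // and the return is not duplicated for that call.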
  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeList CallerAttrs = F->getAttributes();
    AttributeList CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex)
            .removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
            .removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg = nullptr;
  Value *ScaledReg = nullptr;
  Value *OriginalValue = nullptr;

  enum FieldName {
    NoField        = 0x00,
    BaseRegField   = 0x01,
    BaseGVField    = 0x02,
    BaseOffsField  = 0x04,
    ScaledRegField = 0x08,
    ScaleField     = 0x10,
    MultipleFields = 0xff
  };

  ExtAddrMode() = default;

  void print(raw_ostream &OS) const;
  void dump() const;

  FieldName compare(const ExtAddrMode &other) {
    // First check that the types are the same on each field, as differing
    // types is something we can't cope with later on.
    if (BaseReg && other.BaseReg &&
        BaseReg->getType() != other.BaseReg->getType())
      return MultipleFields;
    if (BaseGV && other.BaseGV &&
        BaseGV->getType() != other.BaseGV->getType())
      return MultipleFields;
    if (ScaledReg && other.ScaledReg &&
        ScaledReg->getType() != other.ScaledReg->getType())
      return MultipleFields;

    // Check each field to see if it differs.
    unsigned Result = NoField;
    if (BaseReg != other.BaseReg)
      Result |= BaseRegField;
    if (BaseGV != other.BaseGV)
      Result |= BaseGVField;
    if (BaseOffs != other.BaseOffs)
      Result |= BaseOffsField;
    if (ScaledReg != other.ScaledReg)
      Result |= ScaledRegField;
    // Don't count 0 as being a different scale, because that actually means
    // unscaled (which will already be counted by having no ScaledReg).
    if (Scale && other.Scale && Scale != other.Scale)
      Result |= ScaleField;

    if (countPopulation(Result) > 1)
      return MultipleFields;
    else
      return static_cast<FieldName>(Result);
  }
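  // For example (illustrative): comparing [Base:%a + 8] with [Base:%b + 8]
  // yields BaseRegField, while comparing [Base:%a + 8] with [Base:%b + 16]
  // differs in two fields and therefore yields MultipleFields.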
  // An AddrMode is trivial if it involves no calculation, i.e. it is just a
  // base with no offset.
  bool isTrivial() {
    // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
    // trivial if at most one of these terms is nonzero, except that BaseGV and
    // BaseReg both being zero actually means a null pointer value, which we
    // consider to be 'non-zero' here.
    return !BaseOffs && !Scale && !(BaseGV && BaseReg);
  }

  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
    switch (Field) {
    default:
      return nullptr;
    case BaseRegField:
      return BaseReg;
    case BaseGVField:
      return BaseGV;
    case ScaledRegField:
      return ScaledReg;
    case BaseOffsField:
      return ConstantInt::get(IntPtrTy, BaseOffs);
    }
  }

  void SetCombinedField(FieldName Field, Value *V,
                        const SmallVectorImpl<ExtAddrMode> &AddrModes) {
    switch (Field) {
    default:
      llvm_unreachable("Unhandled fields are expected to be rejected earlier");
      break;
    case ExtAddrMode::BaseRegField:
      BaseReg = V;
      break;
    case ExtAddrMode::BaseGVField:
      // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
      // in the BaseReg field.
      assert(BaseReg == nullptr);
      BaseReg = V;
      BaseGV = nullptr;
      break;
    case ExtAddrMode::ScaledRegField:
      ScaledReg = V;
      // If we have a mix of scaled and unscaled addrmodes then we want scale
      // to be the scale and not zero.
      if (!Scale)
        for (const ExtAddrMode &AM : AddrModes)
          if (AM.Scale) {
            Scale = AM.Scale;
            break;
          }
      break;
    case ExtAddrMode::BaseOffsField:
      // The offset is no longer a constant, so it goes in ScaledReg with a
      // scale of 1.
      assert(ScaledReg == nullptr);
      ScaledReg = V;
      Scale = 1;
      BaseOffs = 0;
      break;
    }
  }
};

} // end anonymous namespace

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "")
       << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
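// For example (illustrative): an ExtAddrMode with BaseGV @g, BaseOffs 16,
// BaseReg %p, Scale 4 and ScaledReg %i prints as:
//   [GV:@g + 16 + Base:%p + 4*%i]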
namespace {

/// \brief This class provides transaction-based operations on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
class TypePromotionTransaction {
  /// \brief This represents the common interface of the individual transaction
  /// actions.
  /// Each class implements the logic for doing one specific modification on
  /// the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// \brief Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() = default;

    /// \brief Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact
    /// same state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// \brief Commit every change made by this action.
    /// When the results on the IR of the action are to be kept, it is
    /// important to call this function, otherwise hidden information may be
    /// kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// \brief Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;

    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// \brief Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst->getIterator();
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = &*--It;
      else
        Point.BB = Inst->getParent();
    }

    /// \brief Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = &*Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };

  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// \brief Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;

    /// Index of the modified instruction.
    unsigned Idx;

  public:
    /// \brief Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// \brief Hide the operands of an instruction.
  /// Act as if this instruction were not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// \brief Replace the uses of an instruction with another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction that uses the replaced instruction.
      Instruction *Inst;

      /// The operand index where the replaced instruction is used in Inst.
      unsigned Idx;

      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;

    using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;

  public:
    /// \brief Replace all the uses of \p Inst with \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };

  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;

    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to act as if the instruction was removed.
    OperandsHider Hider;

    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer = nullptr;

    /// Keep track of instructions removed.
    SetOfInstrs &RemovedInsts;

  public:
    /// \brief Remove all references to \p Inst and optionally replace all its
    /// uses with \p New.
    /// \p RemovedInsts Keep track of the instructions removed by this Action.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
                       Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          RemovedInsts(RemovedInsts) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      RemovedInsts.insert(Inst);
      /// The instructions removed here will be freed after completing
      /// optimizeBlock() for all blocks as we need to keep track of the
      /// removed instructions during promotion.
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when this action was built.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
      RemovedInsts.erase(Inst);
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  using ConstRestorationPt = const TypePromotionAction *;

  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
      : RemovedInsts(RemovedInsts) {}

  /// Commit every change made in this transaction.
  void commit();

  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);

  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);

  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);

  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);

  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);

  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);

  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);

  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);

  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;

  using CommitPt =
      SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;

  SetOfInstrs &RemovedInsts;
};

} // end anonymous namespace

void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>(
      Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      llvm::make_unique<TypePromotionTransaction::InstructionRemover>(
          Inst, RemovedInsts, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
                                             Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
          Inst, Before));
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}

void TypePromotionTransaction::commit() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
       ++It)
    (*It)->commit();
  Actions.clear();
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}
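// Typical usage of the transaction (an illustrative sketch, not code from
// this file):
//
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt Pt =
//       TPT.getRestorationPoint();
//   TPT.mutateType(I, NewTy);   // speculative change, recorded as an action
//   if (!Profitable)
//     TPT.rollback(Pt);         // undo everything made after Pt
//   else
//     TPT.commit();             // keep the changes and drop the undo log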
namespace {

/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction*> &AddrModeInsts;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const DataLayout &DL;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  unsigned AddrSpace;
  Instruction *MemoryInst;

  /// This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching.
  ExtAddrMode &AddrMode;

  /// The instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedInsts;

  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;

  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
                        const TargetLowering &TLI,
                        const TargetRegisterInfo &TRI,
                        Type *AT, unsigned AS,
                        Instruction *MI, ExtAddrMode &AM,
                        const SetOfInstrs &InsertedInsts,
                        InstrToOrigTy &PromotedInsts,
                        TypePromotionTransaction &TPT)
      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
        DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
        MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
        PromotedInsts(PromotedInsts), TPT(TPT) {
    IgnoreProfitability = false;
  }

public:
  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction*> &AddrModeInsts,
                           const TargetLowering &TLI,
                           const TargetRegisterInfo &TRI,
                           const SetOfInstrs &InsertedInsts,
                           InstrToOrigTy &PromotedInsts,
                           TypePromotionTransaction &TPT) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI,
                                         AccessTy, AS,
                                         MemoryInst, Result, InsertedInsts,
                                         PromotedInsts, TPT).matchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }

private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *V, unsigned Depth);
  bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};
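// Typical call site (an illustrative sketch; in this pass the caller is
// optimizeMemoryInst):
//
//   ExtAddrMode AM = AddressingModeMatcher::Match(
//       Addr, AccessTy, AS, MemoryInst, AddrModeInsts, *TLI, *TRI,
//       InsertedInsts, PromotedInsts, TPT);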
/// \brief Keep track of simplification of Phi nodes.
/// Accept the set of all phi nodes and erase a phi node from this set
/// if it is simplified.
class SimplificationTracker {
  DenseMap<Value *, Value *> Storage;
  const SimplifyQuery &SQ;
  SmallPtrSetImpl<PHINode *> &AllPhiNodes;
  SmallPtrSetImpl<SelectInst *> &AllSelectNodes;

public:
  SimplificationTracker(const SimplifyQuery &sq,
                        SmallPtrSetImpl<PHINode *> &APN,
                        SmallPtrSetImpl<SelectInst *> &ASN)
      : SQ(sq), AllPhiNodes(APN), AllSelectNodes(ASN) {}

  Value *Get(Value *V) {
    do {
      auto SV = Storage.find(V);
      if (SV == Storage.end())
        return V;
      V = SV->second;
    } while (true);
  }

  Value *Simplify(Value *Val) {
    SmallVector<Value *, 32> WorkList;
    SmallPtrSet<Value *, 32> Visited;
    WorkList.push_back(Val);
    while (!WorkList.empty()) {
      auto P = WorkList.pop_back_val();
      if (!Visited.insert(P).second)
        continue;
      if (auto *PI = dyn_cast<Instruction>(P))
        if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) {
          for (auto *U : PI->users())
            WorkList.push_back(cast<Value>(U));
          Put(PI, V);
          PI->replaceAllUsesWith(V);
          if (auto *PHI = dyn_cast<PHINode>(PI))
            AllPhiNodes.erase(PHI);
          if (auto *Select = dyn_cast<SelectInst>(PI))
            AllSelectNodes.erase(Select);
          PI->eraseFromParent();
        }
    }
    return Get(Val);
  }

  void Put(Value *From, Value *To) {
    Storage.insert({ From, To });
  }
};
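// For instance (illustrative): after ST.Put(%p1, %b1) and ST.Put(%b1, %b2),
// ST.Get(%p1) follows the chain %p1 -> %b1 -> %b2 and returns %b2, so callers
// always see the most-simplified value.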
/// \brief A helper class for combining addressing modes.
class AddressingModeCombiner {
  typedef std::pair<Value *, BasicBlock *> ValueInBB;
  typedef DenseMap<ValueInBB, Value *> FoldAddrToValueMapping;
  typedef std::pair<PHINode *, PHINode *> PHIPair;

private:
  /// The addressing modes we've collected.
  SmallVector<ExtAddrMode, 16> AddrModes;

  /// The field in which the AddrModes differ, when we have more than one.
  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;

  /// Are the AddrModes that we have all just equal to their original values?
  bool AllAddrModesTrivial = true;

  /// Common Type for all different fields in addressing modes.
  Type *CommonType;

  /// SimplifyQuery for simplifyInstruction utility.
  const SimplifyQuery &SQ;

  /// Original Address.
  ValueInBB Original;

public:
  AddressingModeCombiner(const SimplifyQuery &_SQ, ValueInBB OriginalValue)
      : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {}

  /// \brief Get the combined AddrMode
  const ExtAddrMode &getAddrMode() const {
    return AddrModes[0];
  }

  /// \brief Add a new AddrMode if it's compatible with the AddrModes we
  /// already have.
  /// \return True iff we succeeded in doing so.
  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of whether we have any non-trivial AddrModes: we need to
    // detect the case where all AddrModes are trivial, as then we would
    // introduce a phi or select which just duplicates what's already there.
    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();

    // If this is the first addrmode then everything is fine.
    if (AddrModes.empty()) {
      AddrModes.emplace_back(NewAddrMode);
      return true;
    }

    // Figure out how different this is from the other address modes, which we
    // can do just by comparing against the first one given that we only care
    // about the cumulative difference.
    ExtAddrMode::FieldName ThisDifferentField =
        AddrModes[0].compare(NewAddrMode);
    if (DifferentField == ExtAddrMode::NoField)
      DifferentField = ThisDifferentField;
    else if (DifferentField != ThisDifferentField)
      DifferentField = ExtAddrMode::MultipleFields;

    // If NewAddrMode differs in more than one dimension we cannot handle it.
    bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;

    // If the Scale field is different then we reject.
    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;

    // We also must reject the case when the base offset is different and the
    // scale reg is not null; we cannot handle this case because the merge of
    // the different offsets would end up being used as the ScaleReg.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
                              !NewAddrMode.ScaledReg);

    // We also must reject the case when the GV is different and a BaseReg is
    // installed, because we want to use the base reg as the merge of the GV
    // values.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
                              !NewAddrMode.HasBaseReg);

    // Even if NewAddrMode is the same we still need to collect it, because
    // its original value is different. Later we will need all the original
    // values as anchors while finding the common Phi node.
    if (CanHandle)
      AddrModes.emplace_back(NewAddrMode);
    else
      AddrModes.clear();

    return CanHandle;
  }

  /// \brief Combine the addressing modes we've collected into a single
  /// addressing mode.
  /// \return True iff we successfully combined them or we only had one so
  /// didn't need to combine them anyway.
  bool combineAddrModes() {
    // If we have no AddrModes then they can't be combined.
    if (AddrModes.size() == 0)
      return false;

    // A single AddrMode can trivially be combined.
    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
      return true;

    // If the AddrModes we collected are all just equal to the value they are
    // derived from then combining them wouldn't do anything useful.
    if (AllAddrModesTrivial)
      return false;

    if (!addrModeCombiningAllowed())
      return false;

    // Build a map between <original value, basic block where we saw it> to
    // the value of the base register.
    // Bail out if there is no common type.
    FoldAddrToValueMapping Map;
    if (!initializeMap(Map))
      return false;

    Value *CommonValue = findCommon(Map);
    if (CommonValue)
      AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
    return CommonValue != nullptr;
  }

private:
  /// \brief Initialize Map with anchor values. For an address seen in some
  /// BB, we record the value of the differing field of that address.
  /// If the address is not an instruction, the basic block is set to null.
  /// At the same time we find a common type for the differing fields, which
  /// we will use to create the new Phi/Select nodes; it is kept in the
  /// CommonType field.
  /// Return false if there is no common type found.
  bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace them
    // with constant null when we know the common type.
    SmallVector<ValueInBB, 2> NullValue;
    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
    for (auto &AM : AddrModes) {
      BasicBlock *BB = nullptr;
      if (Instruction *I = dyn_cast<Instruction>(AM.OriginalValue))
        BB = I->getParent();

      Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
      if (DV) {
        auto *Type = DV->getType();
        if (CommonType && CommonType != Type)
          return false;
        CommonType = Type;
        Map[{ AM.OriginalValue, BB }] = DV;
      } else {
        NullValue.push_back({ AM.OriginalValue, BB });
      }
    }
    assert(CommonType && "At least one non-null value must be!");
    for (auto VIBB : NullValue)
      Map[VIBB] = Constant::getNullValue(CommonType);
    return true;
  }
  /// \brief We have a mapping from value A (and the basic block where value A
  /// was seen) to another value B, where B was a field in the addressing mode
  /// represented by A. We also have an original value C representing an
  /// address in some basic block. Traversing from C through phis and selects,
  /// we ended up with the A's in the map. This utility function tries to find
  /// a value V which is a field in the addressing mode of C, such that
  /// traversing through phi nodes and selects from V we end up in the
  /// corresponding values B in the map.
  /// The utility will create new Phi/Selects if needed.
  // A simple example looks as follows:
  // BB1:
  //   p1 = b1 + 40
  //   br cond BB2, BB3
  // BB2:
  //   p2 = b2 + 40
  //   br BB3
  // BB3:
  //   p = phi [p1, BB1], [p2, BB2]
  //   v = load p
  // Map is
  //   <p1, BB1> -> b1
  //   <p2, BB2> -> b2
  // Request is
  //   <p, BB3> -> ?
  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
  Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks newly created Phi nodes.
    SmallPtrSet<PHINode *, 32> NewPhiNodes;
    // Tracks newly created Select nodes.
    SmallPtrSet<SelectInst *, 32> NewSelectNodes;
    // Tracks the simplification of newly created phi nodes. The reason we use
    // this mapping is because we will add new created Phi nodes in AddrToBase.
    // Simplification of Phi nodes is recursive, so some Phi node may
    // be simplified after we added it to AddrToBase.
    // Using this mapping we can find the current value in AddrToBase.
    SimplificationTracker ST(SQ, NewPhiNodes, NewSelectNodes);

    // First step, DFS to create PHI nodes for all intermediate blocks.
    // Also fill traverse order for the second step.
    SmallVector<ValueInBB, 32> TraverseOrder;
    InsertPlaceholders(Map, TraverseOrder, NewPhiNodes, NewSelectNodes);

    // Second Step, fill new nodes by merged values and simplify if possible.
    FillPlaceholders(Map, TraverseOrder, ST);

    if (!AddrSinkNewSelects && NewSelectNodes.size() > 0) {
      DestroyNodes(NewPhiNodes);
      DestroyNodes(NewSelectNodes);
      return nullptr;
    }

    // Now we'd like to match the new Phi nodes to the existing ones.
    unsigned PhiNotMatchedCount = 0;
    if (!MatchPhiSet(NewPhiNodes, ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
      DestroyNodes(NewPhiNodes);
      DestroyNodes(NewSelectNodes);
      return nullptr;
    }

    auto *Result = ST.Get(Map.find(Original)->second);
    if (Result) {
      NumMemoryInstsPhiCreated += NewPhiNodes.size() + PhiNotMatchedCount;
      NumMemoryInstsSelectCreated += NewSelectNodes.size();
    }
    return Result;
  }

  /// \brief Destroy nodes from a set.
  template <typename T> void DestroyNodes(SmallPtrSetImpl<T *> &Instructions) {
    // For safe erasing, replace the Phi with a dummy value first.
    auto Dummy = UndefValue::get(CommonType);
    for (auto I : Instructions) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
  }

  /// \brief Try to match PHI node to Candidate.
  /// Matcher tracks the matched Phi nodes.
  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
                    DenseSet<PHIPair> &Matcher,
                    SmallPtrSetImpl<PHINode *> &PhiNodesToMatch) {
    SmallVector<PHIPair, 8> WorkList;
    Matcher.insert({ PHI, Candidate });
    WorkList.push_back({ PHI, Candidate });
    SmallSet<PHIPair, 8> Visited;
    while (!WorkList.empty()) {
      auto Item = WorkList.pop_back_val();
      if (!Visited.insert(Item).second)
        continue;
      // We iterate over all incoming values of each Phi to compare them.
      // If the values are different, both of them are Phis, the first one is
      // a Phi we added (and thus subject to match), and both of them are in
      // the same basic block, then we can match our pair if the incoming
      // values match. So we state that these values match and add the pair
      // to the work list to verify that.
      for (auto B : Item.first->blocks()) {
        Value *FirstValue = Item.first->getIncomingValueForBlock(B);
        Value *SecondValue = Item.second->getIncomingValueForBlock(B);
        if (FirstValue == SecondValue)
          continue;

        PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
        PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);

        // If one of them is not a Phi, or the first one is not a Phi node
        // from the set we'd like to match, or the Phi nodes are from
        // different basic blocks, then we will not be able to match.
        if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
            FirstPhi->getParent() != SecondPhi->getParent())
          return false;

        // If we already matched them then continue.
        if (Matcher.count({ FirstPhi, SecondPhi }))
          continue;
        // So the values are different and do not match yet. We need them to
        // match.
        Matcher.insert({ FirstPhi, SecondPhi });
        // But we must check it.
        WorkList.push_back({ FirstPhi, SecondPhi });
      }
    }
    return true;
  }

  /// \brief For the given set of PHI nodes try to find their equivalents.
  /// Returns false if this matching fails and creation of new Phi is disabled.
  bool MatchPhiSet(SmallPtrSetImpl<PHINode *> &PhiNodesToMatch,
                   SimplificationTracker &ST, bool AllowNewPhiNodes,
                   unsigned &PhiNotMatchedCount) {
    DenseSet<PHIPair> Matched;
    SmallPtrSet<PHINode *, 8> WillNotMatch;
    while (PhiNodesToMatch.size()) {
      PHINode *PHI = *PhiNodesToMatch.begin();

      // Add us; if there are no matching Phi nodes in the basic block, we do
      // not match.
      WillNotMatch.clear();
      WillNotMatch.insert(PHI);

      // Traverse all Phis until we find an equivalent or fail to do so.
      bool IsMatched = false;
      for (auto &P : PHI->getParent()->phis()) {
        if (&P == PHI)
          continue;
        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
          break;
        // If it does not match, collect all Phi nodes from the matcher.
        // If we end up with no match, then all these Phi nodes will not match
        // later.
        for (auto M : Matched)
          WillNotMatch.insert(M.first);
        Matched.clear();
      }
      if (IsMatched) {
        // Replace all matched values and erase them.
        for (auto MV : Matched) {
          MV.first->replaceAllUsesWith(MV.second);
          PhiNodesToMatch.erase(MV.first);
          ST.Put(MV.first, MV.second);
          MV.first->eraseFromParent();
        }
        Matched.clear();
        continue;
      }
      // If we are not allowed to create new nodes then bail out.
      if (!AllowNewPhiNodes)
        return false;
      // Just remove all seen values in the matcher. They will not match
      // anything.
      PhiNotMatchedCount += WillNotMatch.size();
      for (auto *P : WillNotMatch)
        PhiNodesToMatch.erase(P);
    }
    return true;
  }

  /// \brief Fill the placeholders with values from predecessors and simplify
  /// them.
  void FillPlaceholders(FoldAddrToValueMapping &Map,
                        SmallVectorImpl<ValueInBB> &TraverseOrder,
                        SimplificationTracker &ST) {
    while (!TraverseOrder.empty()) {
      auto Current = TraverseOrder.pop_back_val();
      assert(Map.find(Current) != Map.end() && "No node to fill!!!");
      Value *CurrentValue = Current.first;
      BasicBlock *CurrentBlock = Current.second;
      Value *V = Map[Current];

      if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
        // CurrentValue also must be Select.
        auto *CurrentSelect = cast<SelectInst>(CurrentValue);
        auto *TrueValue = CurrentSelect->getTrueValue();
        ValueInBB TrueItem = { TrueValue, isa<Instruction>(TrueValue)
                                              ? CurrentBlock
                                              : nullptr };
        assert(Map.find(TrueItem) != Map.end() && "No True Value!");
        Select->setTrueValue(ST.Get(Map[TrueItem]));
        auto *FalseValue = CurrentSelect->getFalseValue();
        ValueInBB FalseItem = { FalseValue, isa<Instruction>(FalseValue)
                                                ? CurrentBlock
                                                : nullptr };
        assert(Map.find(FalseItem) != Map.end() && "No False Value!");
        Select->setFalseValue(ST.Get(Map[FalseItem]));
      } else {
        // Must be a Phi node then.
        PHINode *PHI = cast<PHINode>(V);
        // Fill the Phi node with values from predecessors.
        bool IsDefinedInThisBB =
            cast<Instruction>(CurrentValue)->getParent() == CurrentBlock;
        auto *CurrentPhi = dyn_cast<PHINode>(CurrentValue);
        for (auto B : predecessors(CurrentBlock)) {
          Value *PV = IsDefinedInThisBB
                          ? CurrentPhi->getIncomingValueForBlock(B)
                          : CurrentValue;
          ValueInBB item = { PV, isa<Instruction>(PV) ? B : nullptr };
          assert(Map.find(item) != Map.end() && "No predecessor Value!");
          PHI->addIncoming(ST.Get(Map[item]), B);
        }
      }
      // Simplify if possible.
      Map[Current] = ST.Simplify(V);
    }
  }

  /// Starting from a value, recursively iterate over predecessors up to the
  /// known ending values represented in the map. For each traversed block,
  /// insert a placeholder Phi or Select.
  /// Report all newly created Phi/Select nodes by adding them to the set.
  /// Also record the order in which the basic blocks have been traversed.
  void InsertPlaceholders(FoldAddrToValueMapping &Map,
                          SmallVectorImpl<ValueInBB> &TraverseOrder,
                          SmallPtrSetImpl<PHINode *> &NewPhiNodes,
                          SmallPtrSetImpl<SelectInst *> &NewSelectNodes) {
    SmallVector<ValueInBB, 32> Worklist;
    assert((isa<PHINode>(Original.first) || isa<SelectInst>(Original.first)) &&
           "Address must be a Phi or Select node");
    auto *Dummy = UndefValue::get(CommonType);
    Worklist.push_back(Original);
    while (!Worklist.empty()) {
      auto Current = Worklist.pop_back_val();
      // If the value is not an instruction, it is something global or
      // constant, or a parameter, so it is observable in any block.
      // Set the block to null to denote this.
      // Note that this is also how the anchors are built.
      if (!isa<Instruction>(Current.first))
        Current.second = nullptr;
      // If it is already visited or it is an ending value then skip it.
      if (Map.find(Current) != Map.end())
        continue;
      TraverseOrder.push_back(Current);

      Value *CurrentValue = Current.first;
      BasicBlock *CurrentBlock = Current.second;
      // CurrentValue must be a Phi node or select. All others must be covered
      // by anchors.
      Instruction *CurrentI = cast<Instruction>(CurrentValue);
      bool IsDefinedInThisBB = CurrentI->getParent() == CurrentBlock;

      unsigned PredCount =
          std::distance(pred_begin(CurrentBlock), pred_end(CurrentBlock));
      // If the current value is not defined in this basic block, we are
      // interested in the values in the predecessors.
      if (!IsDefinedInThisBB) {
        assert(PredCount && "Unreachable block?!");
        PHINode *PHI = PHINode::Create(CommonType, PredCount, "sunk_phi",
                                       &CurrentBlock->front());
        Map[Current] = PHI;
        NewPhiNodes.insert(PHI);
        // Add all predecessors to the work list.
        for (auto B : predecessors(CurrentBlock))
          Worklist.push_back({ CurrentValue, B });
        continue;
      }
      // The value is defined in this basic block.
      if (SelectInst *OrigSelect = dyn_cast<SelectInst>(CurrentI)) {
        // Is it OK to get metadata from OrigSelect?!
        // Create a Select placeholder with dummy value.
        SelectInst *Select =
            SelectInst::Create(OrigSelect->getCondition(), Dummy, Dummy,
                               OrigSelect->getName(), OrigSelect, OrigSelect);
        Map[Current] = Select;
        NewSelectNodes.insert(Select);
        // We are interested in the True and False values in this basic block.
        Worklist.push_back({ OrigSelect->getTrueValue(), CurrentBlock });
        Worklist.push_back({ OrigSelect->getFalseValue(), CurrentBlock });
      } else {
        // It must be a Phi node then.
        auto *CurrentPhi = cast<PHINode>(CurrentI);
        // Create a new Phi node for the merge of the bases.
        assert(PredCount && "Unreachable block?!");
        PHINode *PHI = PHINode::Create(CommonType, PredCount, "sunk_phi",
                                       &CurrentBlock->front());
        Map[Current] = PHI;
        NewPhiNodes.insert(PHI);

        // Add all predecessors to the work list.
        for (auto B : predecessors(CurrentBlock))
          Worklist.push_back({ CurrentPhi->getIncomingValueForBlock(B), B });
      }
    }
  }

  bool addrModeCombiningAllowed() {
    if (DisableComplexAddrModes)
      return false;
    switch (DifferentField) {
    default:
      return false;
    case ExtAddrMode::BaseRegField:
      return AddrSinkCombineBaseReg;
    case ExtAddrMode::BaseGVField:
      return AddrSinkCombineBaseGV;
    case ExtAddrMode::BaseOffsField:
      return AddrSinkCombineBaseOffs;
    case ExtAddrMode::ScaledRegField:
      return AddrSinkCombineScaledReg;
    }
  }
};

} // end anonymous namespace

/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = nullptr;
  Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, this is not (x+c)*scale; just return what we have.
  return true;
}

/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer
    // sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}

namespace {

/// \brief Helper class to perform type promotion.
class TypePromotionHelper {
  /// \brief Utility function to check whether or not a sign or zero extension
  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  /// either using the operands of \p Inst or promoting \p Inst.
  /// The type of the extension is defined by \p IsSExt.
  /// In other words, check if:
  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  /// #1 Promotion applies:
  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  /// #2 Operand reuses:
  /// ext opnd1 to ConsideredExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
                            const InstrToOrigTy &PromotedInsts, bool IsSExt);

  /// \brief Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    return !(isa<SelectInst>(Inst) && OpIdx == 0);
  }

  /// \brief Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc or sext or zext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForTruncAndAnyExt(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);

  /// \brief Utility function to promote the operand of \p Ext when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all the instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForOther(Instruction *Ext,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInstsCost,
                                       SmallVectorImpl<Instruction *> *Exts,
                                       SmallVectorImpl<Instruction *> *Truncs,
                                       const TargetLowering &TLI, bool IsSExt);

  /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }

  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
                            InstrToOrigTy &PromotedInsts,
                            unsigned &CreatedInstsCost,
                            SmallVectorImpl<Instruction *> *Exts,
                            SmallVectorImpl<Instruction *> *Truncs,
                            const TargetLowering &TLI);

  /// \brief Given a sign/zero extend instruction \p Ext, return the
  /// appropriate action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions, as CodeGenPrepare
  /// will reinsert them later, thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

} // end anonymous namespace

bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
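  // For example (illustrative IR), with a nuw flag a zext can be moved
  // through an add:
  //   %add = add nuw i32 %a, %b
  //   %res = zext i32 %add to i64
  // can be promoted to:
  //   %a.z = zext i32 %a to i64
  //   %b.z = zext i32 %b to i64
  //   %res = add nuw i64 %a.z, %b.z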
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
       (IsSExt && BinOp->hasNoSignedWrap())))
    return true;

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that the trunc just drops extended bits of the same kind as
  // the extension.
  // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType;
  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
  if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
    OpndType = It->second.getPointer();
  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
    OpndType = Opnd->getOperand(0)->getType();
  else
    return false;

  // #2 check that the truncate just drops extended bits.
  return Inst->getType()->getIntegerBitWidth() >=
         OpndType->getIntegerBitWidth();
}

TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check that we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}

Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
    if (ExtInst) {
      if (Exts)
        Exts->push_back(ExtInst);
      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
    }
    return ExtVal;
  }

  // At this point we have: ext ty opnd to ty.
  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  Value *NextVal = ExtInst->getOperand(0);
  TPT.eraseInstruction(ExtInst, NextVal);
  return NextVal;
}

Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
  // By construction, the operand of Ext is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  CreatedInstsCost = 0;
  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, but Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
      // Insert it just after the definition.
      ITrunc->moveAfter(ExtOpnd);
      if (Truncs)
        Truncs->push_back(ITrunc);
    }

    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous call
    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
    TPT.setOperand(Ext, 0, ExtOpnd);
  }

  // Get through the Instruction:
  // 1. Update its type.
  // 2. Replace the uses of Ext by Inst.
  // 3. Extend each operand that needs to be extended.

  // Remember the original type of the instruction before promotion.
  // This is useful to know that the high bits are sign extended bits.
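  // For instance (illustrative IR), promoting a sext through an add:
  //   %op  = add nsw i32 %a, %b
  //   %ext = sext i32 %op to i64
  // becomes:
  //   %a.e = sext i32 %a to i64
  //   %b.e = sext i32 %b to i64
  //   %op  = add nsw i64 %a.e, %b.e   ; the uses of %ext now use %op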
  PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
      ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
  // Step #1.
  TPT.mutateType(ExtOpnd, Ext->getType());
  // Step #2.
  TPT.replaceAllUsesWith(Ext, ExtOpnd);
  // Step #3.
  Instruction *ExtForOpnd = Ext;

  DEBUG(dbgs() << "Propagate Ext to operands\n");
  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
       ++OpIdx) {
    DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
        !shouldExtOperand(ExtOpnd, OpIdx)) {
      DEBUG(dbgs() << "No need to propagate\n");
      continue;
    }
    // Check if we can statically extend the operand.
    Value *Opnd = ExtOpnd->getOperand(OpIdx);
    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
                            : Cst->getValue().zext(BitWidth);
      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
      continue;
    }
    // UndefValues are typed, so we have to statically sign extend them.
    if (isa<UndefValue>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    // Check if Ext was reused to extend an operand.
    if (!ExtForOpnd) {
      // If yes, create a new one.
      DEBUG(dbgs() << "More operands to ext\n");
      Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
                                    : TPT.createZExt(Ext, Opnd, Ext->getType());
      if (!isa<Instruction>(ValForExtOpnd)) {
        TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
        continue;
      }
      ExtForOpnd = cast<Instruction>(ValForExtOpnd);
    }
    if (Exts)
      Exts->push_back(ExtForOpnd);
    TPT.setOperand(ExtForOpnd, 0, Opnd);

    // Move the sign extension before the insertion point.
    TPT.moveBefore(ExtForOpnd, ExtOpnd);
    TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
    CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more extensions are required, new instructions will have to be
    // created.
    ExtForOpnd = nullptr;
  }
  if (ExtForOpnd == Ext) {
    DEBUG(dbgs() << "Extension is useless now\n");
    TPT.eraseInstruction(Ext);
  }
  return ExtOpnd;
}

/// Check whether or not promoting an instruction to a wider type is
/// profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode by the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
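  // E.g. (illustrative): if the promotion created two non-free extensions
  // (NewCost = 2) while the original code had a single extension and nothing
  // extra was folded into the addressing mode (OldCost = 1), we made things
  // worse.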
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}

/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer
    // sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS =
        AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
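    // E.g. (illustrative), for "add i64 %base, %idx", matching %idx first may
    // trigger a type promotion that must be rolled back if %base then fails
    // to match.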
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
        matchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the
    // RHS.
    if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
        matchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS || RHS->getBitWidth() > 64)
      return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We check whether it contains constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue() * TypeSize;
        } else if (TypeSize) { // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this
    // case, just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
        // Check to see if we can fold the base pointer in too.
        if (matchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this
    // point.
    // E.g.,
    // op = add opnd, 1
    // idx = ext op
    // addr = gep base, idx
    // is now:
    // promotedOpnd = ext opnd            <- no match here
    // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    // addr = gep base, op                <- match
    if (MovedAway)
      *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  }
  return false;
}

/// If we can, try to add the value of 'Addr' into the current addressing mode.
/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
/// unmodified. This assumes that Addr is either a pointer type or intptr_t
/// for the target.
///
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will rollback if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check here.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not
    // [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }
  // Couldn't match.
  TPT.rollback(LastKnownGood);
  return false;
}

/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands. If so, return true, otherwise return false.
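/// For example (illustrative IR), in
///   call void asm sideeffect "movl $$0, $0", "*m"(i32* %addr)
/// %addr only appears as an indirect memory operand, so folding an address
/// computation into it is fine.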
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getFunction();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI,
                           ImmutableCallSite(CI));

  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

// Max number of memory uses to look at before aborting the search to conserve
// compile time.
static constexpr int MaxMemoryUsesToScan = 20;

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I,
    SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
    const TargetRegisterInfo &TRI, int SeenInsts = 0) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  const bool OptSize = I->getFunction()->optForSize();

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    // Conservatively return true if we're seeing a large number or a deep
    // chain of users. This avoids excessive compilation times in pathological
    // cases.
    if (SeenInsts++ >= MaxMemoryUsesToScan)
      return true;

    Instruction *UserI = cast<Instruction>(U.getUser());
    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(RMW, opNo));
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(CmpX, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      // If this is a cold call, we can sink the addressing calculation into
      // the cold path.
      // See optimizeCallInst.
      if (!OptSize && CI->hasFnAttr(Attribute::Cold))
        continue;

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI,
                          SeenInsts))
      return true;
  }

  return false;
}

/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
/// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are
  // live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live, this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// It is possible for the addressing mode of the machine to fold the specified
/// instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1"
/// to be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase
/// the number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into
  // it, and AMAfter is the addressing mode after the instruction was folded.
  // Get the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst.)  In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction.  The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fastpath.  For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonably small and fast way to
  // compute an effective address (e.g. LEA on x86).
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
    if (!AddrTy)
      return false;
    Type *AddressAccessTy = AddrTy->getElementType();
    unsigned AS = AddrTy->getAddressSpace();

    // Do a match against the root of this address, ignoring profitability.
    // This will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI,
                                  AddressAccessTy, AS,
                                  MemoryInst, Result, InsertedInsts,
                                  PromotedInsts, TPT);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // The match was only made to check profitability; the changes made are not
    // part of the original matcher. Therefore, they should be dropped,
    // otherwise the original matcher will not present the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (!is_contained(MatchedAddrModeInsts, I))
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}

/// Return true if the specified values are defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// Sink addressing mode computation immediately before MemoryInst if doing so
/// can be done without increasing register pressure.  The need for the
/// register pressure constraint means this can end up being an all or nothing
/// decision for all uses of the same addressing computation.
///
/// Load and Store Instructions often have addressing modes that can do
/// significant amounts of computation. As such, instruction selection will try
/// to get the load or store to do as much computation as possible for the
/// program. The problem is that isel can only see within a single block. As
/// such, we sink as much legal addressing mode work into the block as
/// possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands. It's also used to sink addressing computations feeding into cold
/// call sites into their (cold) basic block.
///
/// The motivation for handling sinking into cold blocks is that doing so can
/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes.  This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing mode obtained from the non-PHI/select roots of
  // the graph are compatible.
  bool PhiOrSelectSeen = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  const SimplifyQuery SQ(*DL, TLInfo);
  AddressingModeCombiner AddrModes(SQ, { Addr, MemoryInst->getParent() });
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // We allow traversing cyclic Phi nodes.
    // In case of success after this loop, we ensure that traversing through
    // Phi nodes ends up with all cases computing an address of the form
    //   BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index
    // are exactly the same Values in all cases.
    // It means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same value as they had in the address computation
    // represented as a Phi. So we can safely sink the address computation to
    // the memory instruction.
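    // For example (illustrative IR), with
    //   %p1 = getelementptr i8, i8* %base, i64 4   ; in %bb1
    //   %p2 = getelementptr i8, i8* %base, i64 4   ; in %bb2
    //   %p  = phi i8* [ %p1, %bb1 ], [ %p2, %bb2 ]
    // every root computes %base + 4, so the computation can be rebuilt next
    // to the memory instruction.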
    if (!Visited.insert(V).second)
      continue;

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : P->incoming_values())
        worklist.push_back(IncValue);
      PhiOrSelectSeen = true;
      continue;
    }
    // Similar for select.
    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
      worklist.push_back(SI->getFalseValue());
      worklist.push_back(SI->getTrueValue());
      PhiOrSelectSeen = true;
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.  Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    AddrModeInsts.clear();
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
        InsertedInsts, PromotedInsts, TPT);
    NewAddrMode.OriginalValue = V;

    if (!AddrModes.addNewAddrMode(NewAddrMode))
      break;
  }

  // Try to combine the AddrModes we've collected. If we couldn't collect any,
  // or we have multiple but either couldn't combine them or combining them
  // wouldn't do anything useful, bail out now.
  if (!AddrModes.combineAddrModes()) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  TPT.commit();

  // Get the combined AddrMode (or the only AddrMode, if we only had one).
  ExtAddrMode AddrMode = AddrModes.getAddrMode();

  // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a Phi node then it is definitely not local, and if we saw a
  // select then we want to push the address calculation past it even if it's
  // already in this BB.
  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.  Before attempting reuse, check if the address is
  // valid as it may have been erased.

  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];

  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
  } else if (AddrSinkUsingGEPs ||
             (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
              SubtargetInfo->useAA())) {
    // By default, we use the GEP-based method when AA is used later.  This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return false;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    // It is only safe to sign extend the BaseReg if we know that the math
    // required to create it did not overflow before we extend it. Since
    // the original IR value was tossed in favor of a constant back when
    // the AddrMode was created we need to bail out gracefully if widths
    // do not match instead of extending it.
    //
    // (See below for code to add the scale.)
    if (AddrMode.Scale) {
      Type *ScaledRegTy = AddrMode.ScaledReg->getType();
      if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
          cast<IntegerType>(ScaledRegTy)->getBitWidth())
        return false;
    }

    if (AddrMode.BaseGV) {
      if (ResultPtr)
        return false;

      ResultPtr = AddrMode.BaseGV;
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!DL->isNonIntegralPointerType(Addr->getType())) {
      if (!ResultPtr && AddrMode.BaseReg) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.BaseReg = nullptr;
      } else if (!ResultPtr && AddrMode.Scale == 1) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.Scale = 0;
      }
    }

    if (!ResultPtr &&
        !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return false;
    } else {
      Type *I8PtrTy =
          Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
      Type *I8Ty = Builder.getInt8Ty();

      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else {
          assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth() &&
                 "We can't transform if ScaledReg is too narrow");
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex,
                                        "sunkaddr");
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
      }

      if (SunkAddr->getType() != Addr->getType())
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else {
    // We'd require a ptrtoint/inttoptr down the line, which we can't do for
    // non-integral pointers, so in that case bail out now.
    Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
    Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
    PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
    PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
    if (DL->isNonIntegralPointerType(Addr->getType()) ||
        (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
        (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
        (AddrMode.BaseGV &&
         DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
      return false;

    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return false;
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
  // Store the newly computed address into the cache. In the case we reused a
  // value, this should be idempotent.
  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakTrackingVH to hold onto it in case this happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);

    if (IterHandle != CurValue) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// If there are any memory operands, use optimizeMemoryInst to sink their
/// address computations into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// \brief Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
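///
/// For instance (illustrative IR, not taken from a test case):
/// \code
/// %b = zext i8 %a to i32
/// %c = zext i8 %a to i64
/// \endcode
/// qualifies when the target reports the zext from i32 to i64 as free,
/// because the wider result can then be rederived from the narrower one at
/// no extra cost.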
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
  assert(!Val->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Val->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: Same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    // a = Val
    // b = sext ty1 a to ty2
    // c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    // a = Val
    // b = sext ty1 a to ty2
    // c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, and maybe extending from one type to the other is free.
    // In that case, we would not account for a different use.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}

/// \brief Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save the extensions that were profitably moved up in
/// \p ProfitablyMovedExts.
/// When some promotion happened, \p TPT contains the proper state to revert
/// them.
///
/// \return true if some promotion happened, false otherwise.
bool CodeGenPrepare::tryToPromoteExts(
    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
    unsigned CreatedInstsCost) {
  bool Promoted = false;

  // Iterate over all the extensions to try to promote them.
  for (auto I : Exts) {
    // Early check if we directly have ext(load).
    if (isa<LoadInst>(I->getOperand(0))) {
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Check whether or not we want to do any promotion. The reason we have
    // this check inside the for loop is to catch the case where an extension
    // is directly fed by a load, because in that case the extension can be
    // moved up without any promotion on its operands.
    if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      return false;

    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH) {
      // Save the current extension as we cannot move up through its operand.
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
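    // Run the promotion action; on success, TPT records enough state to undo
    // it, and NewExts collects the extensions exposed by the promotion.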
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We will be able to merge only one extension into a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
      // This promotion is not profitable, rollback to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable, roll
    // back and save the current extension (I) as the last profitable one.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}

/// Merge redundant sexts when one dominates the other.
bool CodeGenPrepare::mergeSExts(Function &F) {
  DominatorTree DT(F);
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (DT.dominates(Inst, Pt)) {
          Pt->replaceAllUsesWith(Inst);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!DT.dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          continue;
        Inst->replaceAllUsesWith(Pt);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}

/// Return true if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  return TLI->isExtLoad(LI, Inst, *DL);
}

/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used in memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  // The ExtLoad formation and address type promotion infrastructure require
  // TLI to be effective.
  if (!TLI)
    return false;

  bool AllowPromotionWithoutCommonHeader = false;
  // See if this is an interesting sext operation for address type promotion
  // before trying to promote it, e.g., one with the right type that is used
  // in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);

  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);

  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *ExtFedByLoad;

  // Try to promote a chain of computation if it allows us to form an extended
  // load.
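  // On success, canFormExtLd fills in LI and ExtFedByLoad: some extension in
  // SpeculativelyMovedExts is fed (possibly after promotion) directly by LI.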
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->moveAfter(LI);
    // CGP does not check if the zext would be speculatively executed when moved
    // to the same basic block as the load. Preserving its original location
    // would pessimize the debugging experience, as well as negatively impact
    // the quality of sample PGO. We don't want to use "line 0" as that has a
    // size cost in the line-table section and logically the zext can be seen as
    // part of the load. Therefore we conservatively reuse the same debug
    // location for the load and the zext.
    ExtFedByLoad->setDebugLoc(LI->getDebugLoc());
    ++NumExtsMoved;
    Inst = ExtFedByLoad;
    return true;
  }

  // Continue promoting SExts if the target considers address type promotion
  // worthwhile.
  if (ATPConsiderable &&
      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
                                  HasPromoted, TPT, SpeculativelyMovedExts))
    return true;

  TPT.rollback(LastKnownGood);
  return false;
}

// Perform address type promotion if doing so is profitable.
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value. However, if
// AllowPromotionWithoutCommonHeader == true, we expect promoting the
// extension to be profitable by itself.
bool CodeGenPrepare::performAddressTypePromotion(
    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
    bool HasPromoted, TypePromotionTransaction &TPT,
    SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
  bool Promoted = false;
  SmallPtrSet<Instruction *, 1> UnhandledExts;
  bool AllSeenFirst = true;
  for (auto I : SpeculativelyMovedExts) {
    Value *HeadOfChain = I->getOperand(0);
    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
        SeenChainsForSExt.find(HeadOfChain);
    // If there is an unhandled SExt which has the same header, try to promote
    // it as well.
    if (AlreadySeen != SeenChainsForSExt.end()) {
      if (AlreadySeen->second != nullptr)
        UnhandledExts.insert(AlreadySeen->second);
      AllSeenFirst = false;
    }
  }

  if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
                        SpeculativelyMovedExts.size() == 1)) {
    TPT.commit();
    if (HasPromoted)
      Promoted = true;
    for (auto I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = nullptr;
      ValToSExtendedUses[HeadOfChain].push_back(I);
    }
    // Update Inst as promotion happened.
    Inst = SpeculativelyMovedExts.pop_back_val();
  } else {
    // This is the first chain visited from the header; keep the current chain
    // as unhandled. Defer promoting it until we encounter another SExt chain
    // derived from the same header.
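    // Record Inst as the pending chain for each head, so that a later chain
    // rooted at the same head can trigger promotion of both chains at once.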
    for (auto I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = Inst;
    }
    return false;
  }

  if (!AllSeenFirst && !UnhandledExts.empty())
    for (auto VisitedSExt : UnhandledExts) {
      if (RemovedInsts.count(VisitedSExt))
        continue;
      TypePromotionTransaction TPT(RemovedInsts);
      SmallVector<Instruction *, 1> Exts;
      SmallVector<Instruction *, 2> Chains;
      Exts.push_back(VisitedSExt);
      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
      TPT.commit();
      if (HasPromoted)
        Promoted = true;
      for (auto I : Chains) {
        Value *HeadOfChain = I->getOperand(0);
        // Mark this as handled.
        SeenChainsForSExt[HeadOfChain] = nullptr;
        ValToSExtendedUses[HeadOfChain].push_back(I);
      }
    }
  return Promoted;
}

bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
      InsertedInsts.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

// Find loads whose uses only use some of the loaded value's bits. Add an "and"
Add an "and" 5014 // just after the load if the target can fold this into one extload instruction, 5015 // with the hope of eliminating some of the other later "and" instructions using 5016 // the loaded value. "and"s that are made trivially redundant by the insertion 5017 // of the new "and" are removed by this function, while others (e.g. those whose 5018 // path from the load goes through a phi) are left for isel to potentially 5019 // remove. 5020 // 5021 // For example: 5022 // 5023 // b0: 5024 // x = load i32 5025 // ... 5026 // b1: 5027 // y = and x, 0xff 5028 // z = use y 5029 // 5030 // becomes: 5031 // 5032 // b0: 5033 // x = load i32 5034 // x' = and x, 0xff 5035 // ... 5036 // b1: 5037 // z = use x' 5038 // 5039 // whereas: 5040 // 5041 // b0: 5042 // x1 = load i32 5043 // ... 5044 // b1: 5045 // x2 = load i32 5046 // ... 5047 // b2: 5048 // x = phi x1, x2 5049 // y = and x, 0xff 5050 // 5051 // becomes (after a call to optimizeLoadExt for each load): 5052 // 5053 // b0: 5054 // x1 = load i32 5055 // x1' = and x1, 0xff 5056 // ... 5057 // b1: 5058 // x2 = load i32 5059 // x2' = and x2, 0xff 5060 // ... 5061 // b2: 5062 // x = phi x1', x2' 5063 // y = and x, 0xff 5064 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 5065 if (!Load->isSimple() || 5066 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) 5067 return false; 5068 5069 // Skip loads we've already transformed. 5070 if (Load->hasOneUse() && 5071 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 5072 return false; 5073 5074 // Look at all uses of Load, looking through phis, to determine how many bits 5075 // of the loaded value are needed. 5076 SmallVector<Instruction *, 8> WorkList; 5077 SmallPtrSet<Instruction *, 16> Visited; 5078 SmallVector<Instruction *, 8> AndsToMaybeRemove; 5079 for (auto *U : Load->users()) 5080 WorkList.push_back(cast<Instruction>(U)); 5081 5082 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 5083 unsigned BitWidth = LoadResultVT.getSizeInBits(); 5084 APInt DemandBits(BitWidth, 0); 5085 APInt WidestAndBits(BitWidth, 0); 5086 5087 while (!WorkList.empty()) { 5088 Instruction *I = WorkList.back(); 5089 WorkList.pop_back(); 5090 5091 // Break use-def graph loops. 5092 if (!Visited.insert(I).second) 5093 continue; 5094 5095 // For a PHI node, push all of its users. 5096 if (auto *Phi = dyn_cast<PHINode>(I)) { 5097 for (auto *U : Phi->users()) 5098 WorkList.push_back(cast<Instruction>(U)); 5099 continue; 5100 } 5101 5102 switch (I->getOpcode()) { 5103 case Instruction::And: { 5104 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 5105 if (!AndC) 5106 return false; 5107 APInt AndBits = AndC->getValue(); 5108 DemandBits |= AndBits; 5109 // Keep track of the widest and mask we see. 
      if (AndBits.ugt(WidestAndBits))
        WidestAndBits = AndBits;
      if (AndBits == WidestAndBits && I->getOperand(0) == Load)
        AndsToMaybeRemove.push_back(I);
      break;
    }

    case Instruction::Shl: {
      auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!ShlC)
        return false;
      uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
      DemandBits.setLowBits(BitWidth - ShiftAmt);
      break;
    }

    case Instruction::Trunc: {
      EVT TruncVT = TLI->getValueType(*DL, I->getType());
      unsigned TruncBitWidth = TruncVT.getSizeInBits();
      DemandBits.setLowBits(TruncBitWidth);
      break;
    }

    default:
      return false;
    }
  }

  uint32_t ActiveBits = DemandBits.getActiveBits();
  // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
  // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
  // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
  // (and (load x) 1) is not matched as a single instruction, rather as a LDR
  // followed by an AND.
  // TODO: Look into removing this restriction by fixing backends to either
  // return false for isLoadExtLegal for i1 or have them select this pattern to
  // a single instruction.
  //
  // Also avoid hoisting if we didn't see any ands with the exact DemandBits
  // mask, since these are the only ands that will be removed by isel.
  if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
      WidestAndBits != DemandBits)
    return false;

  LLVMContext &Ctx = Load->getType()->getContext();
  Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
  EVT TruncVT = TLI->getValueType(*DL, TruncTy);

  // Reject cases that won't be matched as extloads.
  if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
      !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
    return false;

  IRBuilder<> Builder(Load->getNextNode());
  auto *NewAnd = dyn_cast<Instruction>(
      Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
  // Mark this instruction as "inserted by CGP", so that other
  // optimizations don't touch it.
  InsertedInsts.insert(NewAnd);

  // Replace all uses of load with new and (except for the use of load in the
  // new and itself).
  Load->replaceAllUsesWith(NewAnd);
  NewAnd->setOperand(0, Load);

  // Remove any and instructions that are now redundant.
  for (auto *And : AndsToMaybeRemove)
    // Check that the and mask is the same as the one we decided to put on the
    // new and.
    if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
      And->replaceAllUsesWith(NewAnd);
      if (&*CurInstIterator == And)
        CurInstIterator = std::next(And->getIterator());
      And->eraseFromParent();
      ++NumAndUses;
    }

  ++NumAndsAdded;
  return true;
}

/// Check if V (an operand of a select instruction) is an expensive instruction
/// that is only used once.
static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  // If it's safe to speculatively execute, then it should not have side
  // effects; therefore, it's safe to sink and possibly *not* execute.
  return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
         TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive;
}

/// Returns true if a SelectInst should be turned into an explicit branch.
static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
                                                const TargetLowering *TLI,
                                                SelectInst *SI) {
  // If even a predictable select is cheap, then a branch can't be cheaper.
  if (!TLI->isPredictableSelectExpensive())
    return false;

  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch.

  // If metadata tells us that the select condition is obviously predictable,
  // then we want to replace the select with a branch.
  uint64_t TrueWeight, FalseWeight;
  if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
    uint64_t Max = std::max(TrueWeight, FalseWeight);
    uint64_t Sum = TrueWeight + FalseWeight;
    if (Sum != 0) {
      auto Probability = BranchProbability::getBranchProbability(Max, Sum);
      if (Probability > TLI->getPredictableBranchThreshold())
        return true;
    }
  }

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If a branch is predictable, an out-of-order CPU can avoid blocking on its
  // comparison condition. If the compare has more than one use, there's
  // probably another cmov or setcc around, so it's not worth emitting a branch.
  if (!Cmp || !Cmp->hasOneUse())
    return false;

  // If either operand of the select is expensive and only needed on one side
  // of the select, we should form a branch.
  if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
      sinkSelectOperand(TTI, SI->getFalseValue()))
    return true;

  return false;
}

/// If \p isTrue is true, return the true value of \p SI, otherwise return the
/// false value of \p SI. If the true/false value of \p SI is defined by any
/// select instructions in \p Selects, look through the defining select
/// instruction until the true/false value is not defined in \p Selects.
static Value *getTrueOrFalseValue(
    SelectInst *SI, bool isTrue,
    const SmallPtrSet<const Instruction *, 2> &Selects) {
  Value *V;

  for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
       DefSI = dyn_cast<SelectInst>(V)) {
    assert(DefSI->getCondition() == SI->getCondition() &&
           "The condition of DefSI does not match with SI");
    V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
  }
  return V;
}

/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
  // Find all consecutive select instructions that share the same condition.
  SmallVector<SelectInst *, 2> ASI;
  ASI.push_back(SI);
  for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
       It != SI->getParent()->end(); ++It) {
    SelectInst *I = dyn_cast<SelectInst>(&*It);
    if (I && SI->getCondition() == I->getCondition()) {
      ASI.push_back(I);
    } else {
      break;
    }
  }

  SelectInst *LastSI = ASI.back();
  // Advance the current iterator past the remaining select instructions,
  // because they will either all be lowered to branches or none of them will.
  CurInstIterator = std::next(LastSI->getIterator());

  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to control flow?
  if (DisableSelectToBranch || OptSize || !TLI || VectorCond ||
      SI->getMetadata(LLVMContext::MD_unpredictable))
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (VectorCond)
    SelectKind = TargetLowering::VectorMaskSelect;
  else if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  if (TLI->isSelectSupported(SelectKind) &&
      !isFormingBranchFromSelectProfitable(TTI, TLI, SI))
    return false;

  ModifiedDT = true;

  // Transform a sequence like this:
  //    start:
  //       %cmp = cmp uge i32 %a, %b
  //       %sel = select i1 %cmp, i32 %c, i32 %d
  //
  // Into:
  //    start:
  //       %cmp = cmp uge i32 %a, %b
  //       br i1 %cmp, label %select.true, label %select.false
  //    select.true:
  //       br label %select.end
  //    select.false:
  //       br label %select.end
  //    select.end:
  //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
  //
  // In addition, we may sink instructions that produce %c or %d from
  // the entry block into the destination(s) of the new branch.
  // If the true or false blocks do not contain a sunken instruction, that
  // block and its branch may be optimized away. In that case, one side of the
  // first branch will point directly to select.end, and the corresponding PHI
  // predecessor block will be the start block.

  // First, we split the block containing the select into 2 blocks.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
  BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");

  // Delete the unconditional branch that was just created by the split.
  StartBlock->getTerminator()->eraseFromParent();

  // These are the new basic blocks for the conditional branch.
  // At least one will become an actual new basic block.
  BasicBlock *TrueBlock = nullptr;
  BasicBlock *FalseBlock = nullptr;
  BranchInst *TrueBranch = nullptr;
  BranchInst *FalseBranch = nullptr;

  // Sink expensive instructions into the conditional blocks to avoid executing
  // them speculatively.
  for (SelectInst *SI : ASI) {
    if (sinkSelectOperand(TTI, SI->getTrueValue())) {
      if (TrueBlock == nullptr) {
        TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
                                       EndBlock->getParent(), EndBlock);
        TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
      }
      auto *TrueInst = cast<Instruction>(SI->getTrueValue());
      TrueInst->moveBefore(TrueBranch);
    }
    if (sinkSelectOperand(TTI, SI->getFalseValue())) {
      if (FalseBlock == nullptr) {
        FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
                                        EndBlock->getParent(), EndBlock);
        FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
      }
      auto *FalseInst = cast<Instruction>(SI->getFalseValue());
      FalseInst->moveBefore(FalseBranch);
    }
  }

  // If there was nothing to sink, then arbitrarily choose the 'false' side
  // for a new input value to the PHI.
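  // (TrueBlock and FalseBlock can only compare equal here when both are still
  // null, i.e. no operand was sunk on either side.)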
  if (TrueBlock == FalseBlock) {
    assert(TrueBlock == nullptr &&
           "Unexpected basic block transform while optimizing select");

    FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
                                    EndBlock->getParent(), EndBlock);
    BranchInst::Create(EndBlock, FalseBlock);
  }

  // Insert the real conditional branch based on the original condition.
  // If we did not create a new block for one of the 'true' or 'false' paths
  // of the condition, it means that side of the branch goes to the end block
  // directly and the path originates from the start block from the point of
  // view of the new PHI.
  BasicBlock *TT, *FT;
  if (TrueBlock == nullptr) {
    TT = EndBlock;
    FT = FalseBlock;
    TrueBlock = StartBlock;
  } else if (FalseBlock == nullptr) {
    TT = TrueBlock;
    FT = EndBlock;
    FalseBlock = StartBlock;
  } else {
    TT = TrueBlock;
    FT = FalseBlock;
  }
  IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI);

  SmallPtrSet<const Instruction *, 2> INS;
  INS.insert(ASI.begin(), ASI.end());
  // Use a reverse iterator because a later select may use the value of an
  // earlier select, and we need to propagate the value through the earlier
  // select to get the PHI operand.
  for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
    SelectInst *SI = *It;
    // The select itself is replaced with a PHI Node.
    PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
    PN->takeName(SI);
    PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
    PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);

    SI->replaceAllUsesWith(PN);
    SI->eraseFromParent();
    INS.erase(SI);
    ++NumSelectsExpanded;
  }

  // Instruct optimizeBlock to skip to the next block.
  CurInstIterator = StartBlock->end();
  return true;
}

static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
  SmallVector<int, 16> Mask(SVI->getShuffleMask());
  int SplatElem = -1;
  for (unsigned i = 0; i < Mask.size(); ++i) {
    if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
      return false;
    SplatElem = Mask[i];
  }

  return true;
}

/// Some targets have expensive vector shifts if the lanes aren't all the same
/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
/// it's often worth sinking a shufflevector splat down to its use so that
/// codegen can spot all lanes are identical.
bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  BasicBlock *DefBB = SVI->getParent();

  // Only do this xform if variable vector shifts are particularly expensive.
  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
    return false;

  // We only expect better codegen by sinking a shuffle if we can recognise a
  // constant splat.
  if (!isBroadcastShuffle(SVI))
    return false;

  // InsertedShuffles - Only insert a shuffle in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

  bool MadeChange = false;
  for (User *U : SVI->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this shuffle is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;

    // For now only apply this when the splat is used by a shift instruction.
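    // E.g. (illustrative IR): given
    //   %sp = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
    // and a use "shl <4 x i32> %x, %sp" in another block, sinking %sp next to
    // the shift lets ISel see that the shift amount is uniform across lanes.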
    if (!UI->isShift()) continue;

    // Everything checks out, sink the shuffle if the user's block doesn't
    // already have a copy.
    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];

    if (!InsertedShuffle) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedShuffle =
          new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                SVI->getOperand(2), "", &*InsertPt);
    }

    UI->replaceUsesOfWith(SVI, InsertedShuffle);
    MadeChange = true;
  }

  // If we removed all uses, nuke the shuffle.
  if (SVI->use_empty()) {
    SVI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
  if (!TLI || !DL)
    return false;

  Value *Cond = SI->getCondition();
  Type *OldType = Cond->getType();
  LLVMContext &Context = Cond->getContext();
  MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
  unsigned RegWidth = RegType.getSizeInBits();

  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
    return false;

  // If the register width is greater than the type width, expand the condition
  // of the switch instruction and each case constant to the width of the
  // register. By widening the type of the switch condition, subsequent
  // comparisons (for case comparisons) will not need to be extended to the
  // preferred register width, so we will potentially eliminate N-1 extends,
  // where N is the number of cases in the switch.
  auto *NewType = Type::getIntNTy(Context, RegWidth);

  // Zero-extend the switch condition and case constants unless the switch
  // condition is a function argument that is already being sign-extended.
  // In that case, we can avoid an unnecessary mask/extension by sign-extending
  // everything instead.
  Instruction::CastOps ExtType = Instruction::ZExt;
  if (auto *Arg = dyn_cast<Argument>(Cond))
    if (Arg->hasSExtAttr())
      ExtType = Instruction::SExt;

  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
  ExtInst->insertBefore(SI);
  SI->setCondition(ExtInst);
  for (auto Case : SI->cases()) {
    APInt NarrowConst = Case.getCaseValue()->getValue();
    APInt WideConst = (ExtType == Instruction::ZExt) ?
                      NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
  }

  return true;
}


namespace {

/// \brief Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;

  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;

  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;

  /// Instruction that will be combined with the transition.
  Instruction *CombineInst = nullptr;

  /// \brief The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// \brief Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 0;
  }

  /// \brief Return the index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 1;
  }

  /// \brief Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// \brief Promote \p ToBePromoted by moving \p Def downward through it.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// \brief Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    unsigned Align = ST->getAlignment();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            Align)) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the transition
    // scalar to vector.
    // The vector chain has to account for the combining cost.
    uint64_t ScalarCost =
        TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
    uint64_t VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueKind Arg0OVK =
          IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                         : TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Arg1OVK =
          !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                          : TargetTransformInfo::OK_AnyValue;
      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               Arg0OVK, Arg1OVK);
    }
    DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
                 << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// \brief Generate a constant vector with \p Val with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    unsigned End = getTransitionType()->getVectorNumElements();
    if (UseSplat)
      return ConstantVector::getSplat(End, Val);

    SmallVector<Constant *, 4> ConstVec;
    UndefValue *UndefVal = UndefValue::get(Val->getType());
    for (unsigned Idx = 0; Idx != End; ++Idx) {
      if (Idx == ExtractIdx)
        ConstVec.push_back(Val);
      else
        ConstVec.push_back(UndefVal);
    }
    return ConstantVector::get(ConstVec);
  }

  /// \brief Check if promoting the operand at \p OperandIdx in \p Use to a
  /// vector type can trigger undefined behavior.
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost) {
    assert(Transition && "Do not know how to promote null");
  }

  /// \brief Check if we can promote \p ToBePromoted to \p Type.
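  /// E.g., a scalar "add i32 %x, 1" can be promoted to "add <2 x i32> %a,
  /// <i32 1, i32 1>" operating on the lane %x was extracted from
  /// (illustrative sketch of what promoteImpl performs).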
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// \brief Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the RHS,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// \brief Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// \brief Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// \brief Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// \brief Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};

} // end anonymous namespace

void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->moveAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}

/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = std::numeric_limits<unsigned>::max();
  if (DisableStoreExtract || !TLI ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it to a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      DEBUG(dbgs() << "Instruction to promote is in a different block ("
                   << ToBePromoted->getParent()->getName()
                   << ") than the transition (" << Parent->getName() << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      DEBUG(dbgs() << "Assume " << *Inst << '\n'
                   << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}

/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
///            (shl (zext I to i64), 32)), addr)  -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into
/// hoo().
/// void goo(const std::pair<int, float> &);
/// hoo() {
///   ...
///   goo(std::make_pair(tmp, ftmp));
///   ...
/// }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();
  if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
      DL.getTypeSizeInBits(SplitStoreType))
    return false;

  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  //  or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of OR and the first operand of SHL to have only
  // one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers no wider than HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of the target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split the store.
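  // The value is split into two SplitStoreType halves: the low half is stored
  // at the original address and the high half at the immediately following
  // SplitStoreType slot (see CreateSplitStore below).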
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so that it may be merged with the split stores by the DAG
  // combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = Builder.CreateBitCast(
        SI.getOperand(1),
        SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
    if (Upper)
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
    Builder.CreateAlignedStore(
        V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}

// Return true if the GEP has two operands, the first operand is of a
// sequential type, and the second operand is a constant.
static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
  gep_type_iterator I = gep_type_begin(*GEP);
  return GEP->getNumOperands() == 2 &&
         I.isSequential() &&
         isa<ConstantInt>(GEP->getOperand(1));
}

// Try unmerging GEPs to reduce liveness interference (register pressure)
// across IndirectBr edges. Since IndirectBr edges tend to touch on many
// blocks, reducing liveness interference across those edges benefits global
// register allocation. Currently handles only certain cases.
//
// For example, unmerge %GEPI and %UGEPI as below.
//
// ---------- BEFORE ----------
// SrcBlock:
//   ...
//   %GEPIOp = ...
//   ...
//   %GEPI = gep %GEPIOp, Idx
//   ...
//   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
//   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it's used by
//      %UGEPI)
//
// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPIOp, UIdx
//   ...
// ---------------------------
//
// ---------- AFTER ----------
// SrcBlock:
//   ... (same as above)
//   (* %GEPI is still alive on the indirectbr edges)
//   (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
//      unmerging)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPI, (UIdx-Idx)
//   ...
// ---------------------------
//
// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
// no longer alive on them.
//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
// of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), so as
// not to disable further simplifications and optimizations as a result of GEP
// merging.
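//
// As a concrete (illustrative) instance of the rewrite above: with
// %GEPI = gep %GEPIOp, 16 in SrcBlock and %UGEPI = gep %GEPIOp, 40 in DstBi,
// the unmerged form is %UGEPI = gep %GEPI, 24, since
// UIdx - Idx = 40 - 16 = 24 (the two index types must match for this to
// apply).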
//
// Note this unmerging may increase the length of the data-flow critical path
// (the path from %GEPIOp to %UGEPI would go through %GEPI), so it is a
// tradeoff between the register pressure and the length of the data-flow
// critical path. Restricting this to the uncommon IndirectBr case minimizes
// the impact of a potentially longer critical path, if any, and the impact on
// compile time.
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType())
      > TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEPI is used outside the block, meaning it's alive on the
  // IndirectBr edge(s).
  if (find_if(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock) {
            return true;
          }
        }
        return false;
      }) == GEPI->users().end())
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to see whether unmerging would make GEPIOp not
  // alive on the IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI) continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that is fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp
    // is the pointer operand to it. If so, record it in the vector. If not,
    // give up.
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType())
        > TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.size() == 0)
    return false;
  // Check the materializing cost of (UIdx-Idx).
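  // (Illustrative note: on typical targets a small difference such as 24
  // costs at most TCC_Basic to materialize, while an immediate that needs a
  // multi-instruction sequence, e.g. an arbitrary 64-bit constant, would
  // exceed the threshold and block the unmerging.)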
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType());
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx =
        ConstantInt::get(GEPIIdx->getType(),
                         UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on the IndirectBr edges).
  assert(find_if(GEPIOp->users(), [&](User *Usr) {
        return cast<Instruction>(Usr)->getParent() != SrcBlock;
      }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
  return true;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI, TLI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      bool Modified = optimizeLoadExt(LI);
      unsigned AS = LI->getPointerAddressSpace();
      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
      return Modified;
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
      EnableAndCmpSinking && TLI)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
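///
/// For illustration only (a hedged sketch; the exact shapes accepted are
/// determined by recognizeBSwapOrBitReverseIdiom): the classic mask-and-shift
/// bit reversal of an i32, written in C form,
/// \code
///   v = ((v >> 1) & 0x55555555) | ((v & 0x55555555) << 1);
///   v = ((v >> 2) & 0x33333333) | ((v & 0x33333333) << 2);
///   v = ((v >> 4) & 0x0F0F0F0F) | ((v & 0x0F0F0F0F) << 4);
///   v = ((v >> 8) & 0x00FF00FF) | ((v & 0x00FF00FF) << 8);
///   v = ( v >> 16             ) | ( v               << 16);
/// \endcode
/// ends in an OR that this routine can replace with a single
/// llvm.bitreverse.i32 call when the target supports the operation.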
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If an llvm.dbg.value is far away from the value it describes, ISel may not
// be able to handle it properly. ISel will drop the llvm.dbg.value if it
// cannot find a node corresponding to the value.
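//
// For illustration only (a hand-written sketch, not pass output): given
//   %x = add i32 %a, %b
//   ...many unrelated instructions...
//   call void @llvm.dbg.value(metadata i32 %x, ...)
// the intrinsic is moved to sit directly after the definition of %x, so that
// ISel still sees the two together.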
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block to use the first condition directly in
    // the branch instruction, and remove the no-longer-needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes from
    // the newly generated BB (TmpBB). In the other successor we need to add
    // one incoming edge to the PHI nodes, because both branch instructions now
    // target the same successor.
    // Depending on the original branch condition (and/or) we have to swap the
    // successors (TrueDest, FalseDest), so that we perform the correct update
    // for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (PHINode &PN : TBB->phis()) {
      int i;
      while ((i = PN.getBasicBlockIndex(&BB)) >= 0)
        PN.setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This
      // choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     == FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This
      // choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
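      // (Illustrative arithmetic, not from the original source: with original
      // weights A=3 and B=1, Br1 gets 2*3+1=7 vs 1, i.e. P(TmpBB) = 7/8, and
      // Br2 gets 2*3=6 vs 1, i.e. P(FalseBB | TmpBB) = 1/7. The combined
      // false probability is 1/8 + 7/8 * 1/7 = 1/4, matching B/(A+B) from the
      // original branch.)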
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}