//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h" 91 #include "llvm/Transforms/Utils/SimplifyLibCalls.h" 92 #include "llvm/Transforms/Utils/ValueMapper.h" 93 #include <algorithm> 94 #include <cassert> 95 #include <cstdint> 96 #include <iterator> 97 #include <limits> 98 #include <memory> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 using namespace llvm::PatternMatch; 104 105 #define DEBUG_TYPE "codegenprepare" 106 107 STATISTIC(NumBlocksElim, "Number of blocks eliminated"); 108 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); 109 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); 110 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " 111 "sunken Cmps"); 112 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " 113 "of sunken Casts"); 114 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " 115 "computations were sunk"); 116 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); 117 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); 118 STATISTIC(NumAndsAdded, 119 "Number of and mask instructions added to form ext loads"); 120 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); 121 STATISTIC(NumRetsDup, "Number of return instructions duplicated"); 122 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); 123 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); 124 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); 125 126 STATISTIC(NumMemCmpCalls, "Number of memcmp calls"); 127 STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size"); 128 STATISTIC(NumMemCmpGreaterThanMax, 129 "Number of memcmp calls with size greater than max size"); 130 STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls"); 131 132 static cl::opt<bool> DisableBranchOpts( 133 "disable-cgp-branch-opts", cl::Hidden, cl::init(false), 134 cl::desc("Disable branch optimizations in CodeGenPrepare")); 135 136 static cl::opt<bool> 137 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), 138 cl::desc("Disable GC optimizations in CodeGenPrepare")); 139 140 static cl::opt<bool> DisableSelectToBranch( 141 "disable-cgp-select2branch", cl::Hidden, cl::init(false), 142 cl::desc("Disable select to branch conversion.")); 143 144 static cl::opt<bool> AddrSinkUsingGEPs( 145 "addr-sink-using-gep", cl::Hidden, cl::init(true), 146 cl::desc("Address sinking in CGP using GEPs.")); 147 148 static cl::opt<bool> EnableAndCmpSinking( 149 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 150 cl::desc("Enable sinkinig and/cmp into branches.")); 151 152 static cl::opt<bool> DisableStoreExtract( 153 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 154 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 155 156 static cl::opt<bool> StressStoreExtract( 157 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 158 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 159 160 static cl::opt<bool> DisableExtLdPromotion( 161 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 162 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 163 "CodeGenPrepare")); 164 165 static cl::opt<bool> StressExtLdPromotion( 166 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 167 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 168 "optimization in CodeGenPrepare")); 169 
static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."), cl::init(true));

static cl::opt<unsigned> MemCmpNumLoadsPerBlock(
    "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
    cl::desc("The number of loads per basic block for inline expansion of "
             "memcmp that is only being compared against zero."));

namespace {

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 1, bool>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address.
  ValueMap<Value*, Value*> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *I, Value *Addr,
                          Type *AccessTy, unsigned AS);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *I);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool optimizeSwitchInst(SwitchInst *CI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
  bool splitIndirectCriticalEdges(Function &F);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();
  BFI.reset();
  BPI.reset();

  ModifiedDT = false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    TM = &TPC->getTM<TargetMachine>();
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  OptSize = F.optForSize();

  if (ProfileGuidedSectionPrefix) {
    ProfileSummaryInfo *PSI =
        getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    if (PSI->isFunctionHotInCallGraph(&F))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F))
      F.setSectionPrefix(".unlikely");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock* BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock* Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then iSel may not be able
  // to handle it properly. iSel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
  EverMadeChange |= placeDbgValues(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= splitIndirectCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB->getIterator();
    }
  }
  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

// Return the unique indirectbr predecessor of a block. This may return null
// even if such a predecessor exists, if it's not useful for splitting.
// If a predecessor is found, OtherPreds will contain all other (non-indirectbr)
// predecessors of BB.
static BasicBlock *
findIBRPredecessor(BasicBlock *BB, SmallVectorImpl<BasicBlock *> &OtherPreds) {
  // If the block doesn't have any PHIs, we don't care about it, since there's
  // no point in splitting it.
  PHINode *PN = dyn_cast<PHINode>(BB->begin());
  if (!PN)
    return nullptr;

  // Verify we have exactly one IBR predecessor.
  // Conservatively bail out if one of the other predecessors is not a "regular"
  // terminator (that is, not a switch or a br).
  BasicBlock *IBB = nullptr;
  for (unsigned Pred = 0, E = PN->getNumIncomingValues(); Pred != E; ++Pred) {
    BasicBlock *PredBB = PN->getIncomingBlock(Pred);
    TerminatorInst *PredTerm = PredBB->getTerminator();
    switch (PredTerm->getOpcode()) {
    case Instruction::IndirectBr:
      if (IBB)
        return nullptr;
      IBB = PredBB;
      break;
    case Instruction::Br:
    case Instruction::Switch:
      OtherPreds.push_back(PredBB);
      continue;
    default:
      return nullptr;
    }
  }

  return IBB;
}

// Split critical edges where the source of the edge is an indirectbr
// instruction. This isn't always possible, but we can handle some easy cases.
// This is useful because MI is unable to split such critical edges,
// which means it will not be able to sink instructions along those edges.
// This is especially painful for indirect branches with many successors, where
// we end up having to prepare all outgoing values in the origin block.
//
// Our normal algorithm for splitting critical edges requires us to update
// the outgoing edges of the edge origin block, but for an indirectbr this
// is hard, since it would require finding and updating the block addresses
// the indirect branch uses. But if a block only has a single indirectbr
// predecessor, with the others being regular branches, we can do it in a
// different way.
// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
// We can split D into D0 and D1, where D0 contains only the PHIs from D,
// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
// create the following structure:
// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) {
  // Check whether the function has any indirectbrs, and collect which blocks
  // they may jump to. Since most functions don't have indirect branches,
  // this lowers the common case's overhead to O(Blocks) instead of O(Edges).
  SmallSetVector<BasicBlock *, 16> Targets;
  for (auto &BB : F) {
    auto *IBI = dyn_cast<IndirectBrInst>(BB.getTerminator());
    if (!IBI)
      continue;

    for (unsigned Succ = 0, E = IBI->getNumSuccessors(); Succ != E; ++Succ)
      Targets.insert(IBI->getSuccessor(Succ));
  }

  if (Targets.empty())
    return false;

  bool Changed = false;
  for (BasicBlock *Target : Targets) {
    SmallVector<BasicBlock *, 16> OtherPreds;
    BasicBlock *IBRPred = findIBRPredecessor(Target, OtherPreds);
    // If we did not find an indirectbr, or the indirectbr is the only
    // incoming edge, this isn't the kind of edge we're looking for.
    if (!IBRPred || OtherPreds.empty())
      continue;

    // Don't even think about ehpads/landingpads.
    Instruction *FirstNonPHI = Target->getFirstNonPHI();
    if (FirstNonPHI->isEHPad() || Target->isLandingPad())
      continue;

    BasicBlock *BodyBlock = Target->splitBasicBlock(FirstNonPHI, ".split");
    // It's possible Target was its own successor through an indirectbr.
    // In this case, the indirectbr now comes from BodyBlock.
    if (IBRPred == Target)
      IBRPred = BodyBlock;

    // At this point Target only has PHIs, and BodyBlock has the rest of the
    // block's body. Create a copy of Target that will be used by the "direct"
    // preds.
    ValueToValueMapTy VMap;
    BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F);

    for (BasicBlock *Pred : OtherPreds) {
      // If the target is a loop to itself, then the terminator of the split
      // block needs to be updated.
      if (Pred == Target)
        BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
      else
        Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
    }

    // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that
    // they are clones, so the number of PHIs is the same.
    // (a) Remove the edge coming from IBRPred from the "Direct" PHI
    // (b) Leave that as the only edge in the "Indirect" PHI.
    // (c) Merge the two in the body block.
    BasicBlock::iterator Indirect = Target->begin(),
                         End = Target->getFirstNonPHI()->getIterator();
    BasicBlock::iterator Direct = DirectSucc->begin();
    BasicBlock::iterator MergeInsert = BodyBlock->getFirstInsertionPt();

    assert(&*End == Target->getTerminator() &&
           "Block was expected to only contain PHIs");

    while (Indirect != End) {
      PHINode *DirPHI = cast<PHINode>(Direct);
      PHINode *IndPHI = cast<PHINode>(Indirect);

      // Now, clean up - the direct block shouldn't get the indirect value,
      // and vice versa.
      DirPHI->removeIncomingValue(IBRPred);
      Direct++;

      // Advance the pointer here, to avoid invalidation issues when the old
      // PHI is erased.
      Indirect++;

      PHINode *NewIndPHI = PHINode::Create(IndPHI->getType(), 1, "ind", IndPHI);
      NewIndPHI->addIncoming(IndPHI->getIncomingValueForBlock(IBRPred),
                             IBRPred);

      // Create a PHI in the body block, to merge the direct and indirect
      // predecessors.
      PHINode *MergePHI =
          PHINode::Create(IndPHI->getType(), 2, "merge", &*MergeInsert);
      MergePHI->addIncoming(NewIndPHI, Target);
      MergePHI->addIncoming(DirPHI, DirectSucc);

      IndPHI->replaceAllUsesWith(MergePHI);
      IndPHI->eraseFromParent();
    }

    Changed = true;
  }

  return Changed;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHI())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    bool HasAllSameValue = true;
    BasicBlock::const_iterator DestBBI = DestBB->begin();
    while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) {
      if (DestPN->getIncomingValueForBlock(BB) !=
          DestPN->getIncomingValueForBlock(DestBBPred)) {
        HasAllSameValue = false;
        break;
      }
    }
    if (HasAllSameValue)
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all BB's incoming values are the same as the value from Pred. In
  // this case, there is no reason to skip merging because COPYs are expected
  // to be placed in Pred already.
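  //
  // Worked example of the cost heuristic above (illustrative numbers, not
  // from the original source): with the default FreqRatioToSkipMerge of 2,
  // if Freq(Pred) = 1000 while BB and the blocks merged with it below run
  // 400 times in total, then 1000 > 400 * 2, so we return false at the end
  // of this function and keep BB as a separate empty block.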
  if (SameIncomingValueBBs.count(Pred))
    return true;

  if (!BFI) {
    Function &F = *BB->getParent();
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there is a more complex condition (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only PHIs and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants.
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of a derived pointer is defined after
  // the relocation of its base pointer. If we find a relocation of the same
  // base that is defined earlier than RelocatedBase, we move RelocatedBase
  // right before that relocation. We only consider relocations in the same
  // basic block as RelocatedBase; relocations from other basic blocks are
  // skipped by this optimization, and we do not care about them.
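  //
  // An illustrative sketch of the reordering (not from the original source):
  // given
  //   %derived = call ... @llvm.experimental.gc.relocate(%tok, i32 4, i32 5)
  //   %base    = call ... @llvm.experimental.gc.relocate(%tok, i32 4, i32 4)
  // the loop below moves %base up in front of %derived, so that the GEP we
  // later create off %base dominates the uses of %derived it replaces.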
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However, it would be too expensive to check
      // dominance for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast whether there is already one or not. In this way, we can handle
    // all cases, and the extra bitcast should be optimized away in later
    // passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// SinkCast - Sink the specified cast instruction into its user blocks.
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
                                      CI->getType(), "", &*InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  // than sinking only nop casts, but is helpful on some platforms.
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
    if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(),
                                  ASC->getDestAddressSpace()))
      return false;
  }

  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if
/// possible.
///
/// Return true if any changes were made.
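///
/// A sketch of the rewrite (illustrative IR, not taken from this file):
///   %add = add i32 %a, %b
///   %ov  = icmp ult i32 %add, %a      ; (a + b) <u a detects unsigned wrap
/// becomes:
///   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add = extractvalue { i32, i1 } %res, 0
///   %ov  = extractvalue { i32, i1 } %res, 1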
static bool CombineUAddWithOverflow(CmpInst *CI) {
  Value *A, *B;
  Instruction *AddI;
  if (!match(CI,
             m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
    return false;

  Type *Ty = AddI->getType();
  if (!isa<IntegerType>(Ty))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp:

  if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
    return false;

#ifndef NDEBUG
  // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  // for now:
  if (AddI->hasOneUse())
    assert(*AddI->user_begin() == CI && "expected!");
#endif

  Module *M = CI->getModule();
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);

  auto *InsertPt = AddI->hasOneUse() ? CI : AddI;

  auto *UAddWithOverflow =
      CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
  auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
  auto *Overflow =
      ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);

  CI->replaceAllUsesWith(Overflow);
  AddI->replaceAllUsesWith(UAdd);
  CI->eraseFromParent();
  AddI->eraseFromParent();
  return true;
}

/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  BasicBlock *DefBB = CI->getParent();

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
                          CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(CI->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  if (SinkCmpExpression(CI, TLI))
    return true;

  if (CombineUAddWithOverflow(CI))
    return true;

  return false;
}

/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI,
                                  const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void) InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase
  // register pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink for and mask feeding icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same
    // block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd =
        BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
                               AndI->getOperand(1), "", InsertPt);
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    ++NumAndUses;
    DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}

/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// Sink both the shift and truncate instructions to the BB of the truncate's
/// use.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {
    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", &*TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction to generate a
/// BitExtract instruction. It will only be applied if the architecture
/// supports a BitExtract instruction. Here is an example:
Here is an example: 1528 /// BB1: 1529 /// %x.extract.shift = lshr i64 %arg1, 32 1530 /// BB2: 1531 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1532 /// ==> 1533 /// 1534 /// BB2: 1535 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1536 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1537 /// 1538 /// CodeGen will recognize the pattern in BB2 and generate a BitExtract 1539 /// instruction. 1540 /// Return true if any changes are made. 1541 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1542 const TargetLowering &TLI, 1543 const DataLayout &DL) { 1544 BasicBlock *DefBB = ShiftI->getParent(); 1545 1546 /// Only insert instructions in each block once. 1547 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1548 1549 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1550 1551 bool MadeChange = false; 1552 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1553 UI != E;) { 1554 Use &TheUse = UI.getUse(); 1555 Instruction *User = cast<Instruction>(*UI); 1556 // Preincrement use iterator so we don't invalidate it. 1557 ++UI; 1558 1559 // Don't bother for PHI nodes. 1560 if (isa<PHINode>(User)) 1561 continue; 1562 1563 if (!isExtractBitsCandidateUse(User)) 1564 continue; 1565 1566 BasicBlock *UserBB = User->getParent(); 1567 1568 if (UserBB == DefBB) { 1569 // If the shift and truncate instructions are in the same BB, the use of 1570 // the truncate (TruncUse) may still introduce another truncate if not 1571 // legal. In this case, we would like to sink both the shift and truncate 1572 // instructions to the BB of TruncUse. 1573 // For example: 1574 // BB1: 1575 // i64 shift.result = lshr i64 opnd, imm 1576 // trunc.result = trunc shift.result to i16 1577 // 1578 // BB2: 1579 // ----> We will have an implicit truncate here if the architecture does 1580 // not have i16 compare. 1581 // cmp i16 trunc.result, opnd2 1582 // 1583 if (isa<TruncInst>(User) && shiftIsLegal 1584 // If the type of the truncate is legal, no truncate will be 1585 // introduced in other basic blocks. 1586 && 1587 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1588 MadeChange = 1589 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1590 1591 continue; 1592 } 1593 // If we have already inserted a shift into this block, use it. 1594 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1595 1596 if (!InsertedShift) { 1597 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1598 assert(InsertPt != UserBB->end()); 1599 1600 if (ShiftI->getOpcode() == Instruction::AShr) 1601 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1602 "", &*InsertPt); 1603 else 1604 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1605 "", &*InsertPt); 1606 1607 MadeChange = true; 1608 } 1609 1610 // Replace a use of the shift with a use of the new shift. 1611 TheUse = InsertedShift; 1612 } 1613 1614 // If we removed all uses, nuke the shift. 1615 if (ShiftI->use_empty()) 1616 ShiftI->eraseFromParent(); 1617 1618 return MadeChange; 1619 } 1620 1621 /// If counting leading or trailing zeros is an expensive operation and a zero 1622 /// input is defined, add a check for zero to avoid calling the intrinsic.
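/// (The zero input counts as defined only when the intrinsic's second operand,
/// the is-zero-undef flag, is false; the m_One() match in the body below bails
/// out on the undef-at-zero form, so only the i1-false variant is rewritten.)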
1623 /// 1624 /// We want to transform: 1625 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1626 /// 1627 /// into: 1628 /// entry: 1629 /// %cmpz = icmp eq i64 %A, 0 1630 /// br i1 %cmpz, label %cond.end, label %cond.false 1631 /// cond.false: 1632 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1633 /// br label %cond.end 1634 /// cond.end: 1635 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1636 /// 1637 /// If the transform is performed, return true and set ModifiedDT to true. 1638 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1639 const TargetLowering *TLI, 1640 const DataLayout *DL, 1641 bool &ModifiedDT) { 1642 if (!TLI || !DL) 1643 return false; 1644 1645 // If a zero input is undefined, it doesn't make sense to despeculate that. 1646 if (match(CountZeros->getOperand(1), m_One())) 1647 return false; 1648 1649 // If it's cheap to speculate, there's nothing to do. 1650 auto IntrinsicID = CountZeros->getIntrinsicID(); 1651 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1652 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1653 return false; 1654 1655 // Only handle legal scalar cases. Anything else requires too much work. 1656 Type *Ty = CountZeros->getType(); 1657 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1658 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 1659 return false; 1660 1661 // The intrinsic will be sunk behind a compare against zero and branch. 1662 BasicBlock *StartBlock = CountZeros->getParent(); 1663 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1664 1665 // Create another block after the count zero intrinsic. A PHI will be added 1666 // in this block to select the result of the intrinsic or the bit-width 1667 // constant if the input to the intrinsic is zero. 1668 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 1669 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 1670 1671 // Set up a builder to create a compare, conditional branch, and PHI. 1672 IRBuilder<> Builder(CountZeros->getContext()); 1673 Builder.SetInsertPoint(StartBlock->getTerminator()); 1674 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 1675 1676 // Replace the unconditional branch that was created by the first split with 1677 // a compare against zero and a conditional branch. 1678 Value *Zero = Constant::getNullValue(Ty); 1679 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 1680 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 1681 StartBlock->getTerminator()->eraseFromParent(); 1682 1683 // Create a PHI in the end block to select either the output of the intrinsic 1684 // or the bit width of the operand. 1685 Builder.SetInsertPoint(&EndBlock->front()); 1686 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 1687 CountZeros->replaceAllUsesWith(PN); 1688 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 1689 PN->addIncoming(BitWidth, StartBlock); 1690 PN->addIncoming(CountZeros, CallBlock); 1691 1692 // We are explicitly handling the zero case, so we can set the intrinsic's 1693 // undefined zero argument to 'true'. This will also prevent reprocessing the 1694 // intrinsic; we only despeculate when a zero input is defined. 1695 CountZeros->setArgOperand(1, Builder.getTrue()); 1696 ModifiedDT = true; 1697 return true; 1698 } 1699 1700 namespace { 1701 1702 // This class provides helper functions to expand a memcmp library call into an 1703 // inline expansion. 
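// A minimal usage sketch (mirroring expandMemCmp() further below; `Threshold`
// stands in for the target's getMaxExpandSizeMemcmp() limit and is not a name
// defined in this file):
//   MemCmpExpansion Expansion(CI, SizeVal, MaxLoadSize, NumLoadsPerBlock, DL);
//   if (Expansion.getNumLoads() <= Threshold) {
//     Value *Res = Expansion.getMemCmpExpansion();
//     CI->replaceAllUsesWith(Res);
//     CI->eraseFromParent();
//   }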
1704 class MemCmpExpansion { 1705 struct ResultBlock { 1706 BasicBlock *BB = nullptr; 1707 PHINode *PhiSrc1 = nullptr; 1708 PHINode *PhiSrc2 = nullptr; 1709 1710 ResultBlock() = default; 1711 }; 1712 1713 CallInst *const CI; 1714 ResultBlock ResBlock; 1715 const uint64_t Size; 1716 unsigned MaxLoadSize; 1717 uint64_t NumLoads; 1718 uint64_t NumLoadsNonOneByte; 1719 const uint64_t NumLoadsPerBlock; 1720 std::vector<BasicBlock *> LoadCmpBlocks; 1721 BasicBlock *EndBlock; 1722 PHINode *PhiRes; 1723 const bool IsUsedForZeroCmp; 1724 const DataLayout &DL; 1725 IRBuilder<> Builder; 1726 // Represents the decomposition in blocks of the expansion. For example, 1727 // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and 1728 // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}]. 1729 // TODO(courbet): Involve the target more in this computation. On X86, 7 1730 // bytes can be done more efficiently with two overlapping 4-byte loads than 1731 // covering the interval with [{4, 0}, {2, 4}, {1, 6}]. 1732 struct LoadEntry { 1733 LoadEntry(unsigned LoadSize, uint64_t Offset) 1734 : LoadSize(LoadSize), Offset(Offset) { 1735 assert(Offset % LoadSize == 0 && "invalid load entry"); 1736 } 1737 1738 uint64_t getGEPIndex() const { return Offset / LoadSize; } 1739 1740 // The size of the load for this block, in bytes. 1741 const unsigned LoadSize; 1742 // The offset of this load WRT the base pointer, in bytes. 1743 const uint64_t Offset; 1744 }; 1745 SmallVector<LoadEntry, 8> LoadSequence; 1746 void computeLoadSequence(); 1747 1748 void createLoadCmpBlocks(); 1749 void createResultBlock(); 1750 void setupResultBlockPHINodes(); 1751 void setupEndBlockPHINodes(); 1752 Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex); 1753 void emitLoadCompareBlock(unsigned BlockIndex); 1754 void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex, 1755 unsigned &LoadIndex); 1756 void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned GEPIndex); 1757 void emitMemCmpResultBlock(); 1758 Value *getMemCmpExpansionZeroCase(); 1759 Value *getMemCmpEqZeroOneBlock(); 1760 Value *getMemCmpOneBlock(); 1761 1762 // Computes the decomposition. This is the common code to compute the number 1763 // of loads and the actual load sequence. `callback` is called with each load 1764 // size and the number of loads for that size. 1765 template <typename CallBackT> 1766 void getDecomposition(CallBackT callback) const; 1767 1768 public: 1769 MemCmpExpansion(CallInst *CI, uint64_t Size, unsigned MaxLoadSize, 1770 unsigned NumLoadsPerBlock, const DataLayout &DL); 1771 1772 unsigned getNumBlocks(); 1773 uint64_t getNumLoads() const { return NumLoads; } 1774 1775 Value *getMemCmpExpansion(); 1776 }; 1777 1778 } // end anonymous namespace 1779 1780 // Initialize the basic block structure required for expansion of memcmp call 1781 // with the given maximum load size and memcmp size parameter. 1782 // This structure includes: 1783 // 1. A list of load compare blocks - LoadCmpBlocks. 1784 // 2. An EndBlock, split from original instruction point, which is the block to 1785 // return from. 1786 // 3. ResultBlock, block to branch to for early exit when a 1787 // LoadCmpBlock finds a difference.
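// For example (illustrative, assuming MaxLoadSize == 8 and one load per
// block): a 10-byte memcmp decomposes into the load sequence [{8, 0}, {2, 8}],
// i.e. two LoadCmpBlocks plus the shared ResultBlock and EndBlock.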
1788 MemCmpExpansion::MemCmpExpansion(CallInst *const CI, uint64_t Size, 1789 const unsigned MaxLoadSize, 1790 const unsigned LoadsPerBlock, 1791 const DataLayout &TheDataLayout) 1792 : CI(CI), 1793 Size(Size), 1794 MaxLoadSize(MaxLoadSize), 1795 NumLoads(0), 1796 NumLoadsNonOneByte(0), 1797 NumLoadsPerBlock(LoadsPerBlock), 1798 IsUsedForZeroCmp(isOnlyUsedInZeroEqualityComparison(CI)), 1799 DL(TheDataLayout), 1800 Builder(CI) { 1801 // Scale the max size down if the target can load more bytes than we need. 1802 while (this->MaxLoadSize > Size) { 1803 this->MaxLoadSize /= 2; 1804 } 1805 // Compute the number of loads. At this point we don't want to compute the 1806 // actual decomposition because it might be too large to fit in memory. 1807 getDecomposition([this](unsigned LoadSize, uint64_t NumLoadsForSize) { 1808 NumLoads += NumLoadsForSize; 1809 }); 1810 } 1811 1812 template <typename CallBackT> 1813 void MemCmpExpansion::getDecomposition(CallBackT callback) const { 1814 unsigned LoadSize = this->MaxLoadSize; 1815 assert(Size > 0 && "zero blocks"); 1816 uint64_t CurSize = Size; 1817 while (CurSize) { 1818 assert(LoadSize > 0 && "zero load size"); 1819 const uint64_t NumLoadsForThisSize = CurSize / LoadSize; 1820 if (NumLoadsForThisSize > 0) { 1821 callback(LoadSize, NumLoadsForThisSize); 1822 CurSize = CurSize % LoadSize; 1823 } 1824 // FIXME: This can result in a non-native load size (e.g. X86-32+SSE can 1825 // load 16 and 4 but not 8), which throws the load count off (e.g. in the 1826 // aforementioned case, 16 bytes will count for 2 loads but will generate 1827 // 4). 1828 LoadSize /= 2; 1829 } 1830 } 1831 1832 void MemCmpExpansion::computeLoadSequence() { 1833 uint64_t Offset = 0; 1834 getDecomposition( 1835 [this, &Offset](unsigned LoadSize, uint64_t NumLoadsForSize) { 1836 for (uint64_t I = 0; I < NumLoadsForSize; ++I) { 1837 LoadSequence.push_back({LoadSize, Offset}); 1838 Offset += LoadSize; 1839 } 1840 if (LoadSize > 1) { 1841 ++NumLoadsNonOneByte; 1842 } 1843 }); 1844 assert(LoadSequence.size() == getNumLoads() && "mismatch in number of loads"); 1845 } 1846 1847 unsigned MemCmpExpansion::getNumBlocks() { 1848 if (IsUsedForZeroCmp) 1849 return getNumLoads() / NumLoadsPerBlock + 1850 (getNumLoads() % NumLoadsPerBlock != 0 ? 1 : 0); 1851 return getNumLoads(); 1852 } 1853 1854 void MemCmpExpansion::createLoadCmpBlocks() { 1855 for (unsigned i = 0; i < getNumBlocks(); i++) { 1856 BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb", 1857 EndBlock->getParent(), EndBlock); 1858 LoadCmpBlocks.push_back(BB); 1859 } 1860 } 1861 1862 void MemCmpExpansion::createResultBlock() { 1863 ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block", 1864 EndBlock->getParent(), EndBlock); 1865 } 1866 1867 // This function creates the IR instructions for loading and comparing 1 byte. 1868 // It loads 1 byte from each source of the memcmp parameters with the given 1869 // GEPIndex. It then subtracts the two loaded values and adds this result to the 1870 // final phi node for selecting the memcmp result. 1871 void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex, 1872 unsigned GEPIndex) { 1873 Value *Source1 = CI->getArgOperand(0); 1874 Value *Source2 = CI->getArgOperand(1); 1875 1876 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]); 1877 Type *LoadSizeType = Type::getInt8Ty(CI->getContext()); 1878 // Cast source to LoadSizeType*.
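// (LoadSizeType is i8 here, so the GEPs below index in single-byte units and
// the GEPIndex is simply the byte offset of this load.)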
1879 if (Source1->getType() != LoadSizeType) 1880 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 1881 if (Source2->getType() != LoadSizeType) 1882 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 1883 1884 // Get the base address using the GEPIndex. 1885 if (GEPIndex != 0) { 1886 Source1 = Builder.CreateGEP(LoadSizeType, Source1, 1887 ConstantInt::get(LoadSizeType, GEPIndex)); 1888 Source2 = Builder.CreateGEP(LoadSizeType, Source2, 1889 ConstantInt::get(LoadSizeType, GEPIndex)); 1890 } 1891 1892 Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 1893 Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 1894 1895 LoadSrc1 = Builder.CreateZExt(LoadSrc1, Type::getInt32Ty(CI->getContext())); 1896 LoadSrc2 = Builder.CreateZExt(LoadSrc2, Type::getInt32Ty(CI->getContext())); 1897 Value *Diff = Builder.CreateSub(LoadSrc1, LoadSrc2); 1898 1899 PhiRes->addIncoming(Diff, LoadCmpBlocks[BlockIndex]); 1900 1901 if (BlockIndex < (LoadCmpBlocks.size() - 1)) { 1902 // Early exit branch to EndBlock if a difference is found. Otherwise, 1903 // continue to the next LoadCmpBlock. 1904 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff, 1905 ConstantInt::get(Diff->getType(), 0)); 1906 BranchInst *CmpBr = 1907 BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp); 1908 Builder.Insert(CmpBr); 1909 } else { 1910 // The last block has an unconditional branch to EndBlock. 1911 BranchInst *CmpBr = BranchInst::Create(EndBlock); 1912 Builder.Insert(CmpBr); 1913 } 1914 } 1915 1916 /// Generate an equality comparison for one or more pairs of loaded values. 1917 /// This is used in the case where the result of the memcmp() call is compared 1918 /// equal or not equal to zero. 1919 Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex, 1920 unsigned &LoadIndex) { 1921 assert(LoadIndex < getNumLoads() && 1922 "getCompareLoadPairs() called with no remaining loads"); 1923 std::vector<Value *> XorList, OrList; 1924 Value *Diff; 1925 1926 const unsigned NumLoads = 1927 std::min(getNumLoads() - LoadIndex, NumLoadsPerBlock); 1928 1929 // For a single-block expansion, start inserting before the memcmp call. 1930 if (LoadCmpBlocks.empty()) 1931 Builder.SetInsertPoint(CI); 1932 else 1933 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]); 1934 1935 Value *Cmp = nullptr; 1936 // If we have multiple loads per block, we need to generate a composite 1937 // comparison using xor+or. The type for the combinations is the largest load 1938 // type. 1939 IntegerType *const MaxLoadType = 1940 NumLoads == 1 ? nullptr 1941 : IntegerType::get(CI->getContext(), MaxLoadSize * 8); 1942 for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) { 1943 const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex]; 1944 1945 IntegerType *LoadSizeType = 1946 IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8); 1947 1948 Value *Source1 = CI->getArgOperand(0); 1949 Value *Source2 = CI->getArgOperand(1); 1950 1951 // Cast source to LoadSizeType*. 1952 if (Source1->getType() != LoadSizeType) 1953 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 1954 if (Source2->getType() != LoadSizeType) 1955 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 1956 1957 // Get the base address using a GEP.
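// (Offset is always a multiple of LoadSize, so it can be expressed as the
// typed GEP index Offset / LoadSize; see LoadEntry::getGEPIndex().)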
1958 if (CurLoadEntry.Offset != 0) { 1959 Source1 = Builder.CreateGEP( 1960 LoadSizeType, Source1, 1961 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 1962 Source2 = Builder.CreateGEP( 1963 LoadSizeType, Source2, 1964 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 1965 } 1966 1967 // Get a constant or load a value for each source address. 1968 Value *LoadSrc1 = nullptr; 1969 if (auto *Source1C = dyn_cast<Constant>(Source1)) 1970 LoadSrc1 = ConstantFoldLoadFromConstPtr(Source1C, LoadSizeType, DL); 1971 if (!LoadSrc1) 1972 LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 1973 1974 Value *LoadSrc2 = nullptr; 1975 if (auto *Source2C = dyn_cast<Constant>(Source2)) 1976 LoadSrc2 = ConstantFoldLoadFromConstPtr(Source2C, LoadSizeType, DL); 1977 if (!LoadSrc2) 1978 LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 1979 1980 if (NumLoads != 1) { 1981 if (LoadSizeType != MaxLoadType) { 1982 LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType); 1983 LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType); 1984 } 1985 // If we have multiple loads per block, we need to generate a composite 1986 // comparison using xor+or. 1987 Diff = Builder.CreateXor(LoadSrc1, LoadSrc2); 1988 Diff = Builder.CreateZExt(Diff, MaxLoadType); 1989 XorList.push_back(Diff); 1990 } else { 1991 // If there's only one load per block, we just compare the loaded values. 1992 Cmp = Builder.CreateICmpNE(LoadSrc1, LoadSrc2); 1993 } 1994 } 1995 1996 auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> { 1997 std::vector<Value *> OutList; 1998 for (unsigned i = 0; i < InList.size() - 1; i = i + 2) { 1999 Value *Or = Builder.CreateOr(InList[i], InList[i + 1]); 2000 OutList.push_back(Or); 2001 } 2002 if (InList.size() % 2 != 0) 2003 OutList.push_back(InList.back()); 2004 return OutList; 2005 }; 2006 2007 if (!Cmp) { 2008 // Pairwise OR the XOR results. 2009 OrList = pairWiseOr(XorList); 2010 2011 // Pairwise OR the OR results until one result is left. 2012 while (OrList.size() != 1) { 2013 OrList = pairWiseOr(OrList); 2014 } 2015 Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0)); 2016 } 2017 2018 return Cmp; 2019 } 2020 2021 void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex, 2022 unsigned &LoadIndex) { 2023 Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex); 2024 2025 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1)) 2026 ? EndBlock 2027 : LoadCmpBlocks[BlockIndex + 1]; 2028 // Early exit branch to ResultBlock if a difference is found. Otherwise, 2029 // continue to the next LoadCmpBlock or EndBlock. 2030 BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp); 2031 Builder.Insert(CmpBr); 2032 2033 // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0 2034 // since early exit to ResultBlock was not taken (no difference was found in 2035 // any of the bytes). 2036 if (BlockIndex == LoadCmpBlocks.size() - 1) { 2037 Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0); 2038 PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]); 2039 } 2040 } 2041 2042 // This function creates the IR instructions for loading and comparing using the 2043 // given LoadSize. It loads the number of bytes specified by LoadSize from each 2044 // source of the memcmp parameters. It then does a subtract to see if there was 2045 // a difference in the loaded values. If a difference is found, it branches 2046 // with an early exit to the ResultBlock for calculating which source was 2047 // larger.
Otherwise, it falls through to either the next LoadCmpBlock or 2048 // the EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled with 2049 // a special case through emitLoadCompareByteBlock. The special handling can 2050 // simply subtract the loaded values and add the difference to the result phi node. 2051 void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) { 2052 // There is one load per block in this case, BlockIndex == LoadIndex. 2053 const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex]; 2054 2055 if (CurLoadEntry.LoadSize == 1) { 2056 MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, 2057 CurLoadEntry.getGEPIndex()); 2058 return; 2059 } 2060 2061 Type *LoadSizeType = 2062 IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8); 2063 Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8); 2064 assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type"); 2065 2066 Value *Source1 = CI->getArgOperand(0); 2067 Value *Source2 = CI->getArgOperand(1); 2068 2069 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]); 2070 // Cast source to LoadSizeType*. 2071 if (Source1->getType() != LoadSizeType) 2072 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 2073 if (Source2->getType() != LoadSizeType) 2074 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 2075 2076 // Get the base address using a GEP. 2077 if (CurLoadEntry.Offset != 0) { 2078 Source1 = Builder.CreateGEP( 2079 LoadSizeType, Source1, 2080 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 2081 Source2 = Builder.CreateGEP( 2082 LoadSizeType, Source2, 2083 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 2084 } 2085 2086 // Load LoadSizeType from the base address. 2087 Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 2088 Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 2089 2090 if (DL.isLittleEndian()) { 2091 Function *Bswap = Intrinsic::getDeclaration(CI->getModule(), 2092 Intrinsic::bswap, LoadSizeType); 2093 LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1); 2094 LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2); 2095 } 2096 2097 if (LoadSizeType != MaxLoadType) { 2098 LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType); 2099 LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType); 2100 } 2101 2102 // Add the loaded values to the phi nodes for calculating the memcmp result 2103 // only if the result is not used in a zero equality. 2104 if (!IsUsedForZeroCmp) { 2105 ResBlock.PhiSrc1->addIncoming(LoadSrc1, LoadCmpBlocks[BlockIndex]); 2106 ResBlock.PhiSrc2->addIncoming(LoadSrc2, LoadCmpBlocks[BlockIndex]); 2107 } 2108 2109 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, LoadSrc1, LoadSrc2); 2110 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1)) 2111 ? EndBlock 2112 : LoadCmpBlocks[BlockIndex + 1]; 2113 // Early exit branch to ResultBlock if a difference is found. Otherwise, 2114 // continue to the next LoadCmpBlock or EndBlock. 2115 BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp); 2116 Builder.Insert(CmpBr); 2117 2118 // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0 2119 // since early exit to ResultBlock was not taken (no difference was found in 2120 // any of the bytes).
2121 if (BlockIndex == LoadCmpBlocks.size() - 1) { 2122 Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0); 2123 PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]); 2124 } 2125 } 2126 2127 // This function populates the ResultBlock with a sequence to calculate the 2128 // memcmp result. It compares the two loaded source values and returns -1 if 2129 // src1 < src2 and 1 if src1 > src2. 2130 void MemCmpExpansion::emitMemCmpResultBlock() { 2131 // Special case: if memcmp result is used in a zero equality, result does not 2132 // need to be calculated and can simply return 1. 2133 if (IsUsedForZeroCmp) { 2134 BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt(); 2135 Builder.SetInsertPoint(ResBlock.BB, InsertPt); 2136 Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1); 2137 PhiRes->addIncoming(Res, ResBlock.BB); 2138 BranchInst *NewBr = BranchInst::Create(EndBlock); 2139 Builder.Insert(NewBr); 2140 return; 2141 } 2142 BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt(); 2143 Builder.SetInsertPoint(ResBlock.BB, InsertPt); 2144 2145 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1, 2146 ResBlock.PhiSrc2); 2147 2148 Value *Res = 2149 Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1), 2150 ConstantInt::get(Builder.getInt32Ty(), 1)); 2151 2152 BranchInst *NewBr = BranchInst::Create(EndBlock); 2153 Builder.Insert(NewBr); 2154 PhiRes->addIncoming(Res, ResBlock.BB); 2155 } 2156 2157 void MemCmpExpansion::setupResultBlockPHINodes() { 2158 Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8); 2159 Builder.SetInsertPoint(ResBlock.BB); 2160 // Note: this assumes one load per block. 2161 ResBlock.PhiSrc1 = 2162 Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1"); 2163 ResBlock.PhiSrc2 = 2164 Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2"); 2165 } 2166 2167 void MemCmpExpansion::setupEndBlockPHINodes() { 2168 Builder.SetInsertPoint(&EndBlock->front()); 2169 PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res"); 2170 } 2171 2172 Value *MemCmpExpansion::getMemCmpExpansionZeroCase() { 2173 unsigned LoadIndex = 0; 2174 // This loop populates each of the LoadCmpBlocks with the IR sequence to 2175 // handle multiple loads per block. 2176 for (unsigned I = 0; I < getNumBlocks(); ++I) { 2177 emitLoadCompareBlockMultipleLoads(I, LoadIndex); 2178 } 2179 2180 emitMemCmpResultBlock(); 2181 return PhiRes; 2182 } 2183 2184 /// A memcmp expansion that compares equality with 0 and only has one block of 2185 /// load and compare can bypass the compare, branch, and phi IR that is required 2186 /// in the general case. 2187 Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() { 2188 unsigned LoadIndex = 0; 2189 Value *Cmp = getCompareLoadPairs(0, LoadIndex); 2190 assert(LoadIndex == getNumLoads() && "some entries were not consumed"); 2191 return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext())); 2192 } 2193 2194 /// A memcmp expansion that only has one block of load and compare can bypass 2195 /// the compare, branch, and phi IR that is required in the general case. 2196 Value *MemCmpExpansion::getMemCmpOneBlock() { 2197 assert(NumLoadsPerBlock == 1 && "Only handles one load pair per block"); 2198 2199 Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8); 2200 Value *Source1 = CI->getArgOperand(0); 2201 Value *Source2 = CI->getArgOperand(1); 2202 2203 // Cast source to LoadSizeType*. 
2204 if (Source1->getType() != LoadSizeType) 2205 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 2206 if (Source2->getType() != LoadSizeType) 2207 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 2208 2209 // Load LoadSizeType from the base address. 2210 Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 2211 Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 2212 2213 if (DL.isLittleEndian() && Size != 1) { 2214 Function *Bswap = Intrinsic::getDeclaration(CI->getModule(), 2215 Intrinsic::bswap, LoadSizeType); 2216 LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1); 2217 LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2); 2218 } 2219 2220 if (Size < 4) { 2221 // The i8 and i16 cases don't need compares. We zext the loaded values and 2222 // subtract them to get the suitable negative, zero, or positive i32 result. 2223 LoadSrc1 = Builder.CreateZExt(LoadSrc1, Builder.getInt32Ty()); 2224 LoadSrc2 = Builder.CreateZExt(LoadSrc2, Builder.getInt32Ty()); 2225 return Builder.CreateSub(LoadSrc1, LoadSrc2); 2226 } 2227 2228 // The result of memcmp is negative, zero, or positive, so produce that by 2229 // subtracting 2 extended compare bits: sub (ugt, ult). 2230 // If a target prefers to use selects to get -1/0/1, they should be able 2231 // to transform this later. The inverse transform (going from selects to math) 2232 // may not be possible in the DAG because the selects got converted into 2233 // branches before we got there. 2234 Value *CmpUGT = Builder.CreateICmpUGT(LoadSrc1, LoadSrc2); 2235 Value *CmpULT = Builder.CreateICmpULT(LoadSrc1, LoadSrc2); 2236 Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty()); 2237 Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty()); 2238 return Builder.CreateSub(ZextUGT, ZextULT); 2239 } 2240 2241 // This function expands the memcmp call into an inline expansion and returns 2242 // the memcmp result. 2243 Value *MemCmpExpansion::getMemCmpExpansion() { 2244 computeLoadSequence(); 2245 // A memcmp with a zero-equality comparison and only one block of load and 2246 // compare does not need to set up any extra blocks. This case could be 2247 // handled in the DAG, but since we have all of the machinery to flexibly 2248 // expand any memcmp here, we choose to handle this case too to avoid fragmented lowering. 2249 if ((!IsUsedForZeroCmp && NumLoadsPerBlock != 1) || getNumBlocks() != 1) { 2250 BasicBlock *StartBlock = CI->getParent(); 2251 EndBlock = StartBlock->splitBasicBlock(CI, "endblock"); 2252 setupEndBlockPHINodes(); 2253 createResultBlock(); 2254 2255 // If the return value of memcmp is not used in a zero equality, we need to 2256 // calculate which source was larger. The calculation requires the 2257 // two loaded source values of each load compare block. 2258 // These will be saved in the phi nodes created by setupResultBlockPHINodes. 2259 if (!IsUsedForZeroCmp) setupResultBlockPHINodes(); 2260 2261 // Create the number of required load compare basic blocks. 2262 createLoadCmpBlocks(); 2263 2264 // Update the terminator added by splitBasicBlock to branch to the first 2265 // LoadCmpBlock. 2266 StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]); 2267 } 2268 2269 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 2270 2271 if (IsUsedForZeroCmp) 2272 return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock() 2273 : getMemCmpExpansionZeroCase(); 2274 2275 // TODO: Handle more than one load pair per block in getMemCmpOneBlock().
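// (Until then, this fast path only fires when the whole comparison fits in a
// single load pair.)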
2276 if (getNumBlocks() == 1 && NumLoadsPerBlock == 1) return getMemCmpOneBlock(); 2277 2278 for (unsigned I = 0; I < getNumBlocks(); ++I) { 2279 emitLoadCompareBlock(I); 2280 } 2281 2282 emitMemCmpResultBlock(); 2283 return PhiRes; 2284 } 2285 2286 // This function checks to see if an expansion of memcmp can be generated. 2287 // It checks for constant compare size that is less than the max inline size. 2288 // If an expansion cannot occur, returns false to leave as a library call. 2289 // Otherwise, the library call is replaced with a new IR instruction sequence. 2290 /// We want to transform: 2291 /// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15) 2292 /// To: 2293 /// loadbb: 2294 /// %0 = bitcast i32* %buffer2 to i8* 2295 /// %1 = bitcast i32* %buffer1 to i8* 2296 /// %2 = bitcast i8* %1 to i64* 2297 /// %3 = bitcast i8* %0 to i64* 2298 /// %4 = load i64, i64* %2 2299 /// %5 = load i64, i64* %3 2300 /// %6 = call i64 @llvm.bswap.i64(i64 %4) 2301 /// %7 = call i64 @llvm.bswap.i64(i64 %5) 2302 /// %8 = sub i64 %6, %7 2303 /// %9 = icmp ne i64 %8, 0 2304 /// br i1 %9, label %res_block, label %loadbb1 2305 /// res_block: ; preds = %loadbb2, 2306 /// %loadbb1, %loadbb 2307 /// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ] 2308 /// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ] 2309 /// %10 = icmp ult i64 %phi.src1, %phi.src2 2310 /// %11 = select i1 %10, i32 -1, i32 1 2311 /// br label %endblock 2312 /// loadbb1: ; preds = %loadbb 2313 /// %12 = bitcast i32* %buffer2 to i8* 2314 /// %13 = bitcast i32* %buffer1 to i8* 2315 /// %14 = bitcast i8* %13 to i32* 2316 /// %15 = bitcast i8* %12 to i32* 2317 /// %16 = getelementptr i32, i32* %14, i32 2 2318 /// %17 = getelementptr i32, i32* %15, i32 2 2319 /// %18 = load i32, i32* %16 2320 /// %19 = load i32, i32* %17 2321 /// %20 = call i32 @llvm.bswap.i32(i32 %18) 2322 /// %21 = call i32 @llvm.bswap.i32(i32 %19) 2323 /// %22 = zext i32 %20 to i64 2324 /// %23 = zext i32 %21 to i64 2325 /// %24 = sub i64 %22, %23 2326 /// %25 = icmp ne i64 %24, 0 2327 /// br i1 %25, label %res_block, label %loadbb2 2328 /// loadbb2: ; preds = %loadbb1 2329 /// %26 = bitcast i32* %buffer2 to i8* 2330 /// %27 = bitcast i32* %buffer1 to i8* 2331 /// %28 = bitcast i8* %27 to i16* 2332 /// %29 = bitcast i8* %26 to i16* 2333 /// %30 = getelementptr i16, i16* %28, i16 6 2334 /// %31 = getelementptr i16, i16* %29, i16 6 2335 /// %32 = load i16, i16* %30 2336 /// %33 = load i16, i16* %31 2337 /// %34 = call i16 @llvm.bswap.i16(i16 %32) 2338 /// %35 = call i16 @llvm.bswap.i16(i16 %33) 2339 /// %36 = zext i16 %34 to i64 2340 /// %37 = zext i16 %35 to i64 2341 /// %38 = sub i64 %36, %37 2342 /// %39 = icmp ne i64 %38, 0 2343 /// br i1 %39, label %res_block, label %loadbb3 2344 /// loadbb3: ; preds = %loadbb2 2345 /// %40 = bitcast i32* %buffer2 to i8* 2346 /// %41 = bitcast i32* %buffer1 to i8* 2347 /// %42 = getelementptr i8, i8* %41, i8 14 2348 /// %43 = getelementptr i8, i8* %40, i8 14 2349 /// %44 = load i8, i8* %42 2350 /// %45 = load i8, i8* %43 2351 /// %46 = zext i8 %44 to i32 2352 /// %47 = zext i8 %45 to i32 2353 /// %48 = sub i32 %46, %47 2354 /// br label %endblock 2355 /// endblock: ; preds = %res_block, 2356 /// %loadbb3 2357 /// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ] 2358 /// ret i32 %phi.res 2359 static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI, 2360 const TargetLowering *TLI, const DataLayout *DL) { 2361 NumMemCmpCalls++; 2362 2363 // Early exit from 
expansion if -Oz. 2364 if (CI->getFunction()->optForMinSize()) 2365 return false; 2366 2367 // Early exit from expansion if size is not a constant. 2368 ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2)); 2369 if (!SizeCast) { 2370 NumMemCmpNotConstant++; 2371 return false; 2372 } 2373 const uint64_t SizeVal = SizeCast->getZExtValue(); 2374 2375 // TTI call to check if the target would like to expand memcmp. Also, get the 2376 // max LoadSize. 2377 unsigned MaxLoadSize; 2378 if (!TTI->enableMemCmpExpansion(MaxLoadSize)) return false; 2379 2380 MemCmpExpansion Expansion(CI, SizeVal, MaxLoadSize, MemCmpNumLoadsPerBlock, 2381 *DL); 2382 2383 // Don't expand if this will require more loads than desired by the target. 2384 if (Expansion.getNumLoads() > 2385 TLI->getMaxExpandSizeMemcmp(CI->getFunction()->optForSize())) { 2386 NumMemCmpGreaterThanMax++; 2387 return false; 2388 } 2389 2390 NumMemCmpInlined++; 2391 2392 Value *Res = Expansion.getMemCmpExpansion(); 2393 2394 // Replace call with result of expansion and erase call. 2395 CI->replaceAllUsesWith(Res); 2396 CI->eraseFromParent(); 2397 2398 return true; 2399 } 2400 2401 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { 2402 BasicBlock *BB = CI->getParent(); 2403 2404 // Lower inline assembly if we can. 2405 // If we found an inline asm expression, and if the target knows how to 2406 // lower it to normal LLVM code, do so now. 2407 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 2408 if (TLI->ExpandInlineAsm(CI)) { 2409 // Avoid invalidating the iterator. 2410 CurInstIterator = BB->begin(); 2411 // Avoid processing instructions out of order, which could cause 2412 // reuse before a value is defined. 2413 SunkAddrs.clear(); 2414 return true; 2415 } 2416 // Sink address computing for memory operands into the block. 2417 if (optimizeInlineAsmInst(CI)) 2418 return true; 2419 } 2420 2421 // Align the pointer arguments to this call if the target thinks it's a good 2422 // idea. 2423 unsigned MinSize, PrefAlign; 2424 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 2425 for (auto &Arg : CI->arg_operands()) { 2426 // We want to align both objects whose address is used directly and 2427 // objects whose address is used in casts and GEPs, though it only makes 2428 // sense for GEPs if the offset is a multiple of the desired alignment and 2429 // if size - offset meets the size threshold. 2430 if (!Arg->getType()->isPointerTy()) 2431 continue; 2432 APInt Offset(DL->getPointerSizeInBits( 2433 cast<PointerType>(Arg->getType())->getAddressSpace()), 2434 0); 2435 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 2436 uint64_t Offset2 = Offset.getLimitedValue(); 2437 if ((Offset2 & (PrefAlign-1)) != 0) 2438 continue; 2439 AllocaInst *AI; 2440 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 2441 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 2442 AI->setAlignment(PrefAlign); 2443 // Global variables can only be aligned if they are defined in this 2444 // object (i.e. they are uniquely initialized in this object), and 2445 // over-aligning global variables that have an explicit section is 2446 // forbidden.
2447 GlobalVariable *GV; 2448 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 2449 GV->getPointerAlignment(*DL) < PrefAlign && 2450 DL->getTypeAllocSize(GV->getValueType()) >= 2451 MinSize + Offset2) 2452 GV->setAlignment(PrefAlign); 2453 } 2454 // If this is a memcpy (or similar) then we may be able to improve the 2455 // alignment. 2456 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 2457 unsigned Align = getKnownAlignment(MI->getDest(), *DL); 2458 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) 2459 Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL)); 2460 if (Align > MI->getAlignment()) 2461 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align)); 2462 } 2463 } 2464 2465 // If we have a cold call site, try to sink addressing computation into the 2466 // cold block. This interacts with our handling for loads and stores to 2467 // ensure that we can fold all uses of a potential addressing computation 2468 // into their uses. TODO: generalize this to work over profiling data. 2469 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 2470 for (auto &Arg : CI->arg_operands()) { 2471 if (!Arg->getType()->isPointerTy()) 2472 continue; 2473 unsigned AS = Arg->getType()->getPointerAddressSpace(); 2474 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 2475 } 2476 2477 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 2478 if (II) { 2479 switch (II->getIntrinsicID()) { 2480 default: break; 2481 case Intrinsic::objectsize: { 2482 // Lower all uses of llvm.objectsize.* 2483 ConstantInt *RetVal = 2484 lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); 2485 // Substituting this can cause recursive simplifications, which can 2486 // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case 2487 // this happens. 2488 2489 Value *CurValue = &*CurInstIterator; 2490 WeakTrackingVH IterHandle(CurValue); 2491 2492 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 2493 2494 // If the iterator instruction was recursively deleted, start over at the 2495 // start of the block. 2496 if (IterHandle != CurValue) { 2497 CurInstIterator = BB->begin(); 2498 SunkAddrs.clear(); 2499 } 2500 return true; 2501 } 2502 case Intrinsic::aarch64_stlxr: 2503 case Intrinsic::aarch64_stxr: { 2504 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 2505 if (!ExtVal || !ExtVal->hasOneUse() || 2506 ExtVal->getParent() == CI->getParent()) 2507 return false; 2508 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 2509 ExtVal->moveBefore(CI); 2510 // Mark this instruction as "inserted by CGP", so that other 2511 // optimizations don't touch it. 2512 InsertedInsts.insert(ExtVal); 2513 return true; 2514 } 2515 case Intrinsic::invariant_group_barrier: 2516 II->replaceAllUsesWith(II->getArgOperand(0)); 2517 II->eraseFromParent(); 2518 return true; 2519 2520 case Intrinsic::cttz: 2521 case Intrinsic::ctlz: 2522 // If counting zeros is expensive, try to avoid it. 2523 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 2524 } 2525 2526 if (TLI) { 2527 SmallVector<Value*, 2> PtrOps; 2528 Type *AccessTy; 2529 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 2530 while (!PtrOps.empty()) { 2531 Value *PtrVal = PtrOps.pop_back_val(); 2532 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 2533 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 2534 return true; 2535 } 2536 } 2537 } 2538 2539 // From here on out we're working with named functions.
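// (Indirect calls have no called Function, so they are skipped before the
// fortified-libcall and memcmp handling below.)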
2540 if (!CI->getCalledFunction()) return false; 2541 2542 // Lower all default uses of _chk calls. This is very similar 2543 // to what InstCombineCalls does, but here we are only lowering calls 2544 // to fortified library functions (e.g. __memcpy_chk) that have the default 2545 // "don't know" as the objectsize. Anything else should be left alone. 2546 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 2547 if (Value *V = Simplifier.optimizeCall(CI)) { 2548 CI->replaceAllUsesWith(V); 2549 CI->eraseFromParent(); 2550 return true; 2551 } 2552 2553 LibFunc Func; 2554 if (TLInfo->getLibFunc(ImmutableCallSite(CI), Func) && 2555 Func == LibFunc_memcmp && expandMemCmp(CI, TTI, TLI, DL)) { 2556 ModifiedDT = true; 2557 return true; 2558 } 2559 return false; 2560 } 2561 2562 /// Look for opportunities to duplicate return instructions to the predecessor 2563 /// to enable tail call optimizations. The case it is currently looking for is: 2564 /// @code 2565 /// bb0: 2566 /// %tmp0 = tail call i32 @f0() 2567 /// br label %return 2568 /// bb1: 2569 /// %tmp1 = tail call i32 @f1() 2570 /// br label %return 2571 /// bb2: 2572 /// %tmp2 = tail call i32 @f2() 2573 /// br label %return 2574 /// return: 2575 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 2576 /// ret i32 %retval 2577 /// @endcode 2578 /// 2579 /// => 2580 /// 2581 /// @code 2582 /// bb0: 2583 /// %tmp0 = tail call i32 @f0() 2584 /// ret i32 %tmp0 2585 /// bb1: 2586 /// %tmp1 = tail call i32 @f1() 2587 /// ret i32 %tmp1 2588 /// bb2: 2589 /// %tmp2 = tail call i32 @f2() 2590 /// ret i32 %tmp2 2591 /// @endcode 2592 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { 2593 if (!TLI) 2594 return false; 2595 2596 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 2597 if (!RetI) 2598 return false; 2599 2600 PHINode *PN = nullptr; 2601 BitCastInst *BCI = nullptr; 2602 Value *V = RetI->getReturnValue(); 2603 if (V) { 2604 BCI = dyn_cast<BitCastInst>(V); 2605 if (BCI) 2606 V = BCI->getOperand(0); 2607 2608 PN = dyn_cast<PHINode>(V); 2609 if (!PN) 2610 return false; 2611 } 2612 2613 if (PN && PN->getParent() != BB) 2614 return false; 2615 2616 // Make sure there are no instructions between the PHI and return, or that the 2617 // return is the first instruction in the block. 2618 if (PN) { 2619 BasicBlock::iterator BI = BB->begin(); 2620 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); 2621 if (&*BI == BCI) 2622 // Also skip over the bitcast. 2623 ++BI; 2624 if (&*BI != RetI) 2625 return false; 2626 } else { 2627 BasicBlock::iterator BI = BB->begin(); 2628 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 2629 if (&*BI != RetI) 2630 return false; 2631 } 2632 2633 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 2634 /// call. 2635 const Function *F = BB->getParent(); 2636 SmallVector<CallInst*, 4> TailCalls; 2637 if (PN) { 2638 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2639 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 2640 // Make sure the phi value is indeed produced by the tail call. 
2641 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && 2642 TLI->mayBeEmittedAsTailCall(CI) && 2643 attributesPermitTailCall(F, CI, RetI, *TLI)) 2644 TailCalls.push_back(CI); 2645 } 2646 } else { 2647 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2648 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 2649 if (!VisitedBBs.insert(*PI).second) 2650 continue; 2651 2652 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 2653 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 2654 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 2655 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 2656 if (RI == RE) 2657 continue; 2658 2659 CallInst *CI = dyn_cast<CallInst>(&*RI); 2660 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2661 attributesPermitTailCall(F, CI, RetI, *TLI)) 2662 TailCalls.push_back(CI); 2663 } 2664 } 2665 2666 bool Changed = false; 2667 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { 2668 CallInst *CI = TailCalls[i]; 2669 CallSite CS(CI); 2670 2671 // Conservatively require the attributes of the call to match those of the 2672 // return. Ignore noalias because it doesn't affect the call sequence. 2673 AttributeList CalleeAttrs = CS.getAttributes(); 2674 AttributeList CallerAttrs = F->getAttributes(); 2675 if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex) 2676 .removeAttribute(Attribute::NoAlias) != 2677 AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex) 2678 .removeAttribute(Attribute::NoAlias)) 2679 continue; 2680 // Make sure the call instruction is followed by an unconditional branch to 2681 // the return block. 2682 BasicBlock *CallBB = CI->getParent(); 2683 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); 2684 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2685 continue; 2686 2687 // Duplicate the return into CallBB. 2688 (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); 2689 ModifiedDT = Changed = true; 2690 ++NumRetsDup; 2691 } 2692 2693 // If we eliminated all predecessors of the block, delete the block now. 2694 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 2695 BB->eraseFromParent(); 2696 2697 return Changed; 2698 } 2699 2700 //===----------------------------------------------------------------------===// 2701 // Memory Optimization 2702 //===----------------------------------------------------------------------===// 2703 2704 namespace { 2705 2706 /// This is an extended version of TargetLowering::AddrMode 2707 /// which holds actual Value*'s for register values. 2708 struct ExtAddrMode : public TargetLowering::AddrMode { 2709 Value *BaseReg = nullptr; 2710 Value *ScaledReg = nullptr; 2711 Value *OriginalValue = nullptr; 2712 2713 enum FieldName { 2714 NoField = 0x00, 2715 BaseRegField = 0x01, 2716 BaseGVField = 0x02, 2717 BaseOffsField = 0x04, 2718 ScaledRegField = 0x08, 2719 ScaleField = 0x10, 2720 MultipleFields = 0xff 2721 }; 2722 2723 ExtAddrMode() = default; 2724 2725 void print(raw_ostream &OS) const; 2726 void dump() const; 2727 2728 FieldName compare(const ExtAddrMode &other) { 2729 // First check that the types are the same on each field, as differing types 2730 // are something we can't cope with later on.
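// (Illustrative outcomes, assuming matching types: two AddrModes that differ
// only in BaseOffs compare as BaseOffsField; AddrModes that differ in two or
// more fields compare as MultipleFields.)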
2731 if (BaseReg && other.BaseReg && 2732 BaseReg->getType() != other.BaseReg->getType()) 2733 return MultipleFields; 2734 if (BaseGV && other.BaseGV && 2735 BaseGV->getType() != other.BaseGV->getType()) 2736 return MultipleFields; 2737 if (ScaledReg && other.ScaledReg && 2738 ScaledReg->getType() != other.ScaledReg->getType()) 2739 return MultipleFields; 2740 2741 // Check each field to see if it differs. 2742 unsigned Result = NoField; 2743 if (BaseReg != other.BaseReg) 2744 Result |= BaseRegField; 2745 if (BaseGV != other.BaseGV) 2746 Result |= BaseGVField; 2747 if (BaseOffs != other.BaseOffs) 2748 Result |= BaseOffsField; 2749 if (ScaledReg != other.ScaledReg) 2750 Result |= ScaledRegField; 2751 // Don't count 0 as being a different scale, because that actually means 2752 // unscaled (which will already be counted by having no ScaledReg). 2753 if (Scale && other.Scale && Scale != other.Scale) 2754 Result |= ScaleField; 2755 2756 if (countPopulation(Result) > 1) 2757 return MultipleFields; 2758 else 2759 return static_cast<FieldName>(Result); 2760 } 2761 2762 // AddrModes with a base reg or gv where the reg/gv is just the original 2763 // value are trivial. 2764 bool isTrivial() { 2765 bool Trivial = (BaseGV && BaseGV == OriginalValue) || 2766 (BaseReg && BaseReg == OriginalValue); 2767 // If the AddrMode is trivial it shouldn't have an offset or be scaled. 2768 if (Trivial) { 2769 assert(BaseOffs == 0); 2770 assert(Scale == 0); 2771 } 2772 return Trivial; 2773 } 2774 }; 2775 2776 } // end anonymous namespace 2777 2778 #ifndef NDEBUG 2779 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2780 AM.print(OS); 2781 return OS; 2782 } 2783 #endif 2784 2785 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2786 void ExtAddrMode::print(raw_ostream &OS) const { 2787 bool NeedPlus = false; 2788 OS << "["; 2789 if (BaseGV) { 2790 OS << (NeedPlus ? " + " : "") 2791 << "GV:"; 2792 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2793 NeedPlus = true; 2794 } 2795 2796 if (BaseOffs) { 2797 OS << (NeedPlus ? " + " : "") 2798 << BaseOffs; 2799 NeedPlus = true; 2800 } 2801 2802 if (BaseReg) { 2803 OS << (NeedPlus ? " + " : "") 2804 << "Base:"; 2805 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2806 NeedPlus = true; 2807 } 2808 if (Scale) { 2809 OS << (NeedPlus ? " + " : "") 2810 << Scale << "*"; 2811 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2812 } 2813 2814 OS << ']'; 2815 } 2816 2817 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2818 print(dbgs()); 2819 dbgs() << '\n'; 2820 } 2821 #endif 2822 2823 namespace { 2824 2825 /// \brief This class provides transaction based operation on the IR. 2826 /// Every change made through this class is recorded in the internal state and 2827 /// can be undone (rollback) until commit is called. 2828 class TypePromotionTransaction { 2829 /// \brief This represents the common interface of the individual transaction. 2830 /// Each class implements the logic for doing one specific modification on 2831 /// the IR via the TypePromotionTransaction. 2832 class TypePromotionAction { 2833 protected: 2834 /// The Instruction modified. 2835 Instruction *Inst; 2836 2837 public: 2838 /// \brief Constructor of the action. 2839 /// The constructor performs the related action on the IR. 2840 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2841 2842 virtual ~TypePromotionAction() = default; 2843 2844 /// \brief Undo the modification done by this action. 
2845 /// When this method is called, the IR must be in the same state as it was 2846 /// before this action was applied. 2847 /// \pre Undoing the action works if and only if the IR is in the exact same 2848 /// state as it was directly after this action was applied. 2849 virtual void undo() = 0; 2850 2851 /// \brief Advocate every change made by this action. 2852 /// When the results of the action on the IR are to be kept, it is important 2853 /// to call this function; otherwise hidden information may be kept forever. 2854 virtual void commit() { 2855 // Nothing to be done, this action is not doing anything. 2856 } 2857 }; 2858 2859 /// \brief Utility to remember the position of an instruction. 2860 class InsertionHandler { 2861 /// Position of an instruction. 2862 /// An instruction either: 2863 /// - Is the first in a basic block: BB is used. 2864 /// - Has a previous instruction: PrevInst is used. 2865 union { 2866 Instruction *PrevInst; 2867 BasicBlock *BB; 2868 } Point; 2869 2870 /// Remember whether or not the instruction had a previous instruction. 2871 bool HasPrevInstruction; 2872 2873 public: 2874 /// \brief Record the position of \p Inst. 2875 InsertionHandler(Instruction *Inst) { 2876 BasicBlock::iterator It = Inst->getIterator(); 2877 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2878 if (HasPrevInstruction) 2879 Point.PrevInst = &*--It; 2880 else 2881 Point.BB = Inst->getParent(); 2882 } 2883 2884 /// \brief Insert \p Inst at the recorded position. 2885 void insert(Instruction *Inst) { 2886 if (HasPrevInstruction) { 2887 if (Inst->getParent()) 2888 Inst->removeFromParent(); 2889 Inst->insertAfter(Point.PrevInst); 2890 } else { 2891 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2892 if (Inst->getParent()) 2893 Inst->moveBefore(Position); 2894 else 2895 Inst->insertBefore(Position); 2896 } 2897 } 2898 }; 2899 2900 /// \brief Move an instruction before another. 2901 class InstructionMoveBefore : public TypePromotionAction { 2902 /// Original position of the instruction. 2903 InsertionHandler Position; 2904 2905 public: 2906 /// \brief Move \p Inst before \p Before. 2907 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2908 : TypePromotionAction(Inst), Position(Inst) { 2909 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); 2910 Inst->moveBefore(Before); 2911 } 2912 2913 /// \brief Move the instruction back to its original position. 2914 void undo() override { 2915 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2916 Position.insert(Inst); 2917 } 2918 }; 2919 2920 /// \brief Set the operand of an instruction with a new value. 2921 class OperandSetter : public TypePromotionAction { 2922 /// Original operand of the instruction. 2923 Value *Origin; 2924 2925 /// Index of the modified operand. 2926 unsigned Idx; 2927 2928 public: 2929 /// \brief Set \p Idx operand of \p Inst with \p NewVal. 2930 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2931 : TypePromotionAction(Inst), Idx(Idx) { 2932 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2933 << "for:" << *Inst << "\n" 2934 << "with:" << *NewVal << "\n"); 2935 Origin = Inst->getOperand(Idx); 2936 Inst->setOperand(Idx, NewVal); 2937 } 2938 2939 /// \brief Restore the original value of the instruction.
2940 void undo() override { 2941 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2942 << "for: " << *Inst << "\n" 2943 << "with: " << *Origin << "\n"); 2944 Inst->setOperand(Idx, Origin); 2945 } 2946 }; 2947 2948 /// \brief Hide the operands of an instruction. 2949 /// Do as if this instruction was not using any of its operands. 2950 class OperandsHider : public TypePromotionAction { 2951 /// The list of original operands. 2952 SmallVector<Value *, 4> OriginalValues; 2953 2954 public: 2955 /// \brief Remove \p Inst from the uses of the operands of \p Inst. 2956 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2957 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2958 unsigned NumOpnds = Inst->getNumOperands(); 2959 OriginalValues.reserve(NumOpnds); 2960 for (unsigned It = 0; It < NumOpnds; ++It) { 2961 // Save the current operand. 2962 Value *Val = Inst->getOperand(It); 2963 OriginalValues.push_back(Val); 2964 // Set a dummy one. 2965 // We could use OperandSetter here, but that would imply an overhead 2966 // that we are not willing to pay. 2967 Inst->setOperand(It, UndefValue::get(Val->getType())); 2968 } 2969 } 2970 2971 /// \brief Restore the original list of uses. 2972 void undo() override { 2973 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2974 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2975 Inst->setOperand(It, OriginalValues[It]); 2976 } 2977 }; 2978 2979 /// \brief Build a truncate instruction. 2980 class TruncBuilder : public TypePromotionAction { 2981 Value *Val; 2982 2983 public: 2984 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty 2985 /// result. 2986 /// trunc Opnd to Ty. 2987 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2988 IRBuilder<> Builder(Opnd); 2989 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2990 DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2991 } 2992 2993 /// \brief Get the built value. 2994 Value *getBuiltValue() { return Val; } 2995 2996 /// \brief Remove the built instruction. 2997 void undo() override { 2998 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2999 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 3000 IVal->eraseFromParent(); 3001 } 3002 }; 3003 3004 /// \brief Build a sign extension instruction. 3005 class SExtBuilder : public TypePromotionAction { 3006 Value *Val; 3007 3008 public: 3009 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty 3010 /// result. 3011 /// sext Opnd to Ty. 3012 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 3013 : TypePromotionAction(InsertPt) { 3014 IRBuilder<> Builder(InsertPt); 3015 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 3016 DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 3017 } 3018 3019 /// \brief Get the built value. 3020 Value *getBuiltValue() { return Val; } 3021 3022 /// \brief Remove the built instruction. 3023 void undo() override { 3024 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 3025 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 3026 IVal->eraseFromParent(); 3027 } 3028 }; 3029 3030 /// \brief Build a zero extension instruction. 3031 class ZExtBuilder : public TypePromotionAction { 3032 Value *Val; 3033 3034 public: 3035 /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty 3036 /// result. 3037 /// zext Opnd to Ty. 
3038     ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3039         : TypePromotionAction(InsertPt) {
3040       IRBuilder<> Builder(InsertPt);
3041       Val = Builder.CreateZExt(Opnd, Ty, "promoted");
3042       DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
3043     }
3044 
3045     /// \brief Get the built value.
3046     Value *getBuiltValue() { return Val; }
3047 
3048     /// \brief Remove the built instruction.
3049     void undo() override {
3050       DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3051       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3052         IVal->eraseFromParent();
3053     }
3054   };
3055 
3056   /// \brief Mutate an instruction to another type.
3057   class TypeMutator : public TypePromotionAction {
3058     /// Record the original type.
3059     Type *OrigTy;
3060 
3061   public:
3062     /// \brief Mutate the type of \p Inst into \p NewTy.
3063     TypeMutator(Instruction *Inst, Type *NewTy)
3064         : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3065       DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3066                    << "\n");
3067       Inst->mutateType(NewTy);
3068     }
3069 
3070     /// \brief Mutate the instruction back to its original type.
3071     void undo() override {
3072       DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3073                    << "\n");
3074       Inst->mutateType(OrigTy);
3075     }
3076   };
3077 
3078   /// \brief Replace the uses of an instruction by another instruction.
3079   class UsesReplacer : public TypePromotionAction {
3080     /// Helper structure to keep track of the replaced uses.
3081     struct InstructionAndIdx {
3082       /// The instruction using the replaced value.
3083       Instruction *Inst;
3084 
3085       /// The operand index at which Inst uses the replaced value.
3086       unsigned Idx;
3087 
3088       InstructionAndIdx(Instruction *Inst, unsigned Idx)
3089           : Inst(Inst), Idx(Idx) {}
3090     };
3091 
3092     /// Keep track of the original uses (pair Instruction, Index).
3093     SmallVector<InstructionAndIdx, 4> OriginalUses;
3094 
3095     using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3096 
3097   public:
3098     /// \brief Replace all the uses of \p Inst by \p New.
3099     UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
3100       DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3101                    << "\n");
3102       // Record the original uses.
3103       for (Use &U : Inst->uses()) {
3104         Instruction *UserI = cast<Instruction>(U.getUser());
3105         OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3106       }
3107       // Now, we can replace the uses.
3108       Inst->replaceAllUsesWith(New);
3109     }
3110 
3111     /// \brief Reassign the original uses of Inst back to Inst.
3112     void undo() override {
3113       DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3114       for (use_iterator UseIt = OriginalUses.begin(),
3115                         EndIt = OriginalUses.end();
3116            UseIt != EndIt; ++UseIt) {
3117         UseIt->Inst->setOperand(UseIt->Idx, Inst);
3118       }
3119     }
3120   };
3121 
3122   /// \brief Remove an instruction from the IR.
3123   class InstructionRemover : public TypePromotionAction {
3124     /// Original position of the instruction.
3125     InsertionHandler Inserter;
3126 
3127     /// Helper structure to hide all the links to the instruction. In other
3128     /// words, this helps to do as if the instruction was removed.
3129     OperandsHider Hider;
3130 
3131     /// Keep track of the uses replaced, if any.
3132     UsesReplacer *Replacer = nullptr;
3133 
3134     /// Keep track of instructions removed.
3135     SetOfInstrs &RemovedInsts;
3136 
3137   public:
3138     /// \brief Remove all references of \p Inst and optionally replace all its
3139     /// uses with New.
3140     /// \p RemovedInsts Keep track of the instructions removed by this Action.
3141     /// \pre If !Inst->use_empty(), then New != nullptr
3142     InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3143                        Value *New = nullptr)
3144         : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3145           RemovedInsts(RemovedInsts) {
3146       if (New)
3147         Replacer = new UsesReplacer(Inst, New);
3148       DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3149       RemovedInsts.insert(Inst);
3150       /// The instructions removed here will be freed after completing
3151       /// optimizeBlock() for all blocks as we need to keep track of the
3152       /// removed instructions during promotion.
3153       Inst->removeFromParent();
3154     }
3155 
3156     ~InstructionRemover() override { delete Replacer; }
3157 
3158     /// \brief Resurrect the instruction and reassign it to the proper uses if
3159     /// a new value was provided when building this action.
3160     void undo() override {
3161       DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3162       Inserter.insert(Inst);
3163       if (Replacer)
3164         Replacer->undo();
3165       Hider.undo();
3166       RemovedInsts.erase(Inst);
3167     }
3168   };
3169 
3170 public:
3171   /// Restoration point.
3172   /// The restoration point is a pointer to an action instead of an iterator
3173   /// because the iterator may be invalidated but not the pointer.
3174   using ConstRestorationPt = const TypePromotionAction *;
3175 
3176   TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3177       : RemovedInsts(RemovedInsts) {}
3178 
3179   /// Advocate every change made in that transaction.
3180   void commit();
3181 
3182   /// Undo all the changes made after the given point.
3183   void rollback(ConstRestorationPt Point);
3184 
3185   /// Get the current restoration point.
3186   ConstRestorationPt getRestorationPoint() const;
3187 
3188   /// \name API for IR modification with state keeping to support rollback.
3189   /// @{
3190   /// Same as Instruction::setOperand.
3191   void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3192 
3193   /// Same as Instruction::eraseFromParent.
3194   void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3195 
3196   /// Same as Value::replaceAllUsesWith.
3197   void replaceAllUsesWith(Instruction *Inst, Value *New);
3198 
3199   /// Same as Value::mutateType.
3200   void mutateType(Instruction *Inst, Type *NewTy);
3201 
3202   /// Same as IRBuilder::createTrunc.
3203   Value *createTrunc(Instruction *Opnd, Type *Ty);
3204 
3205   /// Same as IRBuilder::createSExt.
3206   Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3207 
3208   /// Same as IRBuilder::createZExt.
3209   Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3210 
3211   /// Same as Instruction::moveBefore.
3212   void moveBefore(Instruction *Inst, Instruction *Before);
3213   /// @}
3214 
3215 private:
3216   /// The ordered list of actions made so far.
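  /// Note that rollback() pops actions off the back of this list, so changes
  /// are undone in reverse (LIFO) order relative to how they were applied.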
3217 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 3218 3219 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 3220 3221 SetOfInstrs &RemovedInsts; 3222 }; 3223 3224 } // end anonymous namespace 3225 3226 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 3227 Value *NewVal) { 3228 Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>( 3229 Inst, Idx, NewVal)); 3230 } 3231 3232 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 3233 Value *NewVal) { 3234 Actions.push_back( 3235 llvm::make_unique<TypePromotionTransaction::InstructionRemover>( 3236 Inst, RemovedInsts, NewVal)); 3237 } 3238 3239 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 3240 Value *New) { 3241 Actions.push_back( 3242 llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 3243 } 3244 3245 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 3246 Actions.push_back( 3247 llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 3248 } 3249 3250 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 3251 Type *Ty) { 3252 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 3253 Value *Val = Ptr->getBuiltValue(); 3254 Actions.push_back(std::move(Ptr)); 3255 return Val; 3256 } 3257 3258 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 3259 Value *Opnd, Type *Ty) { 3260 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 3261 Value *Val = Ptr->getBuiltValue(); 3262 Actions.push_back(std::move(Ptr)); 3263 return Val; 3264 } 3265 3266 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 3267 Value *Opnd, Type *Ty) { 3268 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 3269 Value *Val = Ptr->getBuiltValue(); 3270 Actions.push_back(std::move(Ptr)); 3271 return Val; 3272 } 3273 3274 void TypePromotionTransaction::moveBefore(Instruction *Inst, 3275 Instruction *Before) { 3276 Actions.push_back( 3277 llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 3278 Inst, Before)); 3279 } 3280 3281 TypePromotionTransaction::ConstRestorationPt 3282 TypePromotionTransaction::getRestorationPoint() const { 3283 return !Actions.empty() ? Actions.back().get() : nullptr; 3284 } 3285 3286 void TypePromotionTransaction::commit() { 3287 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 3288 ++It) 3289 (*It)->commit(); 3290 Actions.clear(); 3291 } 3292 3293 void TypePromotionTransaction::rollback( 3294 TypePromotionTransaction::ConstRestorationPt Point) { 3295 while (!Actions.empty() && Point != Actions.back().get()) { 3296 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 3297 Curr->undo(); 3298 } 3299 } 3300 3301 namespace { 3302 3303 /// \brief A helper class for matching addressing modes. 3304 /// 3305 /// This encapsulates the logic for matching the target-legal addressing modes. 3306 class AddressingModeMatcher { 3307 SmallVectorImpl<Instruction*> &AddrModeInsts; 3308 const TargetLowering &TLI; 3309 const TargetRegisterInfo &TRI; 3310 const DataLayout &DL; 3311 3312 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 3313 /// the memory instruction that we're computing this address for. 3314 Type *AccessTy; 3315 unsigned AddrSpace; 3316 Instruction *MemoryInst; 3317 3318 /// This is the addressing mode that we're building up. This is 3319 /// part of the return value of this addressing mode matching stuff. 
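  /// For an address of the form [%base + 4*%idx + 8] (an illustrative
  /// sketch), this records %base as the base register, %idx as the scaled
  /// register with a scale of 4, and 8 as the constant offset.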
3320   ExtAddrMode &AddrMode;
3321 
3322   /// The instructions inserted by other CodeGenPrepare optimizations.
3323   const SetOfInstrs &InsertedInsts;
3324 
3325   /// A map from the instructions to their type before promotion.
3326   InstrToOrigTy &PromotedInsts;
3327 
3328   /// The ongoing transaction where every action should be registered.
3329   TypePromotionTransaction &TPT;
3330 
3331   /// This is set to true when we should not do profitability checks.
3332   /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3333   bool IgnoreProfitability;
3334 
3335   AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
3336                         const TargetLowering &TLI,
3337                         const TargetRegisterInfo &TRI,
3338                         Type *AT, unsigned AS,
3339                         Instruction *MI, ExtAddrMode &AM,
3340                         const SetOfInstrs &InsertedInsts,
3341                         InstrToOrigTy &PromotedInsts,
3342                         TypePromotionTransaction &TPT)
3343       : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3344         DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
3345         MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
3346         PromotedInsts(PromotedInsts), TPT(TPT) {
3347     IgnoreProfitability = false;
3348   }
3349 
3350 public:
3351   /// Find the maximal addressing mode that a load/store of V can fold,
3352   /// given an access type of AccessTy. This returns a list of involved
3353   /// instructions in AddrModeInsts.
3354   /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3355   /// optimizations.
3356   /// \p PromotedInsts maps the instructions to their type before promotion.
3357   /// \p TPT The ongoing transaction where every action should be registered.
3358   static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
3359                            Instruction *MemoryInst,
3360                            SmallVectorImpl<Instruction*> &AddrModeInsts,
3361                            const TargetLowering &TLI,
3362                            const TargetRegisterInfo &TRI,
3363                            const SetOfInstrs &InsertedInsts,
3364                            InstrToOrigTy &PromotedInsts,
3365                            TypePromotionTransaction &TPT) {
3366     ExtAddrMode Result;
3367 
3368     bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI,
3369                                          AccessTy, AS,
3370                                          MemoryInst, Result, InsertedInsts,
3371                                          PromotedInsts, TPT).matchAddr(V, 0);
3372     (void)Success; assert(Success && "Couldn't select *anything*?");
3373     return Result;
3374   }
3375 
3376 private:
3377   bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3378   bool matchAddr(Value *V, unsigned Depth);
3379   bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
3380                           bool *MovedAway = nullptr);
3381   bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3382                                             ExtAddrMode &AMBefore,
3383                                             ExtAddrMode &AMAfter);
3384   bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3385   bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3386                              Value *PromotedOperand) const;
3387 };
3388 
3389 /// \brief A helper class for combining addressing modes.
3390 class AddressingModeCombiner {
3391 private:
3392   /// The addressing modes we've collected.
3393   SmallVector<ExtAddrMode, 16> AddrModes;
3394 
3395   /// The field in which the AddrModes differ, when we have more than one.
3396   ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3397 
3398   /// Are the AddrModes that we have all just equal to their original values?
3399   bool AllAddrModesTrivial = true;
3400 
3401 public:
3402   /// \brief Get the combined AddrMode
3403   const ExtAddrMode &getAddrMode() const {
3404     return AddrModes[0];
3405   }
3406 
3407   /// \brief Add a new AddrMode if it's compatible with the AddrModes we already
3408   /// have.
3409   /// \return True iff we succeeded in doing so.
3410   bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
3411     // Take note of whether we have any non-trivial AddrModes, as we need to
3412     // detect when all AddrModes are trivial: then we would introduce a phi or
3413     // select which just duplicates what's already there.
3414     AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3415 
3416     // If this is the first addrmode then everything is fine.
3417     if (AddrModes.empty()) {
3418       AddrModes.emplace_back(NewAddrMode);
3419       return true;
3420     }
3421 
3422     // Figure out how different this is from the other address modes, which we
3423     // can do just by comparing against the first one given that we only care
3424     // about the cumulative difference.
3425     ExtAddrMode::FieldName ThisDifferentField =
3426         AddrModes[0].compare(NewAddrMode);
3427     if (DifferentField == ExtAddrMode::NoField)
3428       DifferentField = ThisDifferentField;
3429     else if (DifferentField != ThisDifferentField)
3430       DifferentField = ExtAddrMode::MultipleFields;
3431 
3432     // If this AddrMode is the same as all the others then everything is fine
3433     // (which should only happen when there is actually only one AddrMode).
3434     if (DifferentField == ExtAddrMode::NoField) {
3435       assert(AddrModes.size() == 1);
3436       return true;
3437     }
3438 
3439     // If NewAddrMode differs in only one dimension then we can handle it by
3440     // inserting a phi/select later on.
3441     if (DifferentField != ExtAddrMode::MultipleFields) {
3442       AddrModes.emplace_back(NewAddrMode);
3443       return true;
3444     }
3445 
3446     // We couldn't combine NewAddrMode with the rest, so return failure.
3447     AddrModes.clear();
3448     return false;
3449   }
3450 
3451   /// \brief Combine the addressing modes we've collected into a single
3452   /// addressing mode.
3453   /// \return True iff we successfully combined them or we only had one so
3454   /// didn't need to combine them anyway.
3455   bool combineAddrModes() {
3456     // If we have no AddrModes then they can't be combined.
3457     if (AddrModes.size() == 0)
3458       return false;
3459 
3460     // A single AddrMode can trivially be combined.
3461     if (AddrModes.size() == 1)
3462       return true;
3463 
3464     // If the AddrModes we collected are all just equal to the value they are
3465     // derived from then combining them wouldn't do anything useful.
3466     if (AllAddrModesTrivial)
3467       return false;
3468 
3469     // TODO: Combine multiple AddrModes by inserting a select or phi for the
3470     // field in which the AddrModes differ.
3471     return false;
3472   }
3473 };
3474 
3475 } // end anonymous namespace
3476 
3477 /// Try adding ScaleReg*Scale to the current addressing mode.
3478 /// Return true and update AddrMode if this addr mode is legal for the target,
3479 /// false if not.
3480 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
3481                                              unsigned Depth) {
3482   // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3483   // mode.  Just process that directly.
3484   if (Scale == 1)
3485     return matchAddr(ScaleReg, Depth);
3486 
3487   // If the scale is 0, it takes nothing to add this.
3488   if (Scale == 0)
3489     return true;
3490 
3491   // If we already have a scale of this value, we can add to it, otherwise, we
3492   // need an available scale field.
3493   if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3494     return false;
3495 
3496   ExtAddrMode TestAddrMode = AddrMode;
3497 
3498   // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
3499   // [A+B + A*7] -> [B+A*8].
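  // In IR terms (an illustrative sketch): if AddrMode already holds %idx with
  // a scale of 3 and we are asked to match %idx with a scale of 4, the test
  // mode built below carries %idx with a scale of 7, which is then checked
  // for legality on the target.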
3500   TestAddrMode.Scale += Scale;
3501   TestAddrMode.ScaledReg = ScaleReg;
3502 
3503   // If the new address isn't legal, bail out.
3504   if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3505     return false;
3506 
3507   // It was legal, so commit it.
3508   AddrMode = TestAddrMode;
3509 
3510   // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
3511   // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
3512   // X*Scale + C*Scale to addr mode.
3513   ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3514   if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
3515       match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
3516     TestAddrMode.ScaledReg = AddLHS;
3517     TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
3518 
3519     // If this addressing mode is legal, commit it and remember that we folded
3520     // this instruction.
3521     if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3522       AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3523       AddrMode = TestAddrMode;
3524       return true;
3525     }
3526   }
3527 
3528   // Otherwise, not (x+c)*scale, just return what we have.
3529   return true;
3530 }
3531 
3532 /// This is a little filter, which returns true if an addressing computation
3533 /// involving I might be folded into a load/store accessing it.
3534 /// This doesn't need to be perfect, but needs to accept at least
3535 /// the set of instructions that MatchOperationAddr can.
3536 static bool MightBeFoldableInst(Instruction *I) {
3537   switch (I->getOpcode()) {
3538   case Instruction::BitCast:
3539   case Instruction::AddrSpaceCast:
3540     // Don't touch identity bitcasts.
3541     if (I->getType() == I->getOperand(0)->getType())
3542       return false;
3543     return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
3544   case Instruction::PtrToInt:
3545     // PtrToInt is always a noop, as we know that the int type is pointer sized.
3546     return true;
3547   case Instruction::IntToPtr:
3548     // We know the input is intptr_t, so this is foldable.
3549     return true;
3550   case Instruction::Add:
3551     return true;
3552   case Instruction::Mul:
3553   case Instruction::Shl:
3554     // Can only handle X*C and X << C.
3555     return isa<ConstantInt>(I->getOperand(1));
3556   case Instruction::GetElementPtr:
3557     return true;
3558   default:
3559     return false;
3560   }
3561 }
3562 
3563 /// \brief Check whether or not \p Val is a legal instruction for \p TLI.
3564 /// \note \p Val is assumed to be the product of some type promotion.
3565 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
3566 /// to be legal, as the non-promoted value would have had the same state.
3567 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
3568                                        const DataLayout &DL, Value *Val) {
3569   Instruction *PromotedInst = dyn_cast<Instruction>(Val);
3570   if (!PromotedInst)
3571     return false;
3572   int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
3573   // If the ISDOpcode is undefined, it was undefined before the promotion.
3574   if (!ISDOpcode)
3575     return true;
3576   // Otherwise, check if the promoted instruction is legal or not.
3577   return TLI.isOperationLegalOrCustom(
3578       ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
3579 }
3580 
3581 namespace {
3582 
3583 /// \brief Helper class to perform type promotion.
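/// For example (an illustrative sketch), promotion can move an extension
/// through a non-wrapping add:
///   %add = add nsw i32 %opnd, 1
///   %ext = sext i32 %add to i64
/// becomes
///   %promoted = sext i32 %opnd to i64
///   %add = add nsw i64 %promoted, 1
/// which exposes the add itself to the addressing mode matcher.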
3584 class TypePromotionHelper { 3585 /// \brief Utility function to check whether or not a sign or zero extension 3586 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 3587 /// either using the operands of \p Inst or promoting \p Inst. 3588 /// The type of the extension is defined by \p IsSExt. 3589 /// In other words, check if: 3590 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 3591 /// #1 Promotion applies: 3592 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 3593 /// #2 Operand reuses: 3594 /// ext opnd1 to ConsideredExtType. 3595 /// \p PromotedInsts maps the instructions to their type before promotion. 3596 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 3597 const InstrToOrigTy &PromotedInsts, bool IsSExt); 3598 3599 /// \brief Utility function to determine if \p OpIdx should be promoted when 3600 /// promoting \p Inst. 3601 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 3602 return !(isa<SelectInst>(Inst) && OpIdx == 0); 3603 } 3604 3605 /// \brief Utility function to promote the operand of \p Ext when this 3606 /// operand is a promotable trunc or sext or zext. 3607 /// \p PromotedInsts maps the instructions to their type before promotion. 3608 /// \p CreatedInstsCost[out] contains the cost of all instructions 3609 /// created to promote the operand of Ext. 3610 /// Newly added extensions are inserted in \p Exts. 3611 /// Newly added truncates are inserted in \p Truncs. 3612 /// Should never be called directly. 3613 /// \return The promoted value which is used instead of Ext. 3614 static Value *promoteOperandForTruncAndAnyExt( 3615 Instruction *Ext, TypePromotionTransaction &TPT, 3616 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3617 SmallVectorImpl<Instruction *> *Exts, 3618 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 3619 3620 /// \brief Utility function to promote the operand of \p Ext when this 3621 /// operand is promotable and is not a supported trunc or sext. 3622 /// \p PromotedInsts maps the instructions to their type before promotion. 3623 /// \p CreatedInstsCost[out] contains the cost of all the instructions 3624 /// created to promote the operand of Ext. 3625 /// Newly added extensions are inserted in \p Exts. 3626 /// Newly added truncates are inserted in \p Truncs. 3627 /// Should never be called directly. 3628 /// \return The promoted value which is used instead of Ext. 3629 static Value *promoteOperandForOther(Instruction *Ext, 3630 TypePromotionTransaction &TPT, 3631 InstrToOrigTy &PromotedInsts, 3632 unsigned &CreatedInstsCost, 3633 SmallVectorImpl<Instruction *> *Exts, 3634 SmallVectorImpl<Instruction *> *Truncs, 3635 const TargetLowering &TLI, bool IsSExt); 3636 3637 /// \see promoteOperandForOther. 3638 static Value *signExtendOperandForOther( 3639 Instruction *Ext, TypePromotionTransaction &TPT, 3640 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3641 SmallVectorImpl<Instruction *> *Exts, 3642 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3643 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3644 Exts, Truncs, TLI, true); 3645 } 3646 3647 /// \see promoteOperandForOther. 
3648   static Value *zeroExtendOperandForOther(
3649       Instruction *Ext, TypePromotionTransaction &TPT,
3650       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3651       SmallVectorImpl<Instruction *> *Exts,
3652       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3653     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
3654                                   Exts, Truncs, TLI, false);
3655   }
3656 
3657 public:
3658   /// Type for the utility function that promotes the operand of Ext.
3659   using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
3660                             InstrToOrigTy &PromotedInsts,
3661                             unsigned &CreatedInstsCost,
3662                             SmallVectorImpl<Instruction *> *Exts,
3663                             SmallVectorImpl<Instruction *> *Truncs,
3664                             const TargetLowering &TLI);
3665 
3666   /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
3667   /// action to promote the operand of \p Ext instead of using Ext.
3668   /// \return NULL if no promotable action is possible with the current
3669   /// sign extension.
3670   /// \p InsertedInsts keeps track of all the instructions inserted by the
3671   /// other CodeGenPrepare optimizations. This information is important
3672   /// because we do not want to promote these instructions as CodeGenPrepare
3673   /// will reinsert them later, thus creating an infinite create/remove loop.
3674   /// \p PromotedInsts maps the instructions to their type before promotion.
3675   static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
3676                           const TargetLowering &TLI,
3677                           const InstrToOrigTy &PromotedInsts);
3678 };
3679 
3680 } // end anonymous namespace
3681 
3682 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
3683                                         Type *ConsideredExtType,
3684                                         const InstrToOrigTy &PromotedInsts,
3685                                         bool IsSExt) {
3686   // The promotion helper does not know how to deal with vector types yet.
3687   // To be able to fix that, we would need to fix the places where we
3688   // statically extend, e.g., constants and such.
3689   if (Inst->getType()->isVectorTy())
3690     return false;
3691 
3692   // We can always get through zext.
3693   if (isa<ZExtInst>(Inst))
3694     return true;
3695 
3696   // sext(sext) is ok too.
3697   if (IsSExt && isa<SExtInst>(Inst))
3698     return true;
3699 
3700   // We can get through a binary operator if it is legal. In other words, the
3701   // binary operator must have a nuw or nsw flag.
3702   const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
3703   if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
3704       ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
3705        (IsSExt && BinOp->hasNoSignedWrap())))
3706     return true;
3707 
3708   // Check if we can do the following simplification.
3709   // ext(trunc(opnd)) --> ext(opnd)
3710   if (!isa<TruncInst>(Inst))
3711     return false;
3712 
3713   Value *OpndVal = Inst->getOperand(0);
3714   // Check if we can use this operand in the extension.
3715   // If the type is larger than the result type of the extension, we cannot.
3716   if (!OpndVal->getType()->isIntegerTy() ||
3717       OpndVal->getType()->getIntegerBitWidth() >
3718           ConsideredExtType->getIntegerBitWidth())
3719     return false;
3720 
3721   // If the operand of the truncate is not an instruction, we will not have
3722   // any information on the dropped bits.
3723   // (Actually we could for constants, but it is not worth the extra logic.)
3724   Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
3725   if (!Opnd)
3726     return false;
3727 
3728   // Check if the source of the truncate is narrow enough.
3729   // I.e., check that the truncate just drops extended bits of the same kind
3730   // as the extension.
3731   // #1 get the type of the operand and check the kind of the extended bits.
3732   const Type *OpndType;
3733   InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
3734   if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
3735     OpndType = It->second.getPointer();
3736   else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
3737     OpndType = Opnd->getOperand(0)->getType();
3738   else
3739     return false;
3740 
3741   // #2 check that the truncate just drops extended bits.
3742   return Inst->getType()->getIntegerBitWidth() >=
3743          OpndType->getIntegerBitWidth();
3744 }
3745 
3746 TypePromotionHelper::Action TypePromotionHelper::getAction(
3747     Instruction *Ext, const SetOfInstrs &InsertedInsts,
3748     const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
3749   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3750          "Unexpected instruction type");
3751   Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
3752   Type *ExtTy = Ext->getType();
3753   bool IsSExt = isa<SExtInst>(Ext);
3754   // If the operand of the extension is not an instruction, we cannot
3755   // get through.
3756   // If it is, check that we can get through.
3757   if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
3758     return nullptr;
3759 
3760   // Do not promote if the operand has been added by codegenprepare.
3761   // Otherwise, it means we are undoing an optimization that is likely to be
3762   // redone, thus causing a potential infinite loop.
3763   if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
3764     return nullptr;
3765 
3766   // SExt, ZExt or Trunc instructions.
3767   // Return the related handler.
3768   if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
3769       isa<ZExtInst>(ExtOpnd))
3770     return promoteOperandForTruncAndAnyExt;
3771 
3772   // Regular instruction.
3773   // Abort early if we will have to insert non-free instructions.
3774   if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
3775     return nullptr;
3776   return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
3777 }
3778 
3779 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
3780     Instruction *SExt, TypePromotionTransaction &TPT,
3781     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3782     SmallVectorImpl<Instruction *> *Exts,
3783     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3784   // By construction, the operand of SExt is an instruction. Otherwise we cannot
3785   // get through it and this method should not be called.
3786   Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
3787   Value *ExtVal = SExt;
3788   bool HasMergedNonFreeExt = false;
3789   if (isa<ZExtInst>(SExtOpnd)) {
3790     // Replace s|zext(zext(opnd))
3791     // => zext(opnd).
3792     HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
3793     Value *ZExt =
3794         TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
3795     TPT.replaceAllUsesWith(SExt, ZExt);
3796     TPT.eraseInstruction(SExt);
3797     ExtVal = ZExt;
3798   } else {
3799     // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
3800     // => z|sext(opnd).
3801     TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
3802   }
3803   CreatedInstsCost = 0;
3804 
3805   // Remove dead code.
3806   if (SExtOpnd->use_empty())
3807     TPT.eraseInstruction(SExtOpnd);
3808 
3809   // Check if the extension is still needed.
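  // E.g. (an illustrative sketch), for
  //   %t = trunc i64 %v to i32
  //   %e = sext i32 %t to i64
  // the setOperand above rewrites %e into "sext i64 %v to i64", whose source
  // and destination types match; such a degenerate ext is removed below in
  // favor of %v.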
3810   Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
3811   if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
3812     if (ExtInst) {
3813       if (Exts)
3814         Exts->push_back(ExtInst);
3815       CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
3816     }
3817     return ExtVal;
3818   }
3819 
3820   // At this point we have: ext ty opnd to ty.
3821   // Reassign the uses of ExtInst to the opnd and remove ExtInst.
3822   Value *NextVal = ExtInst->getOperand(0);
3823   TPT.eraseInstruction(ExtInst, NextVal);
3824   return NextVal;
3825 }
3826 
3827 Value *TypePromotionHelper::promoteOperandForOther(
3828     Instruction *Ext, TypePromotionTransaction &TPT,
3829     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3830     SmallVectorImpl<Instruction *> *Exts,
3831     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
3832     bool IsSExt) {
3833   // By construction, the operand of Ext is an instruction. Otherwise we cannot
3834   // get through it and this method should not be called.
3835   Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
3836   CreatedInstsCost = 0;
3837   if (!ExtOpnd->hasOneUse()) {
3838     // ExtOpnd will be promoted.
3839     // All its uses, except Ext, will need to use a truncated value of the
3840     // promoted version.
3841     // Create the truncate now.
3842     Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
3843     if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
3844       // Insert it just after the definition.
3845       ITrunc->moveAfter(ExtOpnd);
3846       if (Truncs)
3847         Truncs->push_back(ITrunc);
3848     }
3849 
3850     TPT.replaceAllUsesWith(ExtOpnd, Trunc);
3851     // Restore the operand of Ext (which has been replaced by the previous call
3852     // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
3853     TPT.setOperand(Ext, 0, ExtOpnd);
3854   }
3855 
3856   // Get through the Instruction:
3857   // 1. Update its type.
3858   // 2. Replace the uses of Ext by Inst.
3859   // 3. Extend each operand that needs to be extended.
3860 
3861   // Remember the original type of the instruction before promotion.
3862   // This is useful to know that the high bits are sign extended bits.
3863   PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
3864       ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
3865   // Step #1.
3866   TPT.mutateType(ExtOpnd, Ext->getType());
3867   // Step #2.
3868   TPT.replaceAllUsesWith(Ext, ExtOpnd);
3869   // Step #3.
3870   Instruction *ExtForOpnd = Ext;
3871 
3872   DEBUG(dbgs() << "Propagate Ext to operands\n");
3873   for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
3874        ++OpIdx) {
3875     DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
3876     if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
3877         !shouldExtOperand(ExtOpnd, OpIdx)) {
3878       DEBUG(dbgs() << "No need to propagate\n");
3879       continue;
3880     }
3881     // Check if we can statically extend the operand.
3882     Value *Opnd = ExtOpnd->getOperand(OpIdx);
3883     if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
3884       DEBUG(dbgs() << "Statically extend\n");
3885       unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
3886       APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
3887                             : Cst->getValue().zext(BitWidth);
3888       TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
3889       continue;
3890     }
3891     // UndefValues are typed, so we have to statically sign extend them.
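    // E.g. (an illustrative sketch), an undef i32 operand is simply rebuilt
    // as an undef of the promoted type; no extension instruction is needed.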
3892     if (isa<UndefValue>(Opnd)) {
3893       DEBUG(dbgs() << "Statically extend\n");
3894       TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
3895       continue;
3896     }
3897 
3898     // Otherwise we have to explicitly sign extend the operand.
3899     // Check if Ext was already reused to extend an operand.
3900     if (!ExtForOpnd) {
3901       // If so, create a new one.
3902       DEBUG(dbgs() << "More operands to ext\n");
3903       Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
3904                                     : TPT.createZExt(Ext, Opnd, Ext->getType());
3905       if (!isa<Instruction>(ValForExtOpnd)) {
3906         TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
3907         continue;
3908       }
3909       ExtForOpnd = cast<Instruction>(ValForExtOpnd);
3910     }
3911     if (Exts)
3912       Exts->push_back(ExtForOpnd);
3913     TPT.setOperand(ExtForOpnd, 0, Opnd);
3914 
3915     // Move the sign extension before the insertion point.
3916     TPT.moveBefore(ExtForOpnd, ExtOpnd);
3917     TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
3918     CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
3919     // If more extensions are required, new instructions will have to be created.
3920     ExtForOpnd = nullptr;
3921   }
3922   if (ExtForOpnd == Ext) {
3923     DEBUG(dbgs() << "Extension is useless now\n");
3924     TPT.eraseInstruction(Ext);
3925   }
3926   return ExtOpnd;
3927 }
3928 
3929 /// Check whether or not promoting an instruction to a wider type is profitable.
3930 /// \p NewCost gives the cost of extension instructions created by the
3931 /// promotion.
3932 /// \p OldCost gives the cost of extension instructions before the promotion
3933 /// plus the number of instructions that have been
3934 /// matched in the addressing mode thanks to the promotion.
3935 /// \p PromotedOperand is the value that has been promoted.
3936 /// \return True if the promotion is profitable, false otherwise.
3937 bool AddressingModeMatcher::isPromotionProfitable(
3938     unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
3939   DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
3940   // The cost of the new extensions is greater than the cost of the
3941   // old extension plus what we folded.
3942   // This is not profitable.
3943   if (NewCost > OldCost)
3944     return false;
3945   if (NewCost < OldCost)
3946     return true;
3947   // The promotion is neutral but it may help folding the sign extension in
3948   // loads for instance.
3949   // Check that we did not create an illegal instruction.
3950   return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
3951 }
3952 
3953 /// Given an instruction or constant expr, see if we can fold the operation
3954 /// into the addressing mode. If so, update the addressing mode and return
3955 /// true, otherwise return false without modifying AddrMode.
3956 /// If \p MovedAway is not NULL, it contains the information of whether or
3957 /// not AddrInst has to be folded into the addressing mode on success.
3958 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
3959 /// mode because it has been moved away.
3960 /// Thus AddrInst must not be added in the matched instructions.
3961 /// This state can happen when AddrInst is a sext, since it may be moved away.
3962 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
3963 /// not be referenced anymore.
3964 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
3965                                                unsigned Depth,
3966                                                bool *MovedAway) {
3967   // Avoid exponential behavior on extremely deep expression trees.
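  // E.g. (an illustrative sketch), an address expression nested like
  //   ((((a + b) + c) + d) + e)
  // stops being matched once it exceeds the depth cap below.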
3968   if (Depth >= 5) return false;
3969 
3970   // By default, all matched instructions stay in place.
3971   if (MovedAway)
3972     *MovedAway = false;
3973 
3974   switch (Opcode) {
3975   case Instruction::PtrToInt:
3976     // PtrToInt is always a noop, as we know that the int type is pointer sized.
3977     return matchAddr(AddrInst->getOperand(0), Depth);
3978   case Instruction::IntToPtr: {
3979     auto AS = AddrInst->getType()->getPointerAddressSpace();
3980     auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
3981     // This inttoptr is a no-op if the integer type is pointer sized.
3982     if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
3983       return matchAddr(AddrInst->getOperand(0), Depth);
3984     return false;
3985   }
3986   case Instruction::BitCast:
3987     // BitCast is always a noop, and we can handle it as long as it is
3988     // int->int or pointer->pointer (we don't want int<->fp or something).
3989     if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
3990          AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
3991         // Don't touch identity bitcasts.  These were probably put here by LSR,
3992         // and we don't want to mess around with them.  Assume it knows what it
3993         // is doing.
3994         AddrInst->getOperand(0)->getType() != AddrInst->getType())
3995       return matchAddr(AddrInst->getOperand(0), Depth);
3996     return false;
3997   case Instruction::AddrSpaceCast: {
3998     unsigned SrcAS
3999         = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4000     unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4001     if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
4002       return matchAddr(AddrInst->getOperand(0), Depth);
4003     return false;
4004   }
4005   case Instruction::Add: {
4006     // Check to see if we can merge in the RHS then the LHS.  If so, we win.
4007     ExtAddrMode BackupAddrMode = AddrMode;
4008     unsigned OldSize = AddrModeInsts.size();
4009     // Start a transaction at this point.
4010     // The LHS may match but not the RHS.
4011     // Therefore, we need a higher level restoration point to undo a partially
4012     // matched operation.
4013     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4014         TPT.getRestorationPoint();
4015 
4016     if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
4017         matchAddr(AddrInst->getOperand(0), Depth+1))
4018       return true;
4019 
4020     // Restore the old addr mode info.
4021     AddrMode = BackupAddrMode;
4022     AddrModeInsts.resize(OldSize);
4023     TPT.rollback(LastKnownGood);
4024 
4025     // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
4026     if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
4027         matchAddr(AddrInst->getOperand(1), Depth+1))
4028       return true;
4029 
4030     // Otherwise we definitely can't merge the ADD in.
4031     AddrMode = BackupAddrMode;
4032     AddrModeInsts.resize(OldSize);
4033     TPT.rollback(LastKnownGood);
4034     break;
4035   }
4036   //case Instruction::Or:
4037   //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4038   //break;
4039   case Instruction::Mul:
4040   case Instruction::Shl: {
4041     // Can only handle X*C and X << C.
4042     ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4043     if (!RHS)
4044       return false;
4045     int64_t Scale = RHS->getSExtValue();
4046     if (Opcode == Instruction::Shl)
4047       Scale = 1LL << Scale;
4048 
4049     return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4050   }
4051   case Instruction::GetElementPtr: {
4052     // Scan the GEP.  We check whether it contains constant offsets and at most
4053     // one variable offset.
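    // E.g. (an illustrative sketch), for
    //   getelementptr %struct.S, %struct.S* %p, i64 %i, i32 2
    // the struct field index contributes a constant offset from the struct
    // layout, while %i is the single variable index, scaled by the allocation
    // size of the type it indexes into.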
4054 int VariableOperand = -1; 4055 unsigned VariableScale = 0; 4056 4057 int64_t ConstantOffset = 0; 4058 gep_type_iterator GTI = gep_type_begin(AddrInst); 4059 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 4060 if (StructType *STy = GTI.getStructTypeOrNull()) { 4061 const StructLayout *SL = DL.getStructLayout(STy); 4062 unsigned Idx = 4063 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 4064 ConstantOffset += SL->getElementOffset(Idx); 4065 } else { 4066 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); 4067 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 4068 ConstantOffset += CI->getSExtValue()*TypeSize; 4069 } else if (TypeSize) { // Scales of zero don't do anything. 4070 // We only allow one variable index at the moment. 4071 if (VariableOperand != -1) 4072 return false; 4073 4074 // Remember the variable index. 4075 VariableOperand = i; 4076 VariableScale = TypeSize; 4077 } 4078 } 4079 } 4080 4081 // A common case is for the GEP to only do a constant offset. In this case, 4082 // just add it to the disp field and check validity. 4083 if (VariableOperand == -1) { 4084 AddrMode.BaseOffs += ConstantOffset; 4085 if (ConstantOffset == 0 || 4086 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 4087 // Check to see if we can fold the base pointer in too. 4088 if (matchAddr(AddrInst->getOperand(0), Depth+1)) 4089 return true; 4090 } 4091 AddrMode.BaseOffs -= ConstantOffset; 4092 return false; 4093 } 4094 4095 // Save the valid addressing mode in case we can't match. 4096 ExtAddrMode BackupAddrMode = AddrMode; 4097 unsigned OldSize = AddrModeInsts.size(); 4098 4099 // See if the scale and offset amount is valid for this target. 4100 AddrMode.BaseOffs += ConstantOffset; 4101 4102 // Match the base operand of the GEP. 4103 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 4104 // If it couldn't be matched, just stuff the value in a register. 4105 if (AddrMode.HasBaseReg) { 4106 AddrMode = BackupAddrMode; 4107 AddrModeInsts.resize(OldSize); 4108 return false; 4109 } 4110 AddrMode.HasBaseReg = true; 4111 AddrMode.BaseReg = AddrInst->getOperand(0); 4112 } 4113 4114 // Match the remaining variable portion of the GEP. 4115 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 4116 Depth)) { 4117 // If it couldn't be matched, try stuffing the base into a register 4118 // instead of matching it, and retrying the match of the scale. 4119 AddrMode = BackupAddrMode; 4120 AddrModeInsts.resize(OldSize); 4121 if (AddrMode.HasBaseReg) 4122 return false; 4123 AddrMode.HasBaseReg = true; 4124 AddrMode.BaseReg = AddrInst->getOperand(0); 4125 AddrMode.BaseOffs += ConstantOffset; 4126 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 4127 VariableScale, Depth)) { 4128 // If even that didn't work, bail. 4129 AddrMode = BackupAddrMode; 4130 AddrModeInsts.resize(OldSize); 4131 return false; 4132 } 4133 } 4134 4135 return true; 4136 } 4137 case Instruction::SExt: 4138 case Instruction::ZExt: { 4139 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 4140 if (!Ext) 4141 return false; 4142 4143 // Try to move this ext out of the way of the addressing mode. 4144 // Ask for a method for doing so. 
4145 TypePromotionHelper::Action TPH = 4146 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 4147 if (!TPH) 4148 return false; 4149 4150 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4151 TPT.getRestorationPoint(); 4152 unsigned CreatedInstsCost = 0; 4153 unsigned ExtCost = !TLI.isExtFree(Ext); 4154 Value *PromotedOperand = 4155 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 4156 // SExt has been moved away. 4157 // Thus either it will be rematched later in the recursive calls or it is 4158 // gone. Anyway, we must not fold it into the addressing mode at this point. 4159 // E.g., 4160 // op = add opnd, 1 4161 // idx = ext op 4162 // addr = gep base, idx 4163 // is now: 4164 // promotedOpnd = ext opnd <- no match here 4165 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 4166 // addr = gep base, op <- match 4167 if (MovedAway) 4168 *MovedAway = true; 4169 4170 assert(PromotedOperand && 4171 "TypePromotionHelper should have filtered out those cases"); 4172 4173 ExtAddrMode BackupAddrMode = AddrMode; 4174 unsigned OldSize = AddrModeInsts.size(); 4175 4176 if (!matchAddr(PromotedOperand, Depth) || 4177 // The total of the new cost is equal to the cost of the created 4178 // instructions. 4179 // The total of the old cost is equal to the cost of the extension plus 4180 // what we have saved in the addressing mode. 4181 !isPromotionProfitable(CreatedInstsCost, 4182 ExtCost + (AddrModeInsts.size() - OldSize), 4183 PromotedOperand)) { 4184 AddrMode = BackupAddrMode; 4185 AddrModeInsts.resize(OldSize); 4186 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 4187 TPT.rollback(LastKnownGood); 4188 return false; 4189 } 4190 return true; 4191 } 4192 } 4193 return false; 4194 } 4195 4196 /// If we can, try to add the value of 'Addr' into the current addressing mode. 4197 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 4198 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 4199 /// for the target. 4200 /// 4201 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 4202 // Start a transaction at this point that we will rollback if the matching 4203 // fails. 4204 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4205 TPT.getRestorationPoint(); 4206 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 4207 // Fold in immediates if legal for the target. 4208 AddrMode.BaseOffs += CI->getSExtValue(); 4209 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4210 return true; 4211 AddrMode.BaseOffs -= CI->getSExtValue(); 4212 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 4213 // If this is a global variable, try to fold it into the addressing mode. 4214 if (!AddrMode.BaseGV) { 4215 AddrMode.BaseGV = GV; 4216 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4217 return true; 4218 AddrMode.BaseGV = nullptr; 4219 } 4220 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 4221 ExtAddrMode BackupAddrMode = AddrMode; 4222 unsigned OldSize = AddrModeInsts.size(); 4223 4224 // Check to see if it is possible to fold this operation. 4225 bool MovedAway = false; 4226 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 4227 // This instruction may have been moved away. If so, there is nothing 4228 // to check here. 4229 if (MovedAway) 4230 return true; 4231 // Okay, it's possible to fold this. Check to see if it is actually 4232 // *profitable* to do so. 
We use a simple cost model to avoid increasing
4233       // register pressure too much.
4234       if (I->hasOneUse() ||
4235           isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4236         AddrModeInsts.push_back(I);
4237         return true;
4238       }
4239 
4240       // It isn't profitable to do this, roll back.
4241       //cerr << "NOT FOLDING: " << *I;
4242       AddrMode = BackupAddrMode;
4243       AddrModeInsts.resize(OldSize);
4244       TPT.rollback(LastKnownGood);
4245     }
4246   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4247     if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4248       return true;
4249     TPT.rollback(LastKnownGood);
4250   } else if (isa<ConstantPointerNull>(Addr)) {
4251     // Null pointer gets folded without affecting the addressing mode.
4252     return true;
4253   }
4254 
4255   // Worst case, the target should support [reg] addressing modes. :)
4256   if (!AddrMode.HasBaseReg) {
4257     AddrMode.HasBaseReg = true;
4258     AddrMode.BaseReg = Addr;
4259     // Still check for legality in case the target supports [imm] but not [i+r].
4260     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4261       return true;
4262     AddrMode.HasBaseReg = false;
4263     AddrMode.BaseReg = nullptr;
4264   }
4265 
4266   // If the base register is already taken, see if we can do [r+r].
4267   if (AddrMode.Scale == 0) {
4268     AddrMode.Scale = 1;
4269     AddrMode.ScaledReg = Addr;
4270     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4271       return true;
4272     AddrMode.Scale = 0;
4273     AddrMode.ScaledReg = nullptr;
4274   }
4275   // Couldn't match.
4276   TPT.rollback(LastKnownGood);
4277   return false;
4278 }
4279 
4280 /// Check to see if all uses of OpVal by the specified inline asm call are due
4281 /// to memory operands. If so, return true, otherwise return false.
4282 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4283                                     const TargetLowering &TLI,
4284                                     const TargetRegisterInfo &TRI) {
4285   const Function *F = CI->getFunction();
4286   TargetLowering::AsmOperandInfoVector TargetConstraints =
4287       TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI,
4288                            ImmutableCallSite(CI));
4289 
4290   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
4291     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
4292 
4293     // Compute the constraint code and ConstraintType to use.
4294     TLI.ComputeConstraintToUse(OpInfo, SDValue());
4295 
4296     // If this asm operand is our Value*, and if it isn't an indirect memory
4297     // operand, we can't fold it!
4298     if (OpInfo.CallOperandVal == OpVal &&
4299         (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4300          !OpInfo.isIndirect))
4301       return false;
4302   }
4303 
4304   return true;
4305 }
4306 
4307 // Max number of memory uses to look at before aborting the search to conserve
4308 // compile time.
4309 static constexpr int MaxMemoryUsesToScan = 20;
4310 
4311 /// Recursively walk all the uses of I until we find a memory use.
4312 /// If we find an obviously non-foldable instruction, return true.
4313 /// Add the ultimately found memory instructions to MemoryUses.
4314 static bool FindAllMemoryUses(
4315     Instruction *I,
4316     SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
4317     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
4318     const TargetRegisterInfo &TRI, int SeenInsts = 0) {
4319   // If we already considered this instruction, we're done.
4320   if (!ConsideredInsts.insert(I).second)
4321     return false;
4322 
4323   // If this is an obviously unfoldable instruction, bail out.
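  // E.g. (an illustrative sketch), a udiv can never be part of a foldable
  // address computation (it is not in MightBeFoldableInst's list), so running
  // into one means the address has a non-foldable use.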
4324 if (!MightBeFoldableInst(I)) 4325 return true; 4326 4327 const bool OptSize = I->getFunction()->optForSize(); 4328 4329 // Loop over all the uses, recursively processing them. 4330 for (Use &U : I->uses()) { 4331 // Conservatively return true if we're seeing a large number or a deep chain 4332 // of users. This avoids excessive compilation times in pathological cases. 4333 if (SeenInsts++ >= MaxMemoryUsesToScan) 4334 return true; 4335 4336 Instruction *UserI = cast<Instruction>(U.getUser()); 4337 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 4338 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 4339 continue; 4340 } 4341 4342 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 4343 unsigned opNo = U.getOperandNo(); 4344 if (opNo != StoreInst::getPointerOperandIndex()) 4345 return true; // Storing addr, not into addr. 4346 MemoryUses.push_back(std::make_pair(SI, opNo)); 4347 continue; 4348 } 4349 4350 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 4351 unsigned opNo = U.getOperandNo(); 4352 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 4353 return true; // Storing addr, not into addr. 4354 MemoryUses.push_back(std::make_pair(RMW, opNo)); 4355 continue; 4356 } 4357 4358 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 4359 unsigned opNo = U.getOperandNo(); 4360 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 4361 return true; // Storing addr, not into addr. 4362 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 4363 continue; 4364 } 4365 4366 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 4367 // If this is a cold call, we can sink the addressing calculation into 4368 // the cold path. See optimizeCallInst 4369 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 4370 continue; 4371 4372 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 4373 if (!IA) return true; 4374 4375 // If this is a memory operand, we're cool, otherwise bail out. 4376 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 4377 return true; 4378 continue; 4379 } 4380 4381 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, 4382 SeenInsts)) 4383 return true; 4384 } 4385 4386 return false; 4387 } 4388 4389 /// Return true if Val is already known to be live at the use site that we're 4390 /// folding it into. If so, there is no cost to include it in the addressing 4391 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 4392 /// instruction already. 4393 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 4394 Value *KnownLive2) { 4395 // If Val is either of the known-live values, we know it is live! 4396 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 4397 return true; 4398 4399 // All values other than instructions and arguments (e.g. constants) are live. 4400 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 4401 4402 // If Val is a constant sized alloca in the entry block, it is live, this is 4403 // true because it is just a reference to the stack/frame pointer, which is 4404 // live for the whole function. 4405 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 4406 if (AI->isStaticAlloca()) 4407 return true; 4408 4409 // Check to see if this value is already used in the memory instruction's 4410 // block. If so, it's already live into the block at the very least, so we 4411 // can reasonably fold it. 
4412   return Val->isUsedInBasicBlock(MemoryInst->getParent());
4413 }
4414 
4415 /// It is possible for the addressing mode of the machine to fold the specified
4416 /// instruction into a load or store that ultimately uses it.
4417 /// However, the specified instruction has multiple uses.
4418 /// Given this, it may actually increase register pressure to fold it
4419 /// into the load. For example, consider this code:
4420 ///
4421 ///     X = ...
4422 ///     Y = X+1
4423 ///     use(Y)   -> nonload/store
4424 ///     Z = Y+1
4425 ///     load Z
4426 ///
4427 /// In this case, Y has multiple uses, and can be folded into the load of Z
4428 /// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
4429 /// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
4430 /// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
4431 /// number of computations either.
4432 ///
4433 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
4434 /// X was live across 'load Z' for other reasons, we actually *would* want to
4435 /// fold the addressing mode in the Z case.  This would make Y die earlier.
4436 bool AddressingModeMatcher::
4437 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
4438                                      ExtAddrMode &AMAfter) {
4439   if (IgnoreProfitability) return true;
4440 
4441   // AMBefore is the addressing mode before this instruction was folded into it,
4442   // and AMAfter is the addressing mode after the instruction was folded.  Get
4443   // the set of registers referenced by AMAfter and subtract out those
4444   // referenced by AMBefore: this is the set of values which folding in this
4445   // address extends the lifetime of.
4446   //
4447   // Note that there are only two potential values being referenced here,
4448   // BaseReg and ScaleReg (global addresses are always available, as are any
4449   // folded immediates).
4450   Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
4451 
4452   // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
4453   // lifetime wasn't extended by adding this instruction.
4454   if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4455     BaseReg = nullptr;
4456   if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4457     ScaledReg = nullptr;
4458 
4459   // If folding this instruction (and its subexprs) didn't extend any live
4460   // ranges, we're ok with it.
4461   if (!BaseReg && !ScaledReg)
4462     return true;
4463 
4464   // If all uses of this instruction can have the address mode sunk into them,
4465   // we can remove the addressing mode and effectively trade one live register
4466   // for another (at worst).  In this context, folding an addressing mode into
4467   // the use is just a particularly nice way of sinking it.
4468   SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
4469   SmallPtrSet<Instruction*, 16> ConsideredInsts;
4470   if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI))
4471     return false;  // Has a non-memory, non-foldable use!
4472 
4473   // Now that we know that all uses of this instruction are part of a chain of
4474   // computation involving only operations that could theoretically be folded
4475   // into a memory use, loop over each of these memory operation uses and see
4476   // if they could *actually* fold the instruction.  The assumption is that
4477   // addressing modes are cheap and that duplicating the computation involved
4478   // many times is worthwhile, even on a fastpath.
For sinking candidates
4479 // (i.e. cold call sites), this serves as a way to prevent excessive code
4480 // growth since most architectures have some reasonably small and fast way to
4481 // compute an effective address. (e.g., LEA on x86)
4482 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
4483 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
4484 Instruction *User = MemoryUses[i].first;
4485 unsigned OpNo = MemoryUses[i].second;
4486
4487 // Get the access type of this use. If the use isn't a pointer, we don't
4488 // know what it accesses.
4489 Value *Address = User->getOperand(OpNo);
4490 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
4491 if (!AddrTy)
4492 return false;
4493 Type *AddressAccessTy = AddrTy->getElementType();
4494 unsigned AS = AddrTy->getAddressSpace();
4495
4496 // Do a match against the root of this address, ignoring profitability. This
4497 // will tell us if the addressing mode for the memory operation will
4498 // *actually* cover the shared instruction.
4499 ExtAddrMode Result;
4500 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4501 TPT.getRestorationPoint();
4502 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI,
4503 AddressAccessTy, AS,
4504 MemoryInst, Result, InsertedInsts,
4505 PromotedInsts, TPT);
4506 Matcher.IgnoreProfitability = true;
4507 bool Success = Matcher.matchAddr(Address, 0);
4508 (void)Success; assert(Success && "Couldn't select *anything*?");
4509
4510 // The match was only to check profitability; the changes made are not
4511 // part of the original matcher. Therefore, they should be dropped,
4512 // otherwise the original matcher will not present the right state.
4513 TPT.rollback(LastKnownGood);
4514
4515 // If the match didn't cover I, then it won't be shared by it.
4516 if (!is_contained(MatchedAddrModeInsts, I))
4517 return false;
4518
4519 MatchedAddrModeInsts.clear();
4520 }
4521
4522 return true;
4523 }
4524
4525 /// Return true if the specified values are defined in a
4526 /// different basic block than BB.
4527 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
4528 if (Instruction *I = dyn_cast<Instruction>(V))
4529 return I->getParent() != BB;
4530 return false;
4531 }
4532
4533 /// Sink addressing mode computation immediately before MemoryInst if doing so
4534 /// can be done without increasing register pressure. The need for the
4535 /// register pressure constraint means this can end up being an all-or-nothing
4536 /// decision for all uses of the same addressing computation.
4537 ///
4538 /// Load and Store Instructions often have addressing modes that can do
4539 /// significant amounts of computation. As such, instruction selection will try
4540 /// to get the load or store to do as much computation as possible for the
4541 /// program. The problem is that isel can only see within a single block. As
4542 /// such, we sink as much legal addressing mode work into the block as possible.
4543 ///
4544 /// This method is used to optimize both load/store and inline asms with memory
4545 /// operands. It's also used to sink addressing computations feeding into cold
4546 /// call sites into their (cold) basic block.
4547 ///
4548 /// The motivation for handling sinking into cold blocks is that doing so can
4549 /// both enable other address mode sinking (by satisfying the register pressure
4550 /// constraint above) and reduce register pressure globally (by removing the
4551 /// addressing mode computation from the fast path entirely).
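///
/// For illustration only, here is a hypothetical sketch in pseudo-IR (the
/// value names, including %sunkaddr, are placeholders, not output from a
/// specific test):
/// \code
/// entry:
///   %addr = getelementptr i32, i32* %base, i64 %idx
///   br i1 %cond, label %cold, label %exit
/// cold:
///   %v = load i32, i32* %addr
/// \endcode
/// may become, after the address computation is sunk into the cold block:
/// \code
/// entry:
///   br i1 %cond, label %cold, label %exit
/// cold:
///   %sunkaddr = getelementptr i32, i32* %base, i64 %idx
///   %v = load i32, i32* %sunkaddr
/// \endcode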
4552 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
4553 Type *AccessTy, unsigned AddrSpace) {
4554 Value *Repl = Addr;
4555
4556 // Try to collapse single-value PHI nodes. This is necessary to undo
4557 // unprofitable PRE transformations.
4558 SmallVector<Value*, 8> worklist;
4559 SmallPtrSet<Value*, 16> Visited;
4560 worklist.push_back(Addr);
4561
4562 // Use a worklist to iteratively look through PHI and select nodes, and
4563 // ensure that the addressing modes obtained from the non-PHI/select roots of
4564 // the graph are compatible.
4565 bool PhiOrSelectSeen = false;
4566 SmallVector<Instruction*, 16> AddrModeInsts;
4567 AddressingModeCombiner AddrModes;
4568 TypePromotionTransaction TPT(RemovedInsts);
4569 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4570 TPT.getRestorationPoint();
4571 while (!worklist.empty()) {
4572 Value *V = worklist.back();
4573 worklist.pop_back();
4574
4575 // We allow traversing cyclic Phi nodes.
4576 // In case of success after this loop, we ensure that traversing through the
4577 // Phi nodes ends up with all cases computing an address of the form
4578 // BaseGV + Base + Scale * Index + Offset
4579 // where Scale and Offset are constants and BaseGV, Base and Index
4580 // are exactly the same Values in all cases.
4581 // It means that BaseGV, Scale and Offset dominate our memory instruction
4582 // and have the same value as they had in the address computation represented
4583 // as a Phi, so we can safely sink the address computation to the memory instruction.
4584 if (!Visited.insert(V).second)
4585 continue;
4586
4587 // For a PHI node, push all of its incoming values.
4588 if (PHINode *P = dyn_cast<PHINode>(V)) {
4589 for (Value *IncValue : P->incoming_values())
4590 worklist.push_back(IncValue);
4591 PhiOrSelectSeen = true;
4592 continue;
4593 }
4594 // Similar for select.
4595 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
4596 worklist.push_back(SI->getFalseValue());
4597 worklist.push_back(SI->getTrueValue());
4598 PhiOrSelectSeen = true;
4599 continue;
4600 }
4601
4602 // For non-PHIs, determine the addressing mode being computed. Note that
4603 // the result may differ depending on what other uses our candidate
4604 // addressing instructions might have.
4605 AddrModeInsts.clear();
4606 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
4607 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
4608 InsertedInsts, PromotedInsts, TPT);
4609 NewAddrMode.OriginalValue = V;
4610
4611 if (!AddrModes.addNewAddrMode(NewAddrMode))
4612 break;
4613 }
4614
4615 // Try to combine the AddrModes we've collected. If we couldn't collect any,
4616 // or we have multiple but either couldn't combine them or combining them
4617 // wouldn't do anything useful, bail out now.
4618 if (!AddrModes.combineAddrModes()) {
4619 TPT.rollback(LastKnownGood);
4620 return false;
4621 }
4622 TPT.commit();
4623
4624 // Get the combined AddrMode (or the only AddrMode, if we only had one).
4625 ExtAddrMode AddrMode = AddrModes.getAddrMode();
4626
4627 // If all the instructions matched are already in this BB, don't do anything.
4628 // If we saw a Phi node then it is definitely not local, and if we saw a select
4629 // then we want to push the address calculation past it even if it's already
4630 // in this BB.
4631 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { 4632 return IsNonLocalValue(V, MemoryInst->getParent()); 4633 })) { 4634 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); 4635 return false; 4636 } 4637 4638 // Insert this computation right after this user. Since our caller is 4639 // scanning from the top of the BB to the bottom, reuse of the expr are 4640 // guaranteed to happen later. 4641 IRBuilder<> Builder(MemoryInst); 4642 4643 // Now that we determined the addressing expression we want to use and know 4644 // that we have to sink it into this block. Check to see if we have already 4645 // done this for some other load/store instr in this block. If so, reuse the 4646 // computation. 4647 Value *&SunkAddr = SunkAddrs[Addr]; 4648 if (SunkAddr) { 4649 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " 4650 << *MemoryInst << "\n"); 4651 if (SunkAddr->getType() != Addr->getType()) 4652 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4653 } else if (AddrSinkUsingGEPs || 4654 (!AddrSinkUsingGEPs.getNumOccurrences() && TM && 4655 SubtargetInfo->useAA())) { 4656 // By default, we use the GEP-based method when AA is used later. This 4657 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 4658 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 4659 << *MemoryInst << "\n"); 4660 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4661 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 4662 4663 // First, find the pointer. 4664 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 4665 ResultPtr = AddrMode.BaseReg; 4666 AddrMode.BaseReg = nullptr; 4667 } 4668 4669 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 4670 // We can't add more than one pointer together, nor can we scale a 4671 // pointer (both of which seem meaningless). 4672 if (ResultPtr || AddrMode.Scale != 1) 4673 return false; 4674 4675 ResultPtr = AddrMode.ScaledReg; 4676 AddrMode.Scale = 0; 4677 } 4678 4679 // It is only safe to sign extend the BaseReg if we know that the math 4680 // required to create it did not overflow before we extend it. Since 4681 // the original IR value was tossed in favor of a constant back when 4682 // the AddrMode was created we need to bail out gracefully if widths 4683 // do not match instead of extending it. 4684 // 4685 // (See below for code to add the scale.) 4686 if (AddrMode.Scale) { 4687 Type *ScaledRegTy = AddrMode.ScaledReg->getType(); 4688 if (cast<IntegerType>(IntPtrTy)->getBitWidth() > 4689 cast<IntegerType>(ScaledRegTy)->getBitWidth()) 4690 return false; 4691 } 4692 4693 if (AddrMode.BaseGV) { 4694 if (ResultPtr) 4695 return false; 4696 4697 ResultPtr = AddrMode.BaseGV; 4698 } 4699 4700 // If the real base value actually came from an inttoptr, then the matcher 4701 // will look through it and provide only the integer value. In that case, 4702 // use it here. 
4703 if (!DL->isNonIntegralPointerType(Addr->getType())) { 4704 if (!ResultPtr && AddrMode.BaseReg) { 4705 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), 4706 "sunkaddr"); 4707 AddrMode.BaseReg = nullptr; 4708 } else if (!ResultPtr && AddrMode.Scale == 1) { 4709 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), 4710 "sunkaddr"); 4711 AddrMode.Scale = 0; 4712 } 4713 } 4714 4715 if (!ResultPtr && 4716 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 4717 SunkAddr = Constant::getNullValue(Addr->getType()); 4718 } else if (!ResultPtr) { 4719 return false; 4720 } else { 4721 Type *I8PtrTy = 4722 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 4723 Type *I8Ty = Builder.getInt8Ty(); 4724 4725 // Start with the base register. Do this first so that subsequent address 4726 // matching finds it last, which will prevent it from trying to match it 4727 // as the scaled value in case it happens to be a mul. That would be 4728 // problematic if we've sunk a different mul for the scale, because then 4729 // we'd end up sinking both muls. 4730 if (AddrMode.BaseReg) { 4731 Value *V = AddrMode.BaseReg; 4732 if (V->getType() != IntPtrTy) 4733 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4734 4735 ResultIndex = V; 4736 } 4737 4738 // Add the scale value. 4739 if (AddrMode.Scale) { 4740 Value *V = AddrMode.ScaledReg; 4741 if (V->getType() == IntPtrTy) { 4742 // done. 4743 } else { 4744 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < 4745 cast<IntegerType>(V->getType())->getBitWidth() && 4746 "We can't transform if ScaledReg is too narrow"); 4747 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4748 } 4749 4750 if (AddrMode.Scale != 1) 4751 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4752 "sunkaddr"); 4753 if (ResultIndex) 4754 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 4755 else 4756 ResultIndex = V; 4757 } 4758 4759 // Add in the Base Offset if present. 4760 if (AddrMode.BaseOffs) { 4761 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4762 if (ResultIndex) { 4763 // We need to add this separately from the scale above to help with 4764 // SDAG consecutive load/store merging. 4765 if (ResultPtr->getType() != I8PtrTy) 4766 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4767 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4768 } 4769 4770 ResultIndex = V; 4771 } 4772 4773 if (!ResultIndex) { 4774 SunkAddr = ResultPtr; 4775 } else { 4776 if (ResultPtr->getType() != I8PtrTy) 4777 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4778 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4779 } 4780 4781 if (SunkAddr->getType() != Addr->getType()) 4782 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4783 } 4784 } else { 4785 // We'd require a ptrtoint/inttoptr down the line, which we can't do for 4786 // non-integral pointers, so in that case bail out now. 4787 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; 4788 Type *ScaleTy = AddrMode.Scale ? 
AddrMode.ScaledReg->getType() : nullptr; 4789 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); 4790 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); 4791 if (DL->isNonIntegralPointerType(Addr->getType()) || 4792 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || 4793 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || 4794 (AddrMode.BaseGV && 4795 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) 4796 return false; 4797 4798 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 4799 << *MemoryInst << "\n"); 4800 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4801 Value *Result = nullptr; 4802 4803 // Start with the base register. Do this first so that subsequent address 4804 // matching finds it last, which will prevent it from trying to match it 4805 // as the scaled value in case it happens to be a mul. That would be 4806 // problematic if we've sunk a different mul for the scale, because then 4807 // we'd end up sinking both muls. 4808 if (AddrMode.BaseReg) { 4809 Value *V = AddrMode.BaseReg; 4810 if (V->getType()->isPointerTy()) 4811 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4812 if (V->getType() != IntPtrTy) 4813 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4814 Result = V; 4815 } 4816 4817 // Add the scale value. 4818 if (AddrMode.Scale) { 4819 Value *V = AddrMode.ScaledReg; 4820 if (V->getType() == IntPtrTy) { 4821 // done. 4822 } else if (V->getType()->isPointerTy()) { 4823 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4824 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 4825 cast<IntegerType>(V->getType())->getBitWidth()) { 4826 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4827 } else { 4828 // It is only safe to sign extend the BaseReg if we know that the math 4829 // required to create it did not overflow before we extend it. Since 4830 // the original IR value was tossed in favor of a constant back when 4831 // the AddrMode was created we need to bail out gracefully if widths 4832 // do not match instead of extending it. 4833 Instruction *I = dyn_cast_or_null<Instruction>(Result); 4834 if (I && (Result != AddrMode.BaseReg)) 4835 I->eraseFromParent(); 4836 return false; 4837 } 4838 if (AddrMode.Scale != 1) 4839 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4840 "sunkaddr"); 4841 if (Result) 4842 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4843 else 4844 Result = V; 4845 } 4846 4847 // Add in the BaseGV if present. 4848 if (AddrMode.BaseGV) { 4849 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 4850 if (Result) 4851 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4852 else 4853 Result = V; 4854 } 4855 4856 // Add in the Base Offset if present. 4857 if (AddrMode.BaseOffs) { 4858 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4859 if (Result) 4860 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4861 else 4862 Result = V; 4863 } 4864 4865 if (!Result) 4866 SunkAddr = Constant::getNullValue(Addr->getType()); 4867 else 4868 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 4869 } 4870 4871 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 4872 4873 // If we have no uses, recursively delete the value and all dead instructions 4874 // using it. 4875 if (Repl->use_empty()) { 4876 // This can cause recursive deletion, which can invalidate our iterator. 4877 // Use a WeakTrackingVH to hold onto it in case this happens. 
4878 Value *CurValue = &*CurInstIterator; 4879 WeakTrackingVH IterHandle(CurValue); 4880 BasicBlock *BB = CurInstIterator->getParent(); 4881 4882 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 4883 4884 if (IterHandle != CurValue) { 4885 // If the iterator instruction was recursively deleted, start over at the 4886 // start of the block. 4887 CurInstIterator = BB->begin(); 4888 SunkAddrs.clear(); 4889 } 4890 } 4891 ++NumMemoryInsts; 4892 return true; 4893 } 4894 4895 /// If there are any memory operands, use OptimizeMemoryInst to sink their 4896 /// address computing into the block when possible / profitable. 4897 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 4898 bool MadeChange = false; 4899 4900 const TargetRegisterInfo *TRI = 4901 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); 4902 TargetLowering::AsmOperandInfoVector TargetConstraints = 4903 TLI->ParseConstraints(*DL, TRI, CS); 4904 unsigned ArgNo = 0; 4905 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4906 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4907 4908 // Compute the constraint code and ConstraintType to use. 4909 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 4910 4911 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 4912 OpInfo.isIndirect) { 4913 Value *OpVal = CS->getArgOperand(ArgNo++); 4914 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 4915 } else if (OpInfo.Type == InlineAsm::isInput) 4916 ArgNo++; 4917 } 4918 4919 return MadeChange; 4920 } 4921 4922 /// \brief Check if all the uses of \p Val are equivalent (or free) zero or 4923 /// sign extensions. 4924 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 4925 assert(!Val->use_empty() && "Input must have at least one use"); 4926 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 4927 bool IsSExt = isa<SExtInst>(FirstUser); 4928 Type *ExtTy = FirstUser->getType(); 4929 for (const User *U : Val->users()) { 4930 const Instruction *UI = cast<Instruction>(U); 4931 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 4932 return false; 4933 Type *CurTy = UI->getType(); 4934 // Same input and output types: Same instruction after CSE. 4935 if (CurTy == ExtTy) 4936 continue; 4937 4938 // If IsSExt is true, we are in this situation: 4939 // a = Val 4940 // b = sext ty1 a to ty2 4941 // c = sext ty1 a to ty3 4942 // Assuming ty2 is shorter than ty3, this could be turned into: 4943 // a = Val 4944 // b = sext ty1 a to ty2 4945 // c = sext ty2 b to ty3 4946 // However, the last sext is not free. 4947 if (IsSExt) 4948 return false; 4949 4950 // This is a ZExt, maybe this is free to extend from one type to another. 4951 // In that case, we would not account for a different use. 4952 Type *NarrowTy; 4953 Type *LargeTy; 4954 if (ExtTy->getScalarType()->getIntegerBitWidth() > 4955 CurTy->getScalarType()->getIntegerBitWidth()) { 4956 NarrowTy = CurTy; 4957 LargeTy = ExtTy; 4958 } else { 4959 NarrowTy = ExtTy; 4960 LargeTy = CurTy; 4961 } 4962 4963 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 4964 return false; 4965 } 4966 // All uses are the same or can be derived from one another for free. 4967 return true; 4968 } 4969 4970 /// \brief Try to speculatively promote extensions in \p Exts and continue 4971 /// promoting through newly promoted operands recursively as far as doing so is 4972 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 
4973 /// When some promotion happened, \p TPT contains the proper state to revert 4974 /// them. 4975 /// 4976 /// \return true if some promotion happened, false otherwise. 4977 bool CodeGenPrepare::tryToPromoteExts( 4978 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 4979 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 4980 unsigned CreatedInstsCost) { 4981 bool Promoted = false; 4982 4983 // Iterate over all the extensions to try to promote them. 4984 for (auto I : Exts) { 4985 // Early check if we directly have ext(load). 4986 if (isa<LoadInst>(I->getOperand(0))) { 4987 ProfitablyMovedExts.push_back(I); 4988 continue; 4989 } 4990 4991 // Check whether or not we want to do any promotion. The reason we have 4992 // this check inside the for loop is to catch the case where an extension 4993 // is directly fed by a load because in such case the extension can be moved 4994 // up without any promotion on its operands. 4995 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) 4996 return false; 4997 4998 // Get the action to perform the promotion. 4999 TypePromotionHelper::Action TPH = 5000 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 5001 // Check if we can promote. 5002 if (!TPH) { 5003 // Save the current extension as we cannot move up through its operand. 5004 ProfitablyMovedExts.push_back(I); 5005 continue; 5006 } 5007 5008 // Save the current state. 5009 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5010 TPT.getRestorationPoint(); 5011 SmallVector<Instruction *, 4> NewExts; 5012 unsigned NewCreatedInstsCost = 0; 5013 unsigned ExtCost = !TLI->isExtFree(I); 5014 // Promote. 5015 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, 5016 &NewExts, nullptr, *TLI); 5017 assert(PromotedVal && 5018 "TypePromotionHelper should have filtered out those cases"); 5019 5020 // We would be able to merge only one extension in a load. 5021 // Therefore, if we have more than 1 new extension we heuristically 5022 // cut this search path, because it means we degrade the code quality. 5023 // With exactly 2, the transformation is neutral, because we will merge 5024 // one extension but leave one. However, we optimistically keep going, 5025 // because the new extension may be removed too. 5026 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; 5027 // FIXME: It would be possible to propagate a negative value instead of 5028 // conservatively ceiling it to 0. 5029 TotalCreatedInstsCost = 5030 std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); 5031 if (!StressExtLdPromotion && 5032 (TotalCreatedInstsCost > 1 || 5033 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { 5034 // This promotion is not profitable, rollback to the previous state, and 5035 // save the current extension in ProfitablyMovedExts as the latest 5036 // speculative promotion turned out to be unprofitable. 5037 TPT.rollback(LastKnownGood); 5038 ProfitablyMovedExts.push_back(I); 5039 continue; 5040 } 5041 // Continue promoting NewExts as far as doing so is profitable. 
5042 SmallVector<Instruction *, 2> NewlyMovedExts;
5043 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
5044 bool NewPromoted = false;
5045 for (auto ExtInst : NewlyMovedExts) {
5046 Instruction *MovedExt = cast<Instruction>(ExtInst);
5047 Value *ExtOperand = MovedExt->getOperand(0);
5048 // If we have reached a load, we need this extra profitability check
5049 // as it could potentially be merged into an ext(load).
5050 if (isa<LoadInst>(ExtOperand) &&
5051 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
5052 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
5053 continue;
5054
5055 ProfitablyMovedExts.push_back(MovedExt);
5056 NewPromoted = true;
5057 }
5058
5059 // If none of the speculative promotions for NewExts is profitable, rollback
5060 // and save the current extension (I) as the last profitable extension.
5061 if (!NewPromoted) {
5062 TPT.rollback(LastKnownGood);
5063 ProfitablyMovedExts.push_back(I);
5064 continue;
5065 }
5066 // The promotion is profitable.
5067 Promoted = true;
5068 }
5069 return Promoted;
5070 }
5071
5072 /// Merge redundant sexts when one dominates the other.
5073 bool CodeGenPrepare::mergeSExts(Function &F) {
5074 DominatorTree DT(F);
5075 bool Changed = false;
5076 for (auto &Entry : ValToSExtendedUses) {
5077 SExts &Insts = Entry.second;
5078 SExts CurPts;
5079 for (Instruction *Inst : Insts) {
5080 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
5081 Inst->getOperand(0) != Entry.first)
5082 continue;
5083 bool inserted = false;
5084 for (auto &Pt : CurPts) {
5085 if (DT.dominates(Inst, Pt)) {
5086 Pt->replaceAllUsesWith(Inst);
5087 RemovedInsts.insert(Pt);
5088 Pt->removeFromParent();
5089 Pt = Inst;
5090 inserted = true;
5091 Changed = true;
5092 break;
5093 }
5094 if (!DT.dominates(Pt, Inst))
5095 // Give up if we need to merge in a common dominator as the
5096 // experiments show it is not profitable.
5097 continue;
5098 Inst->replaceAllUsesWith(Pt);
5099 RemovedInsts.insert(Inst);
5100 Inst->removeFromParent();
5101 inserted = true;
5102 Changed = true;
5103 break;
5104 }
5105 if (!inserted)
5106 CurPts.push_back(Inst);
5107 }
5108 }
5109 return Changed;
5110 }
5111
5112 /// Return true if an ext(load) can be formed from an extension in
5113 /// \p MovedExts.
5114 bool CodeGenPrepare::canFormExtLd(
5115 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
5116 Instruction *&Inst, bool HasPromoted) {
5117 for (auto *MovedExtInst : MovedExts) {
5118 if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
5119 LI = cast<LoadInst>(MovedExtInst->getOperand(0));
5120 Inst = MovedExtInst;
5121 break;
5122 }
5123 }
5124 if (!LI)
5125 return false;
5126
5127 // If they're already in the same block, there's nothing to do.
5128 // Make the cheap checks first if we did not promote.
5129 // If we promoted, we need to check if it is indeed profitable.
5130 if (!HasPromoted && LI->getParent() == Inst->getParent())
5131 return false;
5132
5133 return TLI->isExtLoad(LI, Inst, *DL);
5134 }
5135
5136 /// Move a zext or sext fed by a load into the same basic block as the load,
5137 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
5138 /// extend into the load.
5139 ///
5140 /// E.g.,
5141 /// \code
5142 /// %ld = load i32* %addr
5143 /// %add = add nuw i32 %ld, 4
5144 /// %zext = zext i32 %add to i64
5145 /// \endcode
5146 /// =>
5147 /// \code
5148 /// %ld = load i32* %addr
5149 /// %zext = zext i32 %ld to i64
5150 /// %add = add nuw i64 %zext, 4
5151 /// \endcode
5152 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
5153 /// allows us to match zext(load i32*) to i64.
5154 ///
5155 /// Also, try to promote the computations used to obtain a sign extended
5156 /// value used in memory accesses.
5157 /// E.g.,
5158 /// \code
5159 /// a = add nsw i32 b, 3
5160 /// d = sext i32 a to i64
5161 /// e = getelementptr ..., i64 d
5162 /// \endcode
5163 /// =>
5164 /// \code
5165 /// f = sext i32 b to i64
5166 /// a = add nsw i64 f, 3
5167 /// e = getelementptr ..., i64 a
5168 /// \endcode
5169 ///
5170 /// \p Inst[in/out] the extension may be modified during the process if some
5171 /// promotions apply.
5172 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
5173 // The ExtLoad formation and address type promotion infrastructure requires
5174 // TLI to be effective.
5175 if (!TLI)
5176 return false;
5177
5178 bool AllowPromotionWithoutCommonHeader = false;
5179 /// See if it is an interesting sext operation for the address type
5180 /// promotion before trying to promote it, e.g., the ones with the right
5181 /// type and used in memory accesses.
5182 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
5183 *Inst, AllowPromotionWithoutCommonHeader);
5184 TypePromotionTransaction TPT(RemovedInsts);
5185 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5186 TPT.getRestorationPoint();
5187 SmallVector<Instruction *, 1> Exts;
5188 SmallVector<Instruction *, 2> SpeculativelyMovedExts;
5189 Exts.push_back(Inst);
5190
5191 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
5192
5193 // Look for a load being extended.
5194 LoadInst *LI = nullptr;
5195 Instruction *ExtFedByLoad;
5196
5197 // Try to promote a chain of computation if it allows forming an extended
5198 // load.
5199 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
5200 assert(LI && ExtFedByLoad && "Expect a valid load and extension");
5201 TPT.commit();
5202 // Move the extend into the same block as the load.
5203 ExtFedByLoad->moveAfter(LI);
5204 // CGP does not check if the zext would be speculatively executed when moved
5205 // to the same basic block as the load. Preserving its original location
5206 // would pessimize the debugging experience, as well as negatively impact
5207 // the quality of sample pgo. We don't want to use "line 0" as that has a
5208 // size cost in the line-table section and logically the zext can be seen as
5209 // part of the load. Therefore we conservatively reuse the same debug
5210 // location for the load and the zext.
5211 ExtFedByLoad->setDebugLoc(LI->getDebugLoc());
5212 ++NumExtsMoved;
5213 Inst = ExtFedByLoad;
5214 return true;
5215 }
5216
5217 // Continue promoting SExts if the target considers address type promotion
5218 // worthwhile.
5219 if (ATPConsiderable &&
5220 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
5220 HasPromoted, TPT, SpeculativelyMovedExts))
5221 return true;
5222
5223 TPT.rollback(LastKnownGood);
5224 return false;
5225 }
5226
5227 // Perform address type promotion if doing so is profitable.
5228 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
5229 // instructions that sign extended the same initial value.
However, if 5230 // AllowPromotionWithoutCommonHeader == true, we expect promoting the 5231 // extension is just profitable. 5232 bool CodeGenPrepare::performAddressTypePromotion( 5233 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 5234 bool HasPromoted, TypePromotionTransaction &TPT, 5235 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 5236 bool Promoted = false; 5237 SmallPtrSet<Instruction *, 1> UnhandledExts; 5238 bool AllSeenFirst = true; 5239 for (auto I : SpeculativelyMovedExts) { 5240 Value *HeadOfChain = I->getOperand(0); 5241 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 5242 SeenChainsForSExt.find(HeadOfChain); 5243 // If there is an unhandled SExt which has the same header, try to promote 5244 // it as well. 5245 if (AlreadySeen != SeenChainsForSExt.end()) { 5246 if (AlreadySeen->second != nullptr) 5247 UnhandledExts.insert(AlreadySeen->second); 5248 AllSeenFirst = false; 5249 } 5250 } 5251 5252 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 5253 SpeculativelyMovedExts.size() == 1)) { 5254 TPT.commit(); 5255 if (HasPromoted) 5256 Promoted = true; 5257 for (auto I : SpeculativelyMovedExts) { 5258 Value *HeadOfChain = I->getOperand(0); 5259 SeenChainsForSExt[HeadOfChain] = nullptr; 5260 ValToSExtendedUses[HeadOfChain].push_back(I); 5261 } 5262 // Update Inst as promotion happen. 5263 Inst = SpeculativelyMovedExts.pop_back_val(); 5264 } else { 5265 // This is the first chain visited from the header, keep the current chain 5266 // as unhandled. Defer to promote this until we encounter another SExt 5267 // chain derived from the same header. 5268 for (auto I : SpeculativelyMovedExts) { 5269 Value *HeadOfChain = I->getOperand(0); 5270 SeenChainsForSExt[HeadOfChain] = Inst; 5271 } 5272 return false; 5273 } 5274 5275 if (!AllSeenFirst && !UnhandledExts.empty()) 5276 for (auto VisitedSExt : UnhandledExts) { 5277 if (RemovedInsts.count(VisitedSExt)) 5278 continue; 5279 TypePromotionTransaction TPT(RemovedInsts); 5280 SmallVector<Instruction *, 1> Exts; 5281 SmallVector<Instruction *, 2> Chains; 5282 Exts.push_back(VisitedSExt); 5283 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 5284 TPT.commit(); 5285 if (HasPromoted) 5286 Promoted = true; 5287 for (auto I : Chains) { 5288 Value *HeadOfChain = I->getOperand(0); 5289 // Mark this as handled. 5290 SeenChainsForSExt[HeadOfChain] = nullptr; 5291 ValToSExtendedUses[HeadOfChain].push_back(I); 5292 } 5293 } 5294 return Promoted; 5295 } 5296 5297 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 5298 BasicBlock *DefBB = I->getParent(); 5299 5300 // If the result of a {s|z}ext and its source are both live out, rewrite all 5301 // other uses of the source with result of extension. 5302 Value *Src = I->getOperand(0); 5303 if (Src->hasOneUse()) 5304 return false; 5305 5306 // Only do this xform if truncating is free. 5307 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 5308 return false; 5309 5310 // Only safe to perform the optimization if the source is also defined in 5311 // this block. 5312 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 5313 return false; 5314 5315 bool DefIsLiveOut = false; 5316 for (User *U : I->users()) { 5317 Instruction *UI = cast<Instruction>(U); 5318 5319 // Figure out which BB this ext is used in. 5320 BasicBlock *UserBB = UI->getParent(); 5321 if (UserBB == DefBB) continue; 5322 DefIsLiveOut = true; 5323 break; 5324 } 5325 if (!DefIsLiveOut) 5326 return false; 5327 5328 // Make sure none of the uses are PHI nodes. 
5329 for (User *U : Src->users()) { 5330 Instruction *UI = cast<Instruction>(U); 5331 BasicBlock *UserBB = UI->getParent(); 5332 if (UserBB == DefBB) continue; 5333 // Be conservative. We don't want this xform to end up introducing 5334 // reloads just before load / store instructions. 5335 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 5336 return false; 5337 } 5338 5339 // InsertedTruncs - Only insert one trunc in each block once. 5340 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 5341 5342 bool MadeChange = false; 5343 for (Use &U : Src->uses()) { 5344 Instruction *User = cast<Instruction>(U.getUser()); 5345 5346 // Figure out which BB this ext is used in. 5347 BasicBlock *UserBB = User->getParent(); 5348 if (UserBB == DefBB) continue; 5349 5350 // Both src and def are live in this block. Rewrite the use. 5351 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 5352 5353 if (!InsertedTrunc) { 5354 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 5355 assert(InsertPt != UserBB->end()); 5356 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 5357 InsertedInsts.insert(InsertedTrunc); 5358 } 5359 5360 // Replace a use of the {s|z}ext source with a use of the result. 5361 U = InsertedTrunc; 5362 ++NumExtUses; 5363 MadeChange = true; 5364 } 5365 5366 return MadeChange; 5367 } 5368 5369 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 5370 // just after the load if the target can fold this into one extload instruction, 5371 // with the hope of eliminating some of the other later "and" instructions using 5372 // the loaded value. "and"s that are made trivially redundant by the insertion 5373 // of the new "and" are removed by this function, while others (e.g. those whose 5374 // path from the load goes through a phi) are left for isel to potentially 5375 // remove. 5376 // 5377 // For example: 5378 // 5379 // b0: 5380 // x = load i32 5381 // ... 5382 // b1: 5383 // y = and x, 0xff 5384 // z = use y 5385 // 5386 // becomes: 5387 // 5388 // b0: 5389 // x = load i32 5390 // x' = and x, 0xff 5391 // ... 5392 // b1: 5393 // z = use x' 5394 // 5395 // whereas: 5396 // 5397 // b0: 5398 // x1 = load i32 5399 // ... 5400 // b1: 5401 // x2 = load i32 5402 // ... 5403 // b2: 5404 // x = phi x1, x2 5405 // y = and x, 0xff 5406 // 5407 // becomes (after a call to optimizeLoadExt for each load): 5408 // 5409 // b0: 5410 // x1 = load i32 5411 // x1' = and x1, 0xff 5412 // ... 5413 // b1: 5414 // x2 = load i32 5415 // x2' = and x2, 0xff 5416 // ... 5417 // b2: 5418 // x = phi x1', x2' 5419 // y = and x, 0xff 5420 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 5421 if (!Load->isSimple() || 5422 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) 5423 return false; 5424 5425 // Skip loads we've already transformed. 5426 if (Load->hasOneUse() && 5427 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 5428 return false; 5429 5430 // Look at all uses of Load, looking through phis, to determine how many bits 5431 // of the loaded value are needed. 
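  // For example (illustrative only): a user 'and i32 %x, 255' demands the low
  // 8 bits, 'trunc i32 %x to i16' demands the low 16 bits, and 'shl i32 %x, 8'
  // demands all but the top 8 bits; the bits demanded by each user are
  // accumulated into DemandBits by the loop below.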
5432 SmallVector<Instruction *, 8> WorkList; 5433 SmallPtrSet<Instruction *, 16> Visited; 5434 SmallVector<Instruction *, 8> AndsToMaybeRemove; 5435 for (auto *U : Load->users()) 5436 WorkList.push_back(cast<Instruction>(U)); 5437 5438 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 5439 unsigned BitWidth = LoadResultVT.getSizeInBits(); 5440 APInt DemandBits(BitWidth, 0); 5441 APInt WidestAndBits(BitWidth, 0); 5442 5443 while (!WorkList.empty()) { 5444 Instruction *I = WorkList.back(); 5445 WorkList.pop_back(); 5446 5447 // Break use-def graph loops. 5448 if (!Visited.insert(I).second) 5449 continue; 5450 5451 // For a PHI node, push all of its users. 5452 if (auto *Phi = dyn_cast<PHINode>(I)) { 5453 for (auto *U : Phi->users()) 5454 WorkList.push_back(cast<Instruction>(U)); 5455 continue; 5456 } 5457 5458 switch (I->getOpcode()) { 5459 case Instruction::And: { 5460 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 5461 if (!AndC) 5462 return false; 5463 APInt AndBits = AndC->getValue(); 5464 DemandBits |= AndBits; 5465 // Keep track of the widest and mask we see. 5466 if (AndBits.ugt(WidestAndBits)) 5467 WidestAndBits = AndBits; 5468 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 5469 AndsToMaybeRemove.push_back(I); 5470 break; 5471 } 5472 5473 case Instruction::Shl: { 5474 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 5475 if (!ShlC) 5476 return false; 5477 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 5478 DemandBits.setLowBits(BitWidth - ShiftAmt); 5479 break; 5480 } 5481 5482 case Instruction::Trunc: { 5483 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 5484 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 5485 DemandBits.setLowBits(TruncBitWidth); 5486 break; 5487 } 5488 5489 default: 5490 return false; 5491 } 5492 } 5493 5494 uint32_t ActiveBits = DemandBits.getActiveBits(); 5495 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 5496 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 5497 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 5498 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 5499 // followed by an AND. 5500 // TODO: Look into removing this restriction by fixing backends to either 5501 // return false for isLoadExtLegal for i1 or have them select this pattern to 5502 // a single instruction. 5503 // 5504 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 5505 // mask, since these are the only ands that will be removed by isel. 5506 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 5507 WidestAndBits != DemandBits) 5508 return false; 5509 5510 LLVMContext &Ctx = Load->getType()->getContext(); 5511 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 5512 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 5513 5514 // Reject cases that won't be matched as extloads. 5515 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 5516 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 5517 return false; 5518 5519 IRBuilder<> Builder(Load->getNextNode()); 5520 auto *NewAnd = dyn_cast<Instruction>( 5521 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 5522 // Mark this instruction as "inserted by CGP", so that other 5523 // optimizations don't touch it. 5524 InsertedInsts.insert(NewAnd); 5525 5526 // Replace all uses of load with new and (except for the use of load in the 5527 // new and itself). 
5528 Load->replaceAllUsesWith(NewAnd); 5529 NewAnd->setOperand(0, Load); 5530 5531 // Remove any and instructions that are now redundant. 5532 for (auto *And : AndsToMaybeRemove) 5533 // Check that the and mask is the same as the one we decided to put on the 5534 // new and. 5535 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 5536 And->replaceAllUsesWith(NewAnd); 5537 if (&*CurInstIterator == And) 5538 CurInstIterator = std::next(And->getIterator()); 5539 And->eraseFromParent(); 5540 ++NumAndUses; 5541 } 5542 5543 ++NumAndsAdded; 5544 return true; 5545 } 5546 5547 /// Check if V (an operand of a select instruction) is an expensive instruction 5548 /// that is only used once. 5549 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 5550 auto *I = dyn_cast<Instruction>(V); 5551 // If it's safe to speculatively execute, then it should not have side 5552 // effects; therefore, it's safe to sink and possibly *not* execute. 5553 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 5554 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; 5555 } 5556 5557 /// Returns true if a SelectInst should be turned into an explicit branch. 5558 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 5559 const TargetLowering *TLI, 5560 SelectInst *SI) { 5561 // If even a predictable select is cheap, then a branch can't be cheaper. 5562 if (!TLI->isPredictableSelectExpensive()) 5563 return false; 5564 5565 // FIXME: This should use the same heuristics as IfConversion to determine 5566 // whether a select is better represented as a branch. 5567 5568 // If metadata tells us that the select condition is obviously predictable, 5569 // then we want to replace the select with a branch. 5570 uint64_t TrueWeight, FalseWeight; 5571 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 5572 uint64_t Max = std::max(TrueWeight, FalseWeight); 5573 uint64_t Sum = TrueWeight + FalseWeight; 5574 if (Sum != 0) { 5575 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 5576 if (Probability > TLI->getPredictableBranchThreshold()) 5577 return true; 5578 } 5579 } 5580 5581 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 5582 5583 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 5584 // comparison condition. If the compare has more than one use, there's 5585 // probably another cmov or setcc around, so it's not worth emitting a branch. 5586 if (!Cmp || !Cmp->hasOneUse()) 5587 return false; 5588 5589 // If either operand of the select is expensive and only needed on one side 5590 // of the select, we should form a branch. 5591 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 5592 sinkSelectOperand(TTI, SI->getFalseValue())) 5593 return true; 5594 5595 return false; 5596 } 5597 5598 /// If \p isTrue is true, return the true value of \p SI, otherwise return 5599 /// false value of \p SI. If the true/false value of \p SI is defined by any 5600 /// select instructions in \p Selects, look through the defining select 5601 /// instruction until the true/false value is not defined in \p Selects. 5602 static Value *getTrueOrFalseValue( 5603 SelectInst *SI, bool isTrue, 5604 const SmallPtrSet<const Instruction *, 2> &Selects) { 5605 Value *V; 5606 5607 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 5608 DefSI = dyn_cast<SelectInst>(V)) { 5609 assert(DefSI->getCondition() == SI->getCondition() && 5610 "The condition of DefSI does not match with SI"); 5611 V = (isTrue ? 
DefSI->getTrueValue() : DefSI->getFalseValue()); 5612 } 5613 return V; 5614 } 5615 5616 /// If we have a SelectInst that will likely profit from branch prediction, 5617 /// turn it into a branch. 5618 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 5619 // Find all consecutive select instructions that share the same condition. 5620 SmallVector<SelectInst *, 2> ASI; 5621 ASI.push_back(SI); 5622 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 5623 It != SI->getParent()->end(); ++It) { 5624 SelectInst *I = dyn_cast<SelectInst>(&*It); 5625 if (I && SI->getCondition() == I->getCondition()) { 5626 ASI.push_back(I); 5627 } else { 5628 break; 5629 } 5630 } 5631 5632 SelectInst *LastSI = ASI.back(); 5633 // Increment the current iterator to skip all the rest of select instructions 5634 // because they will be either "not lowered" or "all lowered" to branch. 5635 CurInstIterator = std::next(LastSI->getIterator()); 5636 5637 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 5638 5639 // Can we convert the 'select' to CF ? 5640 if (DisableSelectToBranch || OptSize || !TLI || VectorCond || 5641 SI->getMetadata(LLVMContext::MD_unpredictable)) 5642 return false; 5643 5644 TargetLowering::SelectSupportKind SelectKind; 5645 if (VectorCond) 5646 SelectKind = TargetLowering::VectorMaskSelect; 5647 else if (SI->getType()->isVectorTy()) 5648 SelectKind = TargetLowering::ScalarCondVectorVal; 5649 else 5650 SelectKind = TargetLowering::ScalarValSelect; 5651 5652 if (TLI->isSelectSupported(SelectKind) && 5653 !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) 5654 return false; 5655 5656 ModifiedDT = true; 5657 5658 // Transform a sequence like this: 5659 // start: 5660 // %cmp = cmp uge i32 %a, %b 5661 // %sel = select i1 %cmp, i32 %c, i32 %d 5662 // 5663 // Into: 5664 // start: 5665 // %cmp = cmp uge i32 %a, %b 5666 // br i1 %cmp, label %select.true, label %select.false 5667 // select.true: 5668 // br label %select.end 5669 // select.false: 5670 // br label %select.end 5671 // select.end: 5672 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 5673 // 5674 // In addition, we may sink instructions that produce %c or %d from 5675 // the entry block into the destination(s) of the new branch. 5676 // If the true or false blocks do not contain a sunken instruction, that 5677 // block and its branch may be optimized away. In that case, one side of the 5678 // first branch will point directly to select.end, and the corresponding PHI 5679 // predecessor block will be the start block. 5680 5681 // First, we split the block containing the select into 2 blocks. 5682 BasicBlock *StartBlock = SI->getParent(); 5683 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 5684 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 5685 5686 // Delete the unconditional branch that was just created by the split. 5687 StartBlock->getTerminator()->eraseFromParent(); 5688 5689 // These are the new basic blocks for the conditional branch. 5690 // At least one will become an actual new basic block. 5691 BasicBlock *TrueBlock = nullptr; 5692 BasicBlock *FalseBlock = nullptr; 5693 BranchInst *TrueBranch = nullptr; 5694 BranchInst *FalseBranch = nullptr; 5695 5696 // Sink expensive instructions into the conditional blocks to avoid executing 5697 // them speculatively. 
5698 for (SelectInst *SI : ASI) { 5699 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 5700 if (TrueBlock == nullptr) { 5701 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 5702 EndBlock->getParent(), EndBlock); 5703 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 5704 } 5705 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 5706 TrueInst->moveBefore(TrueBranch); 5707 } 5708 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 5709 if (FalseBlock == nullptr) { 5710 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 5711 EndBlock->getParent(), EndBlock); 5712 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5713 } 5714 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 5715 FalseInst->moveBefore(FalseBranch); 5716 } 5717 } 5718 5719 // If there was nothing to sink, then arbitrarily choose the 'false' side 5720 // for a new input value to the PHI. 5721 if (TrueBlock == FalseBlock) { 5722 assert(TrueBlock == nullptr && 5723 "Unexpected basic block transform while optimizing select"); 5724 5725 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 5726 EndBlock->getParent(), EndBlock); 5727 BranchInst::Create(EndBlock, FalseBlock); 5728 } 5729 5730 // Insert the real conditional branch based on the original condition. 5731 // If we did not create a new block for one of the 'true' or 'false' paths 5732 // of the condition, it means that side of the branch goes to the end block 5733 // directly and the path originates from the start block from the point of 5734 // view of the new PHI. 5735 BasicBlock *TT, *FT; 5736 if (TrueBlock == nullptr) { 5737 TT = EndBlock; 5738 FT = FalseBlock; 5739 TrueBlock = StartBlock; 5740 } else if (FalseBlock == nullptr) { 5741 TT = TrueBlock; 5742 FT = EndBlock; 5743 FalseBlock = StartBlock; 5744 } else { 5745 TT = TrueBlock; 5746 FT = FalseBlock; 5747 } 5748 IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); 5749 5750 SmallPtrSet<const Instruction *, 2> INS; 5751 INS.insert(ASI.begin(), ASI.end()); 5752 // Use reverse iterator because later select may use the value of the 5753 // earlier select, and we need to propagate value through earlier select 5754 // to get the PHI operand. 5755 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 5756 SelectInst *SI = *It; 5757 // The select itself is replaced with a PHI Node. 5758 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 5759 PN->takeName(SI); 5760 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 5761 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 5762 5763 SI->replaceAllUsesWith(PN); 5764 SI->eraseFromParent(); 5765 INS.erase(SI); 5766 ++NumSelectsExpanded; 5767 } 5768 5769 // Instruct OptimizeBlock to skip to the next block. 5770 CurInstIterator = StartBlock->end(); 5771 return true; 5772 } 5773 5774 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 5775 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 5776 int SplatElem = -1; 5777 for (unsigned i = 0; i < Mask.size(); ++i) { 5778 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 5779 return false; 5780 SplatElem = Mask[i]; 5781 } 5782 5783 return true; 5784 } 5785 5786 /// Some targets have expensive vector shifts if the lanes aren't all the same 5787 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 5788 /// it's often worth sinking a shufflevector splat down to its use so that 5789 /// codegen can spot all lanes are identical. 
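///
/// A hypothetical sketch (illustrative pseudo-IR, not the exact output of the
/// pass; the block and value names are made up):
/// \code
/// def:
///   %splat = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
///   br label %use
/// use:
///   %res = shl <4 x i32> %x, %splat
/// \endcode
/// A copy of %splat is inserted near the top of the 'use' block, so that
/// instruction selection sees the splat next to the shift and can select a
/// cheaper shift-by-scalar form.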
5790 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 5791 BasicBlock *DefBB = SVI->getParent(); 5792 5793 // Only do this xform if variable vector shifts are particularly expensive. 5794 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 5795 return false; 5796 5797 // We only expect better codegen by sinking a shuffle if we can recognise a 5798 // constant splat. 5799 if (!isBroadcastShuffle(SVI)) 5800 return false; 5801 5802 // InsertedShuffles - Only insert a shuffle in each block once. 5803 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 5804 5805 bool MadeChange = false; 5806 for (User *U : SVI->users()) { 5807 Instruction *UI = cast<Instruction>(U); 5808 5809 // Figure out which BB this ext is used in. 5810 BasicBlock *UserBB = UI->getParent(); 5811 if (UserBB == DefBB) continue; 5812 5813 // For now only apply this when the splat is used by a shift instruction. 5814 if (!UI->isShift()) continue; 5815 5816 // Everything checks out, sink the shuffle if the user's block doesn't 5817 // already have a copy. 5818 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 5819 5820 if (!InsertedShuffle) { 5821 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 5822 assert(InsertPt != UserBB->end()); 5823 InsertedShuffle = 5824 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 5825 SVI->getOperand(2), "", &*InsertPt); 5826 } 5827 5828 UI->replaceUsesOfWith(SVI, InsertedShuffle); 5829 MadeChange = true; 5830 } 5831 5832 // If we removed all uses, nuke the shuffle. 5833 if (SVI->use_empty()) { 5834 SVI->eraseFromParent(); 5835 MadeChange = true; 5836 } 5837 5838 return MadeChange; 5839 } 5840 5841 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 5842 if (!TLI || !DL) 5843 return false; 5844 5845 Value *Cond = SI->getCondition(); 5846 Type *OldType = Cond->getType(); 5847 LLVMContext &Context = Cond->getContext(); 5848 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 5849 unsigned RegWidth = RegType.getSizeInBits(); 5850 5851 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 5852 return false; 5853 5854 // If the register width is greater than the type width, expand the condition 5855 // of the switch instruction and each case constant to the width of the 5856 // register. By widening the type of the switch condition, subsequent 5857 // comparisons (for case comparisons) will not need to be extended to the 5858 // preferred register width, so we will potentially eliminate N-1 extends, 5859 // where N is the number of cases in the switch. 5860 auto *NewType = Type::getIntNTy(Context, RegWidth); 5861 5862 // Zero-extend the switch condition and case constants unless the switch 5863 // condition is a function argument that is already being sign-extended. 5864 // In that case, we can avoid an unnecessary mask/extension by sign-extending 5865 // everything instead. 5866 Instruction::CastOps ExtType = Instruction::ZExt; 5867 if (auto *Arg = dyn_cast<Argument>(Cond)) 5868 if (Arg->hasSExtAttr()) 5869 ExtType = Instruction::SExt; 5870 5871 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 5872 ExtInst->insertBefore(SI); 5873 SI->setCondition(ExtInst); 5874 for (auto Case : SI->cases()) { 5875 APInt NarrowConst = Case.getCaseValue()->getValue(); 5876 APInt WideConst = (ExtType == Instruction::ZExt) ? 
5877 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
5878 Case.setValue(ConstantInt::get(Context, WideConst));
5879 }
5880
5881 return true;
5882 }
5883
5884
5885 namespace {
5886
5887 /// \brief Helper class to promote a scalar operation to a vector one.
5888 /// This class is used to move an extractelement transition downward.
5889 /// E.g.,
5890 /// a = vector_op <2 x i32>
5891 /// b = extractelement <2 x i32> a, i32 0
5892 /// c = scalar_op b
5893 /// store c
5894 ///
5895 /// =>
5896 /// a = vector_op <2 x i32>
5897 /// c = vector_op a (equivalent to scalar_op on the related lane)
5898 /// * d = extractelement <2 x i32> c, i32 0
5899 /// * store d
5900 /// Assuming both the extractelement and the store can be combined, we get rid
5901 /// of the transition.
5902 class VectorPromoteHelper {
5903 /// DataLayout associated with the current module.
5904 const DataLayout &DL;
5905
5906 /// Used to perform some checks on the legality of vector operations.
5907 const TargetLowering &TLI;
5908
5909 /// Used to estimate the cost of the promoted chain.
5910 const TargetTransformInfo &TTI;
5911
5912 /// The transition being moved downwards.
5913 Instruction *Transition;
5914
5915 /// The sequence of instructions to be promoted.
5916 SmallVector<Instruction *, 4> InstsToBePromoted;
5917
5918 /// Cost of combining a store and an extract.
5919 unsigned StoreExtractCombineCost;
5920
5921 /// Instruction that will be combined with the transition.
5922 Instruction *CombineInst = nullptr;
5923
5924 /// \brief The instruction that represents the current end of the transition.
5925 /// Since we are faking the promotion until we reach the end of the chain
5926 /// of computation, we need a way to get the current end of the transition.
5927 Instruction *getEndOfTransition() const {
5928 if (InstsToBePromoted.empty())
5929 return Transition;
5930 return InstsToBePromoted.back();
5931 }
5932
5933 /// \brief Return the index of the original value in the transition.
5934 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
5935 /// c, is at index 0.
5936 unsigned getTransitionOriginalValueIdx() const {
5937 assert(isa<ExtractElementInst>(Transition) &&
5938 "Other kind of transitions are not supported yet");
5939 return 0;
5940 }
5941
5942 /// \brief Return the index of the index in the transition.
5943 /// E.g., for "extractelement <2 x i32> c, i32 0" the index
5944 /// is at index 1.
5945 unsigned getTransitionIdx() const {
5946 assert(isa<ExtractElementInst>(Transition) &&
5947 "Other kind of transitions are not supported yet");
5948 return 1;
5949 }
5950
5951 /// \brief Get the type of the transition.
5952 /// This is the type of the original value.
5953 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
5954 /// transition is <2 x i32>.
5955 Type *getTransitionType() const {
5956 return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
5957 }
5958
5959 /// \brief Promote \p ToBePromoted by moving \p Def downward past it.
5960 /// I.e., we have the following sequence:
5961 /// Def = Transition <ty1> a to <ty2>
5962 /// b = ToBePromoted <ty2> Def, ...
5963 /// =>
5964 /// b = ToBePromoted <ty1> a, ...
5965 /// Def = Transition <ty1> ToBePromoted to <ty2>
5966 void promoteImpl(Instruction *ToBePromoted);
5967
5968 /// \brief Check whether or not it is profitable to promote all the
5969 /// instructions enqueued to be promoted.
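/// In rough terms, the comparison performed below is (this is a summary of the
/// code that follows, not an additional model):
///   ScalarCost = cost(extract) + sum of the scalar arithmetic costs
///   VectorCost = StoreExtractCombineCost + sum of the vector arithmetic costs
/// and promotion is considered profitable only when ScalarCost > VectorCost.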
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    unsigned Align = ST->getAlignment();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            Align)) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the transition
    // from scalar to vector.
    // The vector chain has to account for the combining cost.
    uint64_t ScalarCost =
        TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
    uint64_t VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueKind Arg0OVK =
          IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                         : TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Arg1OVK =
          !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                          : TargetTransformInfo::OK_AnyValue;
      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               Arg0OVK, Arg1OVK);
    }
    DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
                 << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// \brief Generate a constant vector with \p Val with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
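  /// E.g., for Val == 7 and a <4 x i32> transition extracting index 2, this
  /// returns <7, 7, 7, 7> when UseSplat is true and <undef, undef, 7, undef>
  /// otherwise.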
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    unsigned End = getTransitionType()->getVectorNumElements();
    if (UseSplat)
      return ConstantVector::getSplat(End, Val);

    SmallVector<Constant *, 4> ConstVec;
    UndefValue *UndefVal = UndefValue::get(Val->getType());
    for (unsigned Idx = 0; Idx != End; ++Idx) {
      if (Idx == ExtractIdx)
        ConstVec.push_back(Val);
      else
        ConstVec.push_back(UndefVal);
    }
    return ConstantVector::get(ConstVec);
  }

  /// \brief Check whether promoting the operand at \p OperandIdx of \p Use
  /// to a vector type can trigger undefined behavior.
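  /// E.g., for "%r = udiv i32 %a, %b", promoting operand 1 would put undef
  /// lanes on the right-hand side of the division; since those lanes may be
  /// zero, this could introduce a division by zero.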
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right-hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost) {
    assert(Transition && "Do not know how to promote null");
  }

  /// \brief Check if we can promote \p ToBePromoted to \p Type.
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// \brief Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// \brief Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// \brief Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// \brief Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// \brief Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};

} // end anonymous namespace

void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->moveAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}

/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
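/// E.g., on such a target, the sequence
///   %e = extractelement <2 x i32> %v, i32 1
///   store i32 %e, i32* %p
/// can be selected as a single store of the vector lane.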
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = std::numeric_limits<unsigned>::max();
  if (DisableStoreExtract || !TLI ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector-to-scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it to a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      DEBUG(dbgs() << "Instruction to promote is in a different block ("
                   << ToBePromoted->getParent()->getName()
                   << ") than the transition (" << Parent->getName() << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      DEBUG(dbgs() << "Assume " << *Inst << '\n'
                   << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}

/// For the store instruction sequence below, the F and I values
/// are bundled together as an i64 value before being stored into memory.
/// It is sometimes more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
///            (shl (zext I to i64), 32)), addr)  -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kinds of splitting
/// are supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is transformed by SRoA before being inlined
/// into hoo:
/// void goo(const std::pair<int, float> &);
/// void hoo() {
///   ...
///   goo(std::make_pair(tmp, ftmp));
///   ...
/// }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern is across
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();
  if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
      DL.getTypeSizeInBits(SplitStoreType))
    return false;

  // Match the following pattern (the OR is matched commutatively, so the
  // zext of LValue and the shl of HValue may appear in either order):
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  // Expect both operands of the OR and the first operand of the SHL to have
  // only one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers with size less than or equal
  // to HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of the target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split the store.
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so it may be merged with the split stores by the DAG combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = Builder.CreateBitCast(
        SI.getOperand(1),
        SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
    if (Upper)
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
    Builder.CreateAlignedStore(
        V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}

// Return true if the GEP has two operands, the first operand is of a
// sequential type, and the second operand is a constant.
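// E.g., "%g = getelementptr i32, i32* %p, i64 4" matches, whereas a GEP with
// a non-constant index such as "getelementptr i32, i32* %p, i64 %i" does not.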
static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
  gep_type_iterator I = gep_type_begin(*GEP);
  return GEP->getNumOperands() == 2 &&
         I.isSequential() &&
         isa<ConstantInt>(GEP->getOperand(1));
}

// Try unmerging GEPs to reduce liveness interference (register pressure)
// across IndirectBr edges. Since IndirectBr edges tend to touch on many
// blocks, reducing liveness interference across those edges benefits global
// register allocation. Currently handles only certain cases.
//
// For example, unmerge %GEPI and %UGEPI as below.
//
// ---------- BEFORE ----------
// SrcBlock:
//   ...
//   %GEPIOp = ...
//   ...
//   %GEPI = gep %GEPIOp, Idx
//   ...
//   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
//   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it's used by
//      %UGEPI)
//
// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPIOp, UIdx
//   ...
// ---------------------------
//
// ---------- AFTER ----------
// SrcBlock:
//   ... (same as above)
//   (* %GEPI is still alive on the indirectbr edges)
//   (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
//      unmerging)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPI, (UIdx-Idx)
//   ...
// ---------------------------
//
// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
// no longer alive on them.
//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting the
// merging of GEPs in the first place in InstCombiner::visitGetElementPtrInst(),
// so as not to disable further simplifications and optimizations as a result
// of GEP merging.
//
// Note this unmerging may increase the length of the data-flow critical path
// (the path from %GEPIOp to %UGEPI would go through %GEPI), so this is a
// tradeoff between register pressure and the length of the data-flow critical
// path. Restricting this to the uncommon IndirectBr case minimizes the impact
// of a potentially longer critical path, if any, and the impact on compile
// time.
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType())
      > TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEPI is used outside the block, meaning it's alive on the
  // IndirectBr edge(s).
  if (find_if(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock) {
            return true;
          }
        }
        return false;
      }) == GEPI->users().end())
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to see whether unmerging would make GEPIOp not
  // alive on the IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI) continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that's fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp
    // is the pointer operand to it. If so, record it in the vector. If not,
    // give up.
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType())
        > TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.empty())
    return false;
  // Check the materializing cost of (UIdx-Idx).
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType());
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and the UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx =
        ConstantInt::get(GEPIIdx->getType(),
                         UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on the IndirectBr edges).
  assert(find_if(GEPIOp->users(), [&](User *Usr) {
           return cast<Instruction>(Usr)->getParent() != SrcBlock;
         }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
  return true;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI, TLI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      bool Modified = optimizeLoadExt(LI);
      unsigned AS = LI->getPointerAddressSpace();
      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
      return Modified;
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
      EnableAndCmpSinking && TLI)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
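/// E.g., a tree of shifts, ands and ors rooted at an OR that computes the
/// bit-reversed value of %a is collapsed into a single call to the
/// llvm.bitreverse intrinsic for the type, provided the target supports or
/// custom-lowers ISD::BITREVERSE for that type.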
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then ISel may not be able to
// handle it properly. ISel will drop llvm.dbg.value if it cannot find a node
// corresponding to the value.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

/// \brief Scale down both weights to fit into uint32_t.
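/// E.g., for NewTrue == 2^40 and NewFalse == 2^32, Scale is
/// (2^40 / (2^32 - 1)) + 1 == 257, so both weights are divided by 257 and
/// then fit into uint32_t.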
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block by using the first condition directly
    // in the branch instruction and removing the no longer needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes
    // from the newly generated BB (TmpBB). In the other successor we need to
    // add one incoming edge to the PHI nodes, because both branch
    // instructions now target the same successor. Depending on the original
    // branch condition (and/or) we have to swap the successors (TrueDest,
    // FalseDest), so that we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == TrueProb for the original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This
      // choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
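      // Worked example for the first choice: with original weights A = 3 and
      // B = 5, BB1 gets weights {3, 13} (A and A+2B) and TmpBB gets {3, 10}
      // (A and 2B). Then
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == 3/16 + (13/16 * 3/13) == 6/16 == 3/8,
      // which matches TrueProb for the original BB.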
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     == FalseProb for the original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This
      // choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}