1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This pass munges the code in the input function to better prepare it for 11 // SelectionDAG-based code generation. This works around limitations in it's 12 // basic-block-at-a-time approach. It should eventually be removed. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "llvm/ADT/APInt.h" 17 #include "llvm/ADT/ArrayRef.h" 18 #include "llvm/ADT/DenseMap.h" 19 #include "llvm/ADT/PointerIntPair.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/Analysis/BlockFrequencyInfo.h" 26 #include "llvm/Analysis/BranchProbabilityInfo.h" 27 #include "llvm/Analysis/ConstantFolding.h" 28 #include "llvm/Analysis/InstructionSimplify.h" 29 #include "llvm/Analysis/LoopInfo.h" 30 #include "llvm/Analysis/MemoryBuiltins.h" 31 #include "llvm/Analysis/ProfileSummaryInfo.h" 32 #include "llvm/Analysis/TargetLibraryInfo.h" 33 #include "llvm/Analysis/TargetTransformInfo.h" 34 #include "llvm/Analysis/ValueTracking.h" 35 #include "llvm/CodeGen/Analysis.h" 36 #include "llvm/CodeGen/ISDOpcodes.h" 37 #include "llvm/CodeGen/MachineValueType.h" 38 #include "llvm/CodeGen/SelectionDAGNodes.h" 39 #include "llvm/CodeGen/TargetPassConfig.h" 40 #include "llvm/CodeGen/ValueTypes.h" 41 #include "llvm/IR/Argument.h" 42 #include "llvm/IR/Attributes.h" 43 #include "llvm/IR/BasicBlock.h" 44 #include "llvm/IR/CallSite.h" 45 #include "llvm/IR/Constant.h" 46 #include "llvm/IR/Constants.h" 47 #include "llvm/IR/DataLayout.h" 48 #include "llvm/IR/DerivedTypes.h" 49 #include "llvm/IR/Dominators.h" 50 #include "llvm/IR/Function.h" 51 #include "llvm/IR/GetElementPtrTypeIterator.h" 52 #include "llvm/IR/GlobalValue.h" 53 #include "llvm/IR/GlobalVariable.h" 54 #include "llvm/IR/IRBuilder.h" 55 #include "llvm/IR/InlineAsm.h" 56 #include "llvm/IR/InstrTypes.h" 57 #include "llvm/IR/Instruction.h" 58 #include "llvm/IR/Instructions.h" 59 #include "llvm/IR/IntrinsicInst.h" 60 #include "llvm/IR/Intrinsics.h" 61 #include "llvm/IR/LLVMContext.h" 62 #include "llvm/IR/MDBuilder.h" 63 #include "llvm/IR/Module.h" 64 #include "llvm/IR/Operator.h" 65 #include "llvm/IR/PatternMatch.h" 66 #include "llvm/IR/Statepoint.h" 67 #include "llvm/IR/Type.h" 68 #include "llvm/IR/Use.h" 69 #include "llvm/IR/User.h" 70 #include "llvm/IR/Value.h" 71 #include "llvm/IR/ValueHandle.h" 72 #include "llvm/IR/ValueMap.h" 73 #include "llvm/Pass.h" 74 #include "llvm/Support/BlockFrequency.h" 75 #include "llvm/Support/BranchProbability.h" 76 #include "llvm/Support/Casting.h" 77 #include "llvm/Support/CommandLine.h" 78 #include "llvm/Support/Compiler.h" 79 #include "llvm/Support/Debug.h" 80 #include "llvm/Support/ErrorHandling.h" 81 #include "llvm/Support/MathExtras.h" 82 #include "llvm/Support/raw_ostream.h" 83 #include "llvm/Target/TargetLowering.h" 84 #include "llvm/Target/TargetMachine.h" 85 #include "llvm/Target/TargetOptions.h" 86 #include "llvm/Target/TargetSubtargetInfo.h" 87 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 88 #include "llvm/Transforms/Utils/BypassSlowDivision.h" 89 #include "llvm/Transforms/Utils/Cloning.h" 90 
#include "llvm/Transforms/Utils/Local.h" 91 #include "llvm/Transforms/Utils/SimplifyLibCalls.h" 92 #include "llvm/Transforms/Utils/ValueMapper.h" 93 #include <algorithm> 94 #include <cassert> 95 #include <cstdint> 96 #include <iterator> 97 #include <limits> 98 #include <memory> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 using namespace llvm::PatternMatch; 104 105 #define DEBUG_TYPE "codegenprepare" 106 107 STATISTIC(NumBlocksElim, "Number of blocks eliminated"); 108 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); 109 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); 110 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " 111 "sunken Cmps"); 112 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " 113 "of sunken Casts"); 114 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " 115 "computations were sunk"); 116 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); 117 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); 118 STATISTIC(NumAndsAdded, 119 "Number of and mask instructions added to form ext loads"); 120 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); 121 STATISTIC(NumRetsDup, "Number of return instructions duplicated"); 122 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); 123 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); 124 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); 125 126 STATISTIC(NumMemCmpCalls, "Number of memcmp calls"); 127 STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size"); 128 STATISTIC(NumMemCmpGreaterThanMax, 129 "Number of memcmp calls with size greater than max size"); 130 STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls"); 131 132 static cl::opt<bool> DisableBranchOpts( 133 "disable-cgp-branch-opts", cl::Hidden, cl::init(false), 134 cl::desc("Disable branch optimizations in CodeGenPrepare")); 135 136 static cl::opt<bool> 137 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), 138 cl::desc("Disable GC optimizations in CodeGenPrepare")); 139 140 static cl::opt<bool> DisableSelectToBranch( 141 "disable-cgp-select2branch", cl::Hidden, cl::init(false), 142 cl::desc("Disable select to branch conversion.")); 143 144 static cl::opt<bool> AddrSinkUsingGEPs( 145 "addr-sink-using-gep", cl::Hidden, cl::init(true), 146 cl::desc("Address sinking in CGP using GEPs.")); 147 148 static cl::opt<bool> EnableAndCmpSinking( 149 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 150 cl::desc("Enable sinkinig and/cmp into branches.")); 151 152 static cl::opt<bool> DisableStoreExtract( 153 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 154 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 155 156 static cl::opt<bool> StressStoreExtract( 157 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 158 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 159 160 static cl::opt<bool> DisableExtLdPromotion( 161 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 162 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 163 "CodeGenPrepare")); 164 165 static cl::opt<bool> StressExtLdPromotion( 166 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 167 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 168 "optimization in CodeGenPrepare")); 169 
170 static cl::opt<bool> DisablePreheaderProtect( 171 "disable-preheader-prot", cl::Hidden, cl::init(false), 172 cl::desc("Disable protection against removing loop preheaders")); 173 174 static cl::opt<bool> ProfileGuidedSectionPrefix( 175 "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore, 176 cl::desc("Use profile info to add section prefix for hot/cold functions")); 177 178 static cl::opt<unsigned> FreqRatioToSkipMerge( 179 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), 180 cl::desc("Skip merging empty blocks if (frequency of empty block) / " 181 "(frequency of destination block) is greater than this ratio")); 182 183 static cl::opt<bool> ForceSplitStore( 184 "force-split-store", cl::Hidden, cl::init(false), 185 cl::desc("Force store splitting no matter what the target query says.")); 186 187 static cl::opt<bool> 188 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden, 189 cl::desc("Enable merging of redundant sexts when one is dominating" 190 " the other."), cl::init(true)); 191 192 static cl::opt<unsigned> MemCmpNumLoadsPerBlock( 193 "memcmp-num-loads-per-block", cl::Hidden, cl::init(1), 194 cl::desc("The number of loads per basic block for inline expansion of " 195 "memcmp that is only being compared against zero.")); 196 197 namespace { 198 199 using SetOfInstrs = SmallPtrSet<Instruction *, 16>; 200 using TypeIsSExt = PointerIntPair<Type *, 1, bool>; 201 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>; 202 using SExts = SmallVector<Instruction *, 16>; 203 using ValueToSExts = DenseMap<Value *, SExts>; 204 205 class TypePromotionTransaction; 206 207 class CodeGenPrepare : public FunctionPass { 208 const TargetMachine *TM = nullptr; 209 const TargetSubtargetInfo *SubtargetInfo; 210 const TargetLowering *TLI = nullptr; 211 const TargetRegisterInfo *TRI; 212 const TargetTransformInfo *TTI = nullptr; 213 const TargetLibraryInfo *TLInfo; 214 const LoopInfo *LI; 215 std::unique_ptr<BlockFrequencyInfo> BFI; 216 std::unique_ptr<BranchProbabilityInfo> BPI; 217 218 /// As we scan instructions optimizing them, this is the next instruction 219 /// to optimize. Transforms that can invalidate this should update it. 220 BasicBlock::iterator CurInstIterator; 221 222 /// Keeps track of non-local addresses that have been sunk into a block. 223 /// This allows us to avoid inserting duplicate code for blocks with 224 /// multiple load/stores of the same address. 225 ValueMap<Value*, Value*> SunkAddrs; 226 227 /// Keeps track of all instructions inserted for the current function. 228 SetOfInstrs InsertedInsts; 229 230 /// Keeps track of the type of the related instruction before their 231 /// promotion for the current function. 232 InstrToOrigTy PromotedInsts; 233 234 /// Keep track of instructions removed during promotion. 235 SetOfInstrs RemovedInsts; 236 237 /// Keep track of sext chains based on their initial value. 238 DenseMap<Value *, Instruction *> SeenChainsForSExt; 239 240 /// Keep track of SExt promoted. 241 ValueToSExts ValToSExtendedUses; 242 243 /// True if CFG is modified in any way. 244 bool ModifiedDT; 245 246 /// True if optimizing for size. 247 bool OptSize; 248 249 /// DataLayout for the Function being processed. 
250 const DataLayout *DL = nullptr; 251 252 public: 253 static char ID; // Pass identification, replacement for typeid 254 255 CodeGenPrepare() : FunctionPass(ID) { 256 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); 257 } 258 259 bool runOnFunction(Function &F) override; 260 261 StringRef getPassName() const override { return "CodeGen Prepare"; } 262 263 void getAnalysisUsage(AnalysisUsage &AU) const override { 264 // FIXME: When we can selectively preserve passes, preserve the domtree. 265 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 266 AU.addRequired<TargetLibraryInfoWrapperPass>(); 267 AU.addRequired<TargetTransformInfoWrapperPass>(); 268 AU.addRequired<LoopInfoWrapperPass>(); 269 } 270 271 private: 272 bool eliminateFallThrough(Function &F); 273 bool eliminateMostlyEmptyBlocks(Function &F); 274 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB); 275 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; 276 void eliminateMostlyEmptyBlock(BasicBlock *BB); 277 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB, 278 bool isPreheader); 279 bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT); 280 bool optimizeInst(Instruction *I, bool &ModifiedDT); 281 bool optimizeMemoryInst(Instruction *I, Value *Addr, 282 Type *AccessTy, unsigned AS); 283 bool optimizeInlineAsmInst(CallInst *CS); 284 bool optimizeCallInst(CallInst *CI, bool &ModifiedDT); 285 bool optimizeExt(Instruction *&I); 286 bool optimizeExtUses(Instruction *I); 287 bool optimizeLoadExt(LoadInst *I); 288 bool optimizeSelectInst(SelectInst *SI); 289 bool optimizeShuffleVectorInst(ShuffleVectorInst *SI); 290 bool optimizeSwitchInst(SwitchInst *CI); 291 bool optimizeExtractElementInst(Instruction *Inst); 292 bool dupRetToEnableTailCallOpts(BasicBlock *BB); 293 bool placeDbgValues(Function &F); 294 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts, 295 LoadInst *&LI, Instruction *&Inst, bool HasPromoted); 296 bool tryToPromoteExts(TypePromotionTransaction &TPT, 297 const SmallVectorImpl<Instruction *> &Exts, 298 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 299 unsigned CreatedInstsCost = 0); 300 bool mergeSExts(Function &F); 301 bool performAddressTypePromotion( 302 Instruction *&Inst, 303 bool AllowPromotionWithoutCommonHeader, 304 bool HasPromoted, TypePromotionTransaction &TPT, 305 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts); 306 bool splitBranchCondition(Function &F); 307 bool simplifyOffsetableRelocate(Instruction &I); 308 bool splitIndirectCriticalEdges(Function &F); 309 }; 310 311 } // end anonymous namespace 312 313 char CodeGenPrepare::ID = 0; 314 315 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE, 316 "Optimize for code generation", false, false) 317 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 318 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, 319 "Optimize for code generation", false, false) 320 321 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); } 322 323 bool CodeGenPrepare::runOnFunction(Function &F) { 324 if (skipFunction(F)) 325 return false; 326 327 DL = &F.getParent()->getDataLayout(); 328 329 bool EverMadeChange = false; 330 // Clear per function information. 
  InsertedInsts.clear();
  PromotedInsts.clear();
  BFI.reset();
  BPI.reset();

  ModifiedDT = false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    TM = &TPC->getTM<TargetMachine>();
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  OptSize = F.optForSize();

  if (ProfileGuidedSectionPrefix) {
    ProfileSummaryInfo *PSI =
        getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    if (PSI->isFunctionHotInCallGraph(&F))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F))
      F.setSectionPrefix(".unlikely");
  }

  // This optimization identifies DIV instructions that can be
  // profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock* BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock* Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then isel may not be able
  // to handle it properly. isel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
  EverMadeChange |= placeDbgValues(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= splitIndirectCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
430 MadeChange |= !WorkList.empty(); 431 while (!WorkList.empty()) { 432 BasicBlock *BB = *WorkList.begin(); 433 WorkList.erase(BB); 434 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); 435 436 DeleteDeadBlock(BB); 437 438 for (SmallVectorImpl<BasicBlock*>::iterator 439 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 440 if (pred_begin(*II) == pred_end(*II)) 441 WorkList.insert(*II); 442 } 443 444 // Merge pairs of basic blocks with unconditional branches, connected by 445 // a single edge. 446 if (EverMadeChange || MadeChange) 447 MadeChange |= eliminateFallThrough(F); 448 449 EverMadeChange |= MadeChange; 450 } 451 452 if (!DisableGCOpts) { 453 SmallVector<Instruction *, 2> Statepoints; 454 for (BasicBlock &BB : F) 455 for (Instruction &I : BB) 456 if (isStatepoint(I)) 457 Statepoints.push_back(&I); 458 for (auto &I : Statepoints) 459 EverMadeChange |= simplifyOffsetableRelocate(*I); 460 } 461 462 return EverMadeChange; 463 } 464 465 /// Merge basic blocks which are connected by a single edge, where one of the 466 /// basic blocks has a single successor pointing to the other basic block, 467 /// which has a single predecessor. 468 bool CodeGenPrepare::eliminateFallThrough(Function &F) { 469 bool Changed = false; 470 // Scan all of the blocks in the function, except for the entry block. 471 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { 472 BasicBlock *BB = &*I++; 473 // If the destination block has a single pred, then this is a trivial 474 // edge, just collapse it. 475 BasicBlock *SinglePred = BB->getSinglePredecessor(); 476 477 // Don't merge if BB's address is taken. 478 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; 479 480 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); 481 if (Term && !Term->isConditional()) { 482 Changed = true; 483 DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n"); 484 // Remember if SinglePred was the entry block of the function. 485 // If so, we will need to move BB back to the entry position. 486 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 487 MergeBasicBlockIntoOnlyPred(BB, nullptr); 488 489 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 490 BB->moveBefore(&BB->getParent()->getEntryBlock()); 491 492 // We have erased a block. Update the iterator. 493 I = BB->getIterator(); 494 } 495 } 496 return Changed; 497 } 498 499 /// Find a destination block from BB if BB is mergeable empty block. 500 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) { 501 // If this block doesn't end with an uncond branch, ignore it. 502 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); 503 if (!BI || !BI->isUnconditional()) 504 return nullptr; 505 506 // If the instruction before the branch (skipping debug info) isn't a phi 507 // node, then other stuff is happening here. 508 BasicBlock::iterator BBI = BI->getIterator(); 509 if (BBI != BB->begin()) { 510 --BBI; 511 while (isa<DbgInfoIntrinsic>(BBI)) { 512 if (BBI == BB->begin()) 513 break; 514 --BBI; 515 } 516 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) 517 return nullptr; 518 } 519 520 // Do not break infinite loops. 521 BasicBlock *DestBB = BI->getSuccessor(0); 522 if (DestBB == BB) 523 return nullptr; 524 525 if (!canMergeBlocks(BB, DestBB)) 526 DestBB = nullptr; 527 528 return DestBB; 529 } 530 531 // Return the unique indirectbr predecessor of a block. 
This may return null 532 // even if such a predecessor exists, if it's not useful for splitting. 533 // If a predecessor is found, OtherPreds will contain all other (non-indirectbr) 534 // predecessors of BB. 535 static BasicBlock * 536 findIBRPredecessor(BasicBlock *BB, SmallVectorImpl<BasicBlock *> &OtherPreds) { 537 // If the block doesn't have any PHIs, we don't care about it, since there's 538 // no point in splitting it. 539 PHINode *PN = dyn_cast<PHINode>(BB->begin()); 540 if (!PN) 541 return nullptr; 542 543 // Verify we have exactly one IBR predecessor. 544 // Conservatively bail out if one of the other predecessors is not a "regular" 545 // terminator (that is, not a switch or a br). 546 BasicBlock *IBB = nullptr; 547 for (unsigned Pred = 0, E = PN->getNumIncomingValues(); Pred != E; ++Pred) { 548 BasicBlock *PredBB = PN->getIncomingBlock(Pred); 549 TerminatorInst *PredTerm = PredBB->getTerminator(); 550 switch (PredTerm->getOpcode()) { 551 case Instruction::IndirectBr: 552 if (IBB) 553 return nullptr; 554 IBB = PredBB; 555 break; 556 case Instruction::Br: 557 case Instruction::Switch: 558 OtherPreds.push_back(PredBB); 559 continue; 560 default: 561 return nullptr; 562 } 563 } 564 565 return IBB; 566 } 567 568 // Split critical edges where the source of the edge is an indirectbr 569 // instruction. This isn't always possible, but we can handle some easy cases. 570 // This is useful because MI is unable to split such critical edges, 571 // which means it will not be able to sink instructions along those edges. 572 // This is especially painful for indirect branches with many successors, where 573 // we end up having to prepare all outgoing values in the origin block. 574 // 575 // Our normal algorithm for splitting critical edges requires us to update 576 // the outgoing edges of the edge origin block, but for an indirectbr this 577 // is hard, since it would require finding and updating the block addresses 578 // the indirect branch uses. But if a block only has a single indirectbr 579 // predecessor, with the others being regular branches, we can do it in a 580 // different way. 581 // Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr. 582 // We can split D into D0 and D1, where D0 contains only the PHIs from D, 583 // and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and 584 // create the following structure: 585 // A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1 586 bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) { 587 // Check whether the function has any indirectbrs, and collect which blocks 588 // they may jump to. Since most functions don't have indirect branches, 589 // this lowers the common case's overhead to O(Blocks) instead of O(Edges). 590 SmallSetVector<BasicBlock *, 16> Targets; 591 for (auto &BB : F) { 592 auto *IBI = dyn_cast<IndirectBrInst>(BB.getTerminator()); 593 if (!IBI) 594 continue; 595 596 for (unsigned Succ = 0, E = IBI->getNumSuccessors(); Succ != E; ++Succ) 597 Targets.insert(IBI->getSuccessor(Succ)); 598 } 599 600 if (Targets.empty()) 601 return false; 602 603 bool Changed = false; 604 for (BasicBlock *Target : Targets) { 605 SmallVector<BasicBlock *, 16> OtherPreds; 606 BasicBlock *IBRPred = findIBRPredecessor(Target, OtherPreds); 607 // If we did not found an indirectbr, or the indirectbr is the only 608 // incoming edge, this isn't the kind of edge we're looking for. 609 if (!IBRPred || OtherPreds.empty()) 610 continue; 611 612 // Don't even think about ehpads/landingpads. 
613 Instruction *FirstNonPHI = Target->getFirstNonPHI(); 614 if (FirstNonPHI->isEHPad() || Target->isLandingPad()) 615 continue; 616 617 BasicBlock *BodyBlock = Target->splitBasicBlock(FirstNonPHI, ".split"); 618 // It's possible Target was its own successor through an indirectbr. 619 // In this case, the indirectbr now comes from BodyBlock. 620 if (IBRPred == Target) 621 IBRPred = BodyBlock; 622 623 // At this point Target only has PHIs, and BodyBlock has the rest of the 624 // block's body. Create a copy of Target that will be used by the "direct" 625 // preds. 626 ValueToValueMapTy VMap; 627 BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F); 628 629 for (BasicBlock *Pred : OtherPreds) { 630 // If the target is a loop to itself, then the terminator of the split 631 // block needs to be updated. 632 if (Pred == Target) 633 BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc); 634 else 635 Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc); 636 } 637 638 // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that 639 // they are clones, so the number of PHIs are the same. 640 // (a) Remove the edge coming from IBRPred from the "Direct" PHI 641 // (b) Leave that as the only edge in the "Indirect" PHI. 642 // (c) Merge the two in the body block. 643 BasicBlock::iterator Indirect = Target->begin(), 644 End = Target->getFirstNonPHI()->getIterator(); 645 BasicBlock::iterator Direct = DirectSucc->begin(); 646 BasicBlock::iterator MergeInsert = BodyBlock->getFirstInsertionPt(); 647 648 assert(&*End == Target->getTerminator() && 649 "Block was expected to only contain PHIs"); 650 651 while (Indirect != End) { 652 PHINode *DirPHI = cast<PHINode>(Direct); 653 PHINode *IndPHI = cast<PHINode>(Indirect); 654 655 // Now, clean up - the direct block shouldn't get the indirect value, 656 // and vice versa. 657 DirPHI->removeIncomingValue(IBRPred); 658 Direct++; 659 660 // Advance the pointer here, to avoid invalidation issues when the old 661 // PHI is erased. 662 Indirect++; 663 664 PHINode *NewIndPHI = PHINode::Create(IndPHI->getType(), 1, "ind", IndPHI); 665 NewIndPHI->addIncoming(IndPHI->getIncomingValueForBlock(IBRPred), 666 IBRPred); 667 668 // Create a PHI in the body block, to merge the direct and indirect 669 // predecessors. 670 PHINode *MergePHI = 671 PHINode::Create(IndPHI->getType(), 2, "merge", &*MergeInsert); 672 MergePHI->addIncoming(NewIndPHI, Target); 673 MergePHI->addIncoming(DirPHI, DirectSucc); 674 675 IndPHI->replaceAllUsesWith(MergePHI); 676 IndPHI->eraseFromParent(); 677 } 678 679 Changed = true; 680 } 681 682 return Changed; 683 } 684 685 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an 686 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split 687 /// edges in ways that are non-optimal for isel. Start by eliminating these 688 /// blocks so we can split them the way we want them. 689 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) { 690 SmallPtrSet<BasicBlock *, 16> Preheaders; 691 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end()); 692 while (!LoopList.empty()) { 693 Loop *L = LoopList.pop_back_val(); 694 LoopList.insert(LoopList.end(), L->begin(), L->end()); 695 if (BasicBlock *Preheader = L->getLoopPreheader()) 696 Preheaders.insert(Preheader); 697 } 698 699 bool MadeChange = false; 700 // Note that this intentionally skips the entry block. 
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHI())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping is less than the cost of merging, i.e.
  //   Cost(skipping merging) < Cost(merging BB),
  // where Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)) and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), this simplifies to:
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In that case,
  // Cost(merging BB) will be the sum of their frequencies.

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    bool HasAllSameValue = true;
    BasicBlock::const_iterator DestBBI = DestBB->begin();
    while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) {
      if (DestPN->getIncomingValueForBlock(BB) !=
          DestPN->getIncomingValueForBlock(DestBBPred)) {
        HasAllSameValue = false;
        break;
      }
    }
    if (HasAllSameValue)
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred. In
  // that case, there is no reason to skip merging, because COPYs are expected
  // to be placed in Pred already.
783 if (SameIncomingValueBBs.count(Pred)) 784 return true; 785 786 if (!BFI) { 787 Function &F = *BB->getParent(); 788 LoopInfo LI{DominatorTree(F)}; 789 BPI.reset(new BranchProbabilityInfo(F, LI)); 790 BFI.reset(new BlockFrequencyInfo(F, *BPI, LI)); 791 } 792 793 BlockFrequency PredFreq = BFI->getBlockFreq(Pred); 794 BlockFrequency BBFreq = BFI->getBlockFreq(BB); 795 796 for (auto SameValueBB : SameIncomingValueBBs) 797 if (SameValueBB->getUniquePredecessor() == Pred && 798 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB)) 799 BBFreq += BFI->getBlockFreq(SameValueBB); 800 801 return PredFreq.getFrequency() <= 802 BBFreq.getFrequency() * FreqRatioToSkipMerge; 803 } 804 805 /// Return true if we can merge BB into DestBB if there is a single 806 /// unconditional branch between them, and BB contains no other non-phi 807 /// instructions. 808 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB, 809 const BasicBlock *DestBB) const { 810 // We only want to eliminate blocks whose phi nodes are used by phi nodes in 811 // the successor. If there are more complex condition (e.g. preheaders), 812 // don't mess around with them. 813 BasicBlock::const_iterator BBI = BB->begin(); 814 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { 815 for (const User *U : PN->users()) { 816 const Instruction *UI = cast<Instruction>(U); 817 if (UI->getParent() != DestBB || !isa<PHINode>(UI)) 818 return false; 819 // If User is inside DestBB block and it is a PHINode then check 820 // incoming value. If incoming value is not from BB then this is 821 // a complex condition (e.g. preheaders) we want to avoid here. 822 if (UI->getParent() == DestBB) { 823 if (const PHINode *UPN = dyn_cast<PHINode>(UI)) 824 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) { 825 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I)); 826 if (Insn && Insn->getParent() == BB && 827 Insn->getParent() != UPN->getIncomingBlock(I)) 828 return false; 829 } 830 } 831 } 832 } 833 834 // If BB and DestBB contain any common predecessors, then the phi nodes in BB 835 // and DestBB may have conflicting incoming values for the block. If so, we 836 // can't merge the block. 837 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin()); 838 if (!DestBBPN) return true; // no conflict. 839 840 // Collect the preds of BB. 841 SmallPtrSet<const BasicBlock*, 16> BBPreds; 842 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 843 // It is faster to get preds from a PHI than with pred_iterator. 844 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 845 BBPreds.insert(BBPN->getIncomingBlock(i)); 846 } else { 847 BBPreds.insert(pred_begin(BB), pred_end(BB)); 848 } 849 850 // Walk the preds of DestBB. 851 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { 852 BasicBlock *Pred = DestBBPN->getIncomingBlock(i); 853 if (BBPreds.count(Pred)) { // Common predecessor? 854 BBI = DestBB->begin(); 855 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { 856 const Value *V1 = PN->getIncomingValueForBlock(Pred); 857 const Value *V2 = PN->getIncomingValueForBlock(BB); 858 859 // If V2 is a phi node in BB, look up what the mapped value will be. 860 if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) 861 if (V2PN->getParent() == BB) 862 V2 = V2PN->getIncomingValueForBlock(Pred); 863 864 // If there is a conflict, bail out. 
865 if (V1 != V2) return false; 866 } 867 } 868 } 869 870 return true; 871 } 872 873 /// Eliminate a basic block that has only phi's and an unconditional branch in 874 /// it. 875 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) { 876 BranchInst *BI = cast<BranchInst>(BB->getTerminator()); 877 BasicBlock *DestBB = BI->getSuccessor(0); 878 879 DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB); 880 881 // If the destination block has a single pred, then this is a trivial edge, 882 // just collapse it. 883 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { 884 if (SinglePred != DestBB) { 885 // Remember if SinglePred was the entry block of the function. If so, we 886 // will need to move BB back to the entry position. 887 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 888 MergeBasicBlockIntoOnlyPred(DestBB, nullptr); 889 890 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 891 BB->moveBefore(&BB->getParent()->getEntryBlock()); 892 893 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 894 return; 895 } 896 } 897 898 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB 899 // to handle the new incoming edges it is about to have. 900 PHINode *PN; 901 for (BasicBlock::iterator BBI = DestBB->begin(); 902 (PN = dyn_cast<PHINode>(BBI)); ++BBI) { 903 // Remove the incoming value for BB, and remember it. 904 Value *InVal = PN->removeIncomingValue(BB, false); 905 906 // Two options: either the InVal is a phi node defined in BB or it is some 907 // value that dominates BB. 908 PHINode *InValPhi = dyn_cast<PHINode>(InVal); 909 if (InValPhi && InValPhi->getParent() == BB) { 910 // Add all of the input values of the input PHI as inputs of this phi. 911 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) 912 PN->addIncoming(InValPhi->getIncomingValue(i), 913 InValPhi->getIncomingBlock(i)); 914 } else { 915 // Otherwise, add one instance of the dominating value for each edge that 916 // we will be adding. 917 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 918 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 919 PN->addIncoming(InVal, BBPN->getIncomingBlock(i)); 920 } else { 921 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) 922 PN->addIncoming(InVal, *PI); 923 } 924 } 925 } 926 927 // The PHIs are now updated, change everything that refers to BB to use 928 // DestBB and remove BB. 
929 BB->replaceAllUsesWith(DestBB); 930 BB->eraseFromParent(); 931 ++NumBlocksElim; 932 933 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 934 } 935 936 // Computes a map of base pointer relocation instructions to corresponding 937 // derived pointer relocation instructions given a vector of all relocate calls 938 static void computeBaseDerivedRelocateMap( 939 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, 940 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> 941 &RelocateInstMap) { 942 // Collect information in two maps: one primarily for locating the base object 943 // while filling the second map; the second map is the final structure holding 944 // a mapping between Base and corresponding Derived relocate calls 945 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; 946 for (auto *ThisRelocate : AllRelocateCalls) { 947 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), 948 ThisRelocate->getDerivedPtrIndex()); 949 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); 950 } 951 for (auto &Item : RelocateIdxMap) { 952 std::pair<unsigned, unsigned> Key = Item.first; 953 if (Key.first == Key.second) 954 // Base relocation: nothing to insert 955 continue; 956 957 GCRelocateInst *I = Item.second; 958 auto BaseKey = std::make_pair(Key.first, Key.first); 959 960 // We're iterating over RelocateIdxMap so we cannot modify it. 961 auto MaybeBase = RelocateIdxMap.find(BaseKey); 962 if (MaybeBase == RelocateIdxMap.end()) 963 // TODO: We might want to insert a new base object relocate and gep off 964 // that, if there are enough derived object relocates. 965 continue; 966 967 RelocateInstMap[MaybeBase->second].push_back(I); 968 } 969 } 970 971 // Accepts a GEP and extracts the operands into a vector provided they're all 972 // small integer constants 973 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, 974 SmallVectorImpl<Value *> &OffsetV) { 975 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 976 // Only accept small constant integer operands 977 auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); 978 if (!Op || Op->getZExtValue() > 20) 979 return false; 980 } 981 982 for (unsigned i = 1; i < GEP->getNumOperands(); i++) 983 OffsetV.push_back(GEP->getOperand(i)); 984 return true; 985 } 986 987 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to 988 // replace, computes a replacement, and affects it. 989 static bool 990 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, 991 const SmallVectorImpl<GCRelocateInst *> &Targets) { 992 bool MadeChange = false; 993 // We must ensure the relocation of derived pointer is defined after 994 // relocation of base pointer. If we find a relocation corresponding to base 995 // defined earlier than relocation of base then we move relocation of base 996 // right before found relocation. We consider only relocation in the same 997 // basic block as relocation of base. Relocations from other basic block will 998 // be skipped by optimization and we do not care about them. 
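  // For example (illustrative only; value names are placeholders), if the
  // block currently contains
  //   %derived.reloc = call ... @llvm.experimental.gc.relocate(...)  ; derived
  //   %base.reloc    = call ... @llvm.experimental.gc.relocate(...)  ; base
  // we first move %base.reloc above %derived.reloc, so that the GEP emitted
  // below, which is inserted right after the base relocate, is defined before
  // the derived relocate it replaces.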
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However, it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep.
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase.
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right type.
    // In theory, there must be a bitcast after gc_relocate if the type does not
    // match, and we should reuse it to get the derived pointer. But there could
    // be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore, so we insert a new
    // bitcast whether or not one already exists. In this way, we can handle all
    // cases, and the extra bitcast should be optimized away in later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the original
    // derived pointer's type, cast the new derived pointer to match it. Same
    // reasoning as above.
1072 Value *ActualReplacement = Replacement; 1073 if (Replacement->getType() != ToReplace->getType()) { 1074 ActualReplacement = 1075 Builder.CreateBitCast(Replacement, ToReplace->getType()); 1076 } 1077 ToReplace->replaceAllUsesWith(ActualReplacement); 1078 ToReplace->eraseFromParent(); 1079 1080 MadeChange = true; 1081 } 1082 return MadeChange; 1083 } 1084 1085 // Turns this: 1086 // 1087 // %base = ... 1088 // %ptr = gep %base + 15 1089 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1090 // %base' = relocate(%tok, i32 4, i32 4) 1091 // %ptr' = relocate(%tok, i32 4, i32 5) 1092 // %val = load %ptr' 1093 // 1094 // into this: 1095 // 1096 // %base = ... 1097 // %ptr = gep %base + 15 1098 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1099 // %base' = gc.relocate(%tok, i32 4, i32 4) 1100 // %ptr' = gep %base' + 15 1101 // %val = load %ptr' 1102 bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) { 1103 bool MadeChange = false; 1104 SmallVector<GCRelocateInst *, 2> AllRelocateCalls; 1105 1106 for (auto *U : I.users()) 1107 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) 1108 // Collect all the relocate calls associated with a statepoint 1109 AllRelocateCalls.push_back(Relocate); 1110 1111 // We need atleast one base pointer relocation + one derived pointer 1112 // relocation to mangle 1113 if (AllRelocateCalls.size() < 2) 1114 return false; 1115 1116 // RelocateInstMap is a mapping from the base relocate instruction to the 1117 // corresponding derived relocate instructions 1118 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; 1119 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); 1120 if (RelocateInstMap.empty()) 1121 return false; 1122 1123 for (auto &Item : RelocateInstMap) 1124 // Item.first is the RelocatedBase to offset against 1125 // Item.second is the vector of Targets to replace 1126 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second); 1127 return MadeChange; 1128 } 1129 1130 /// SinkCast - Sink the specified cast instruction into its user blocks 1131 static bool SinkCast(CastInst *CI) { 1132 BasicBlock *DefBB = CI->getParent(); 1133 1134 /// InsertedCasts - Only insert a cast in each block once. 1135 DenseMap<BasicBlock*, CastInst*> InsertedCasts; 1136 1137 bool MadeChange = false; 1138 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 1139 UI != E; ) { 1140 Use &TheUse = UI.getUse(); 1141 Instruction *User = cast<Instruction>(*UI); 1142 1143 // Figure out which BB this cast is used in. For PHI's this is the 1144 // appropriate predecessor block. 1145 BasicBlock *UserBB = User->getParent(); 1146 if (PHINode *PN = dyn_cast<PHINode>(User)) { 1147 UserBB = PN->getIncomingBlock(TheUse); 1148 } 1149 1150 // Preincrement use iterator so we don't invalidate it. 1151 ++UI; 1152 1153 // The first insertion point of a block containing an EH pad is after the 1154 // pad. If the pad is the user, we cannot sink the cast past the pad. 1155 if (User->isEHPad()) 1156 continue; 1157 1158 // If the block selected to receive the cast is an EH pad that does not 1159 // allow non-PHI instructions before the terminator, we can't sink the 1160 // cast. 1161 if (UserBB->getTerminator()->isEHPad()) 1162 continue; 1163 1164 // If this user is in the same block as the cast, don't change the cast. 1165 if (UserBB == DefBB) continue; 1166 1167 // If we have already inserted a cast into this block, use it. 
1168 CastInst *&InsertedCast = InsertedCasts[UserBB]; 1169 1170 if (!InsertedCast) { 1171 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1172 assert(InsertPt != UserBB->end()); 1173 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 1174 CI->getType(), "", &*InsertPt); 1175 } 1176 1177 // Replace a use of the cast with a use of the new cast. 1178 TheUse = InsertedCast; 1179 MadeChange = true; 1180 ++NumCastUses; 1181 } 1182 1183 // If we removed all uses, nuke the cast. 1184 if (CI->use_empty()) { 1185 CI->eraseFromParent(); 1186 MadeChange = true; 1187 } 1188 1189 return MadeChange; 1190 } 1191 1192 /// If the specified cast instruction is a noop copy (e.g. it's casting from 1193 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 1194 /// reduce the number of virtual registers that must be created and coalesced. 1195 /// 1196 /// Return true if any changes are made. 1197 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 1198 const DataLayout &DL) { 1199 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition 1200 // than sinking only nop casts, but is helpful on some platforms. 1201 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { 1202 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), 1203 ASC->getDestAddressSpace())) 1204 return false; 1205 } 1206 1207 // If this is a noop copy, 1208 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 1209 EVT DstVT = TLI.getValueType(DL, CI->getType()); 1210 1211 // This is an fp<->int conversion? 1212 if (SrcVT.isInteger() != DstVT.isInteger()) 1213 return false; 1214 1215 // If this is an extension, it will be a zero or sign extension, which 1216 // isn't a noop. 1217 if (SrcVT.bitsLT(DstVT)) return false; 1218 1219 // If these values will be promoted, find out what they will be promoted 1220 // to. This helps us consider truncates on PPC as noop copies when they 1221 // are. 1222 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 1223 TargetLowering::TypePromoteInteger) 1224 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 1225 if (TLI.getTypeAction(CI->getContext(), DstVT) == 1226 TargetLowering::TypePromoteInteger) 1227 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 1228 1229 // If, after promotion, these are the same types, this is a noop copy. 1230 if (SrcVT != DstVT) 1231 return false; 1232 1233 return SinkCast(CI); 1234 } 1235 1236 /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if 1237 /// possible. 1238 /// 1239 /// Return true if any changes were made. 
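///
/// For example (illustrative IR; value names are placeholders), the common
/// unsigned-overflow idiom
///   %add = add i32 %a, %b
///   %ov  = icmp ult i32 %add, %a
/// may be rewritten as
///   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add = extractvalue { i32, i1 } %res, 0
///   %ov  = extractvalue { i32, i1 } %res, 1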
1240 static bool CombineUAddWithOverflow(CmpInst *CI) { 1241 Value *A, *B; 1242 Instruction *AddI; 1243 if (!match(CI, 1244 m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI)))) 1245 return false; 1246 1247 Type *Ty = AddI->getType(); 1248 if (!isa<IntegerType>(Ty)) 1249 return false; 1250 1251 // We don't want to move around uses of condition values this late, so we we 1252 // check if it is legal to create the call to the intrinsic in the basic 1253 // block containing the icmp: 1254 1255 if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse()) 1256 return false; 1257 1258 #ifndef NDEBUG 1259 // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption 1260 // for now: 1261 if (AddI->hasOneUse()) 1262 assert(*AddI->user_begin() == CI && "expected!"); 1263 #endif 1264 1265 Module *M = CI->getModule(); 1266 Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty); 1267 1268 auto *InsertPt = AddI->hasOneUse() ? CI : AddI; 1269 1270 auto *UAddWithOverflow = 1271 CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt); 1272 auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt); 1273 auto *Overflow = 1274 ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt); 1275 1276 CI->replaceAllUsesWith(Overflow); 1277 AddI->replaceAllUsesWith(UAdd); 1278 CI->eraseFromParent(); 1279 AddI->eraseFromParent(); 1280 return true; 1281 } 1282 1283 /// Sink the given CmpInst into user blocks to reduce the number of virtual 1284 /// registers that must be created and coalesced. This is a clear win except on 1285 /// targets with multiple condition code registers (PowerPC), where it might 1286 /// lose; some adjustment may be wanted there. 1287 /// 1288 /// Return true if any changes are made. 1289 static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) { 1290 BasicBlock *DefBB = CI->getParent(); 1291 1292 // Avoid sinking soft-FP comparisons, since this can move them into a loop. 1293 if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI)) 1294 return false; 1295 1296 // Only insert a cmp in each block once. 1297 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 1298 1299 bool MadeChange = false; 1300 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 1301 UI != E; ) { 1302 Use &TheUse = UI.getUse(); 1303 Instruction *User = cast<Instruction>(*UI); 1304 1305 // Preincrement use iterator so we don't invalidate it. 1306 ++UI; 1307 1308 // Don't bother for PHI nodes. 1309 if (isa<PHINode>(User)) 1310 continue; 1311 1312 // Figure out which BB this cmp is used in. 1313 BasicBlock *UserBB = User->getParent(); 1314 1315 // If this user is in the same block as the cmp, don't change the cmp. 1316 if (UserBB == DefBB) continue; 1317 1318 // If we have already inserted a cmp into this block, use it. 1319 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 1320 1321 if (!InsertedCmp) { 1322 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1323 assert(InsertPt != UserBB->end()); 1324 InsertedCmp = 1325 CmpInst::Create(CI->getOpcode(), CI->getPredicate(), 1326 CI->getOperand(0), CI->getOperand(1), "", &*InsertPt); 1327 // Propagate the debug info. 1328 InsertedCmp->setDebugLoc(CI->getDebugLoc()); 1329 } 1330 1331 // Replace a use of the cmp with a use of the new cmp. 1332 TheUse = InsertedCmp; 1333 MadeChange = true; 1334 ++NumCmpUses; 1335 } 1336 1337 // If we removed all uses, nuke the cmp. 
1338 if (CI->use_empty()) { 1339 CI->eraseFromParent(); 1340 MadeChange = true; 1341 } 1342 1343 return MadeChange; 1344 } 1345 1346 static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) { 1347 if (SinkCmpExpression(CI, TLI)) 1348 return true; 1349 1350 if (CombineUAddWithOverflow(CI)) 1351 return true; 1352 1353 return false; 1354 } 1355 1356 /// Duplicate and sink the given 'and' instruction into user blocks where it is 1357 /// used in a compare to allow isel to generate better code for targets where 1358 /// this operation can be combined. 1359 /// 1360 /// Return true if any changes are made. 1361 static bool sinkAndCmp0Expression(Instruction *AndI, 1362 const TargetLowering &TLI, 1363 SetOfInstrs &InsertedInsts) { 1364 // Double-check that we're not trying to optimize an instruction that was 1365 // already optimized by some other part of this pass. 1366 assert(!InsertedInsts.count(AndI) && 1367 "Attempting to optimize already optimized and instruction"); 1368 (void) InsertedInsts; 1369 1370 // Nothing to do for single use in same basic block. 1371 if (AndI->hasOneUse() && 1372 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) 1373 return false; 1374 1375 // Try to avoid cases where sinking/duplicating is likely to increase register 1376 // pressure. 1377 if (!isa<ConstantInt>(AndI->getOperand(0)) && 1378 !isa<ConstantInt>(AndI->getOperand(1)) && 1379 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) 1380 return false; 1381 1382 for (auto *U : AndI->users()) { 1383 Instruction *User = cast<Instruction>(U); 1384 1385 // Only sink for and mask feeding icmp with 0. 1386 if (!isa<ICmpInst>(User)) 1387 return false; 1388 1389 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); 1390 if (!CmpC || !CmpC->isZero()) 1391 return false; 1392 } 1393 1394 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) 1395 return false; 1396 1397 DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); 1398 DEBUG(AndI->getParent()->dump()); 1399 1400 // Push the 'and' into the same block as the icmp 0. There should only be 1401 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any 1402 // others, so we don't need to keep track of which BBs we insert into. 1403 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); 1404 UI != E; ) { 1405 Use &TheUse = UI.getUse(); 1406 Instruction *User = cast<Instruction>(*UI); 1407 1408 // Preincrement use iterator so we don't invalidate it. 1409 ++UI; 1410 1411 DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); 1412 1413 // Keep the 'and' in the same place if the use is already in the same block. 1414 Instruction *InsertPt = 1415 User->getParent() == AndI->getParent() ? AndI : User; 1416 Instruction *InsertedAnd = 1417 BinaryOperator::Create(Instruction::And, AndI->getOperand(0), 1418 AndI->getOperand(1), "", InsertPt); 1419 // Propagate the debug info. 1420 InsertedAnd->setDebugLoc(AndI->getDebugLoc()); 1421 1422 // Replace a use of the 'and' with a use of the new 'and'. 1423 TheUse = InsertedAnd; 1424 ++NumAndUses; 1425 DEBUG(User->getParent()->dump()); 1426 } 1427 1428 // We removed all uses, nuke the and. 1429 AndI->eraseFromParent(); 1430 return true; 1431 } 1432 1433 /// Check if the candidates could be combined with a shift instruction, which 1434 /// includes: 1435 /// 1. Truncate instruction 1436 /// 2. 
And instruction and the imm is a mask of the low bits: 1437 /// imm & (imm+1) == 0 1438 static bool isExtractBitsCandidateUse(Instruction *User) { 1439 if (!isa<TruncInst>(User)) { 1440 if (User->getOpcode() != Instruction::And || 1441 !isa<ConstantInt>(User->getOperand(1))) 1442 return false; 1443 1444 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 1445 1446 if ((Cimm & (Cimm + 1)).getBoolValue()) 1447 return false; 1448 } 1449 return true; 1450 } 1451 1452 /// Sink both shift and truncate instruction to the use of truncate's BB. 1453 static bool 1454 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 1455 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 1456 const TargetLowering &TLI, const DataLayout &DL) { 1457 BasicBlock *UserBB = User->getParent(); 1458 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 1459 TruncInst *TruncI = dyn_cast<TruncInst>(User); 1460 bool MadeChange = false; 1461 1462 for (Value::user_iterator TruncUI = TruncI->user_begin(), 1463 TruncE = TruncI->user_end(); 1464 TruncUI != TruncE;) { 1465 1466 Use &TruncTheUse = TruncUI.getUse(); 1467 Instruction *TruncUser = cast<Instruction>(*TruncUI); 1468 // Preincrement use iterator so we don't invalidate it. 1469 1470 ++TruncUI; 1471 1472 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1473 if (!ISDOpcode) 1474 continue; 1475 1476 // If the use is actually a legal node, there will not be an 1477 // implicit truncate. 1478 // FIXME: always querying the result type is just an 1479 // approximation; some nodes' legality is determined by the 1480 // operand or other means. There's no good way to find out though. 1481 if (TLI.isOperationLegalOrCustom( 1482 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1483 continue; 1484 1485 // Don't bother for PHI nodes. 1486 if (isa<PHINode>(TruncUser)) 1487 continue; 1488 1489 BasicBlock *TruncUserBB = TruncUser->getParent(); 1490 1491 if (UserBB == TruncUserBB) 1492 continue; 1493 1494 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1495 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1496 1497 if (!InsertedShift && !InsertedTrunc) { 1498 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1499 assert(InsertPt != TruncUserBB->end()); 1500 // Sink the shift 1501 if (ShiftI->getOpcode() == Instruction::AShr) 1502 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1503 "", &*InsertPt); 1504 else 1505 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1506 "", &*InsertPt); 1507 1508 // Sink the trunc 1509 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1510 TruncInsertPt++; 1511 assert(TruncInsertPt != TruncUserBB->end()); 1512 1513 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1514 TruncI->getType(), "", &*TruncInsertPt); 1515 1516 MadeChange = true; 1517 1518 TruncTheUse = InsertedTrunc; 1519 } 1520 } 1521 return MadeChange; 1522 } 1523 1524 /// Sink the shift *right* instruction into user blocks if the uses could 1525 /// potentially be combined with this shift instruction and generate BitExtract 1526 /// instruction. It will only be applied if the architecture supports BitExtract 1527 /// instruction. 
Here is an example: 1528 /// BB1: 1529 /// %x.extract.shift = lshr i64 %arg1, 32 1530 /// BB2: 1531 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1532 /// ==> 1533 /// 1534 /// BB2: 1535 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1536 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1537 /// 1538 /// CodeGen will recognize the pattern in BB2 and generate a BitExtract 1539 /// instruction. 1540 /// Return true if any changes are made. 1541 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1542 const TargetLowering &TLI, 1543 const DataLayout &DL) { 1544 BasicBlock *DefBB = ShiftI->getParent(); 1545 1546 /// Only insert instructions in each block once. 1547 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1548 1549 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1550 1551 bool MadeChange = false; 1552 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1553 UI != E;) { 1554 Use &TheUse = UI.getUse(); 1555 Instruction *User = cast<Instruction>(*UI); 1556 // Preincrement use iterator so we don't invalidate it. 1557 ++UI; 1558 1559 // Don't bother for PHI nodes. 1560 if (isa<PHINode>(User)) 1561 continue; 1562 1563 if (!isExtractBitsCandidateUse(User)) 1564 continue; 1565 1566 BasicBlock *UserBB = User->getParent(); 1567 1568 if (UserBB == DefBB) { 1569 // If the shift and truncate instructions are in the same BB, the use of 1570 // the truncate (TruncUse) may still introduce another truncate if not 1571 // legal. In this case, we would like to sink both shift and truncate 1572 // instruction to the BB of TruncUse. 1573 // For example: 1574 // BB1: 1575 // i64 shift.result = lshr i64 opnd, imm 1576 // trunc.result = trunc shift.result to i16 1577 // 1578 // BB2: 1579 // ----> We will have an implicit truncate here if the architecture does 1580 // not have i16 compare. 1581 // cmp i16 trunc.result, opnd2 1582 // 1583 if (isa<TruncInst>(User) && shiftIsLegal 1584 // If the type of the truncate is legal, no truncate will be 1585 // introduced in other basic blocks. 1586 && 1587 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1588 MadeChange = 1589 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1590 1591 continue; 1592 } 1593 // If we have already inserted a shift into this block, use it. 1594 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1595 1596 if (!InsertedShift) { 1597 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1598 assert(InsertPt != UserBB->end()); 1599 1600 if (ShiftI->getOpcode() == Instruction::AShr) 1601 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1602 "", &*InsertPt); 1603 else 1604 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1605 "", &*InsertPt); 1606 1607 MadeChange = true; 1608 } 1609 1610 // Replace a use of the shift with a use of the new shift. 1611 TheUse = InsertedShift; 1612 } 1613 1614 // If we removed all uses, nuke the shift. 1615 if (ShiftI->use_empty()) 1616 ShiftI->eraseFromParent(); 1617 1618 return MadeChange; 1619 } 1620 1621 /// If counting leading or trailing zeros is an expensive operation and a zero 1622 /// input is defined, add a check for zero to avoid calling the intrinsic.
1623 /// 1624 /// We want to transform: 1625 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1626 /// 1627 /// into: 1628 /// entry: 1629 /// %cmpz = icmp eq i64 %A, 0 1630 /// br i1 %cmpz, label %cond.end, label %cond.false 1631 /// cond.false: 1632 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1633 /// br label %cond.end 1634 /// cond.end: 1635 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1636 /// 1637 /// If the transform is performed, return true and set ModifiedDT to true. 1638 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1639 const TargetLowering *TLI, 1640 const DataLayout *DL, 1641 bool &ModifiedDT) { 1642 if (!TLI || !DL) 1643 return false; 1644 1645 // If a zero input is undefined, it doesn't make sense to despeculate that. 1646 if (match(CountZeros->getOperand(1), m_One())) 1647 return false; 1648 1649 // If it's cheap to speculate, there's nothing to do. 1650 auto IntrinsicID = CountZeros->getIntrinsicID(); 1651 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1652 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1653 return false; 1654 1655 // Only handle legal scalar cases. Anything else requires too much work. 1656 Type *Ty = CountZeros->getType(); 1657 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1658 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 1659 return false; 1660 1661 // The intrinsic will be sunk behind a compare against zero and branch. 1662 BasicBlock *StartBlock = CountZeros->getParent(); 1663 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1664 1665 // Create another block after the count zero intrinsic. A PHI will be added 1666 // in this block to select the result of the intrinsic or the bit-width 1667 // constant if the input to the intrinsic is zero. 1668 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 1669 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 1670 1671 // Set up a builder to create a compare, conditional branch, and PHI. 1672 IRBuilder<> Builder(CountZeros->getContext()); 1673 Builder.SetInsertPoint(StartBlock->getTerminator()); 1674 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 1675 1676 // Replace the unconditional branch that was created by the first split with 1677 // a compare against zero and a conditional branch. 1678 Value *Zero = Constant::getNullValue(Ty); 1679 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 1680 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 1681 StartBlock->getTerminator()->eraseFromParent(); 1682 1683 // Create a PHI in the end block to select either the output of the intrinsic 1684 // or the bit width of the operand. 1685 Builder.SetInsertPoint(&EndBlock->front()); 1686 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 1687 CountZeros->replaceAllUsesWith(PN); 1688 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 1689 PN->addIncoming(BitWidth, StartBlock); 1690 PN->addIncoming(CountZeros, CallBlock); 1691 1692 // We are explicitly handling the zero case, so we can set the intrinsic's 1693 // undefined zero argument to 'true'. This will also prevent reprocessing the 1694 // intrinsic; we only despeculate when a zero input is defined. 1695 CountZeros->setArgOperand(1, Builder.getTrue()); 1696 ModifiedDT = true; 1697 return true; 1698 } 1699 1700 namespace { 1701 1702 // This class provides helper functions to expand a memcmp library call into an 1703 // inline expansion. 
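// As a rough illustration of the shape of the output (the sizes here are
// hypothetical and depend on the target's legal load sizes): a 16-byte memcmp
// on a target with 8-byte loads would become two load/compare blocks, a
// res_block that turns the first differing pair of (byte-swapped) values into
// -1 or +1, and an endblock whose phi yields that value, or 0 when all loads
// compared equal.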
1704 class MemCmpExpansion { 1705 struct ResultBlock { 1706 BasicBlock *BB = nullptr; 1707 PHINode *PhiSrc1 = nullptr; 1708 PHINode *PhiSrc2 = nullptr; 1709 1710 ResultBlock() = default; 1711 }; 1712 1713 CallInst *const CI; 1714 ResultBlock ResBlock; 1715 const uint64_t Size; 1716 unsigned MaxLoadSize; 1717 uint64_t NumLoadsNonOneByte; 1718 const uint64_t NumLoadsPerBlock; 1719 std::vector<BasicBlock *> LoadCmpBlocks; 1720 BasicBlock *EndBlock; 1721 PHINode *PhiRes; 1722 const bool IsUsedForZeroCmp; 1723 const DataLayout &DL; 1724 IRBuilder<> Builder; 1725 // Represents the decomposition in blocks of the expansion. For example, 1726 // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and 1727 // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}]. 1728 // TODO(courbet): Involve the target more in this computation. On X86, 7 1729 // bytes can be done more efficiently with two overlapping 4-byte loads than 1730 // covering the interval with [{4, 0},{2, 4},{1, 6}]. 1731 struct LoadEntry { 1732 LoadEntry(unsigned LoadSize, uint64_t Offset) 1733 : LoadSize(LoadSize), Offset(Offset) { 1734 assert(Offset % LoadSize == 0 && "invalid load entry"); 1735 } 1736 1737 uint64_t getGEPIndex() const { return Offset / LoadSize; } 1738 1739 // The size of the load for this block, in bytes. 1740 const unsigned LoadSize; 1741 // The offset of this load WRT the base pointer, in bytes. 1742 const uint64_t Offset; 1743 }; 1744 SmallVector<LoadEntry, 8> LoadSequence; 1745 1746 void createLoadCmpBlocks(); 1747 void createResultBlock(); 1748 void setupResultBlockPHINodes(); 1749 void setupEndBlockPHINodes(); 1750 Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex); 1751 void emitLoadCompareBlock(unsigned BlockIndex); 1752 void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex, 1753 unsigned &LoadIndex); 1754 void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned GEPIndex); 1755 void emitMemCmpResultBlock(); 1756 Value *getMemCmpExpansionZeroCase(); 1757 Value *getMemCmpEqZeroOneBlock(); 1758 Value *getMemCmpOneBlock(); 1759 1760 public: 1761 MemCmpExpansion(CallInst *CI, uint64_t Size, 1762 const TargetTransformInfo::MemCmpExpansionOptions &Options, 1763 unsigned MaxNumLoads, const bool IsUsedForZeroCmp, 1764 unsigned NumLoadsPerBlock, const DataLayout &DL); 1765 1766 unsigned getNumBlocks(); 1767 uint64_t getNumLoads() const { return LoadSequence.size(); } 1768 1769 Value *getMemCmpExpansion(); 1770 }; 1771 1772 } // end anonymous namespace 1773 1774 // Initialize the basic block structure required for expansion of memcmp call 1775 // with given maximum load size and memcmp size parameter. 1776 // This structure includes: 1777 // 1. A list of load compare blocks - LoadCmpBlocks. 1778 // 2. An EndBlock, split from original instruction point, which is the block to 1779 // return from. 1780 // 3. ResultBlock, block to branch to for early exit when a 1781 // LoadCmpBlock finds a difference.
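// For example (a hypothetical configuration rather than any particular
// target): with Options.LoadSizes = {8, 4, 2, 1} and Size = 15, the greedy
// loop below would produce the load sequence
//   {8, 0}, {4, 8}, {2, 12}, {1, 14}
// i.e. one 8-byte, one 4-byte, one 2-byte and one 1-byte load, provided
// MaxNumLoads allows at least four loads; otherwise LoadSequence is cleared
// and the call is not expanded.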
1782 MemCmpExpansion::MemCmpExpansion( 1783 CallInst *const CI, uint64_t Size, 1784 const TargetTransformInfo::MemCmpExpansionOptions &Options, 1785 const unsigned MaxNumLoads, const bool IsUsedForZeroCmp, 1786 const unsigned NumLoadsPerBlock, const DataLayout &TheDataLayout) 1787 : CI(CI), 1788 Size(Size), 1789 MaxLoadSize(0), 1790 NumLoadsNonOneByte(0), 1791 NumLoadsPerBlock(NumLoadsPerBlock), 1792 IsUsedForZeroCmp(IsUsedForZeroCmp), 1793 DL(TheDataLayout), 1794 Builder(CI) { 1795 assert(Size > 0 && "zero blocks"); 1796 // Scale the max size down if the target can load more bytes than we need. 1797 size_t LoadSizeIndex = 0; 1798 while (LoadSizeIndex < Options.LoadSizes.size() && 1799 Options.LoadSizes[LoadSizeIndex] > Size) { 1800 ++LoadSizeIndex; 1801 } 1802 this->MaxLoadSize = Options.LoadSizes[LoadSizeIndex]; 1803 // Compute the decomposition. 1804 uint64_t CurSize = Size; 1805 uint64_t Offset = 0; 1806 while (CurSize && LoadSizeIndex < Options.LoadSizes.size()) { 1807 const unsigned LoadSize = Options.LoadSizes[LoadSizeIndex]; 1808 assert(LoadSize > 0 && "zero load size"); 1809 const uint64_t NumLoadsForThisSize = CurSize / LoadSize; 1810 if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) { 1811 // Do not expand if the total number of loads is larger than what the 1812 // target allows. Note that it's important that we exit before completing 1813 // the expansion to avoid using a ton of memory to store the expansion for 1814 // large sizes. 1815 LoadSequence.clear(); 1816 return; 1817 } 1818 if (NumLoadsForThisSize > 0) { 1819 for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) { 1820 LoadSequence.push_back({LoadSize, Offset}); 1821 Offset += LoadSize; 1822 } 1823 if (LoadSize > 1) { 1824 ++NumLoadsNonOneByte; 1825 } 1826 CurSize = CurSize % LoadSize; 1827 } 1828 ++LoadSizeIndex; 1829 } 1830 assert(LoadSequence.size() <= MaxNumLoads && "broken invariant"); 1831 } 1832 1833 unsigned MemCmpExpansion::getNumBlocks() { 1834 if (IsUsedForZeroCmp) 1835 return getNumLoads() / NumLoadsPerBlock + 1836 (getNumLoads() % NumLoadsPerBlock != 0 ? 1 : 0); 1837 return getNumLoads(); 1838 } 1839 1840 void MemCmpExpansion::createLoadCmpBlocks() { 1841 for (unsigned i = 0; i < getNumBlocks(); i++) { 1842 BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb", 1843 EndBlock->getParent(), EndBlock); 1844 LoadCmpBlocks.push_back(BB); 1845 } 1846 } 1847 1848 void MemCmpExpansion::createResultBlock() { 1849 ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block", 1850 EndBlock->getParent(), EndBlock); 1851 } 1852 1853 // This function creates the IR instructions for loading and comparing 1 byte. 1854 // It loads 1 byte from each source of the memcmp parameters with the given 1855 // GEPIndex. It then subtracts the two loaded values and adds this result to the 1856 // final phi node for selecting the memcmp result. 1857 void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex, 1858 unsigned GEPIndex) { 1859 Value *Source1 = CI->getArgOperand(0); 1860 Value *Source2 = CI->getArgOperand(1); 1861 1862 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]); 1863 Type *LoadSizeType = Type::getInt8Ty(CI->getContext()); 1864 // Cast source to LoadSizeType*. 1865 if (Source1->getType() != LoadSizeType) 1866 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 1867 if (Source2->getType() != LoadSizeType) 1868 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 1869 1870 // Get the base address using the GEPIndex. 
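// (LoadSizeType is i8 in this byte-sized block, so GEPIndex is simply the
// byte offset of this load from the start of each buffer.)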
1871 if (GEPIndex != 0) { 1872 Source1 = Builder.CreateGEP(LoadSizeType, Source1, 1873 ConstantInt::get(LoadSizeType, GEPIndex)); 1874 Source2 = Builder.CreateGEP(LoadSizeType, Source2, 1875 ConstantInt::get(LoadSizeType, GEPIndex)); 1876 } 1877 1878 Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 1879 Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 1880 1881 LoadSrc1 = Builder.CreateZExt(LoadSrc1, Type::getInt32Ty(CI->getContext())); 1882 LoadSrc2 = Builder.CreateZExt(LoadSrc2, Type::getInt32Ty(CI->getContext())); 1883 Value *Diff = Builder.CreateSub(LoadSrc1, LoadSrc2); 1884 1885 PhiRes->addIncoming(Diff, LoadCmpBlocks[BlockIndex]); 1886 1887 if (BlockIndex < (LoadCmpBlocks.size() - 1)) { 1888 // Early exit branch if difference found to EndBlock. Otherwise, continue to 1889 // next LoadCmpBlock, 1890 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff, 1891 ConstantInt::get(Diff->getType(), 0)); 1892 BranchInst *CmpBr = 1893 BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp); 1894 Builder.Insert(CmpBr); 1895 } else { 1896 // The last block has an unconditional branch to EndBlock. 1897 BranchInst *CmpBr = BranchInst::Create(EndBlock); 1898 Builder.Insert(CmpBr); 1899 } 1900 } 1901 1902 /// Generate an equality comparison for one or more pairs of loaded values. 1903 /// This is used in the case where the memcmp() call is compared equal or not 1904 /// equal to zero. 1905 Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex, 1906 unsigned &LoadIndex) { 1907 assert(LoadIndex < getNumLoads() && 1908 "getCompareLoadPairs() called with no remaining loads"); 1909 std::vector<Value *> XorList, OrList; 1910 Value *Diff; 1911 1912 const unsigned NumLoads = 1913 std::min(getNumLoads() - LoadIndex, NumLoadsPerBlock); 1914 1915 // For a single-block expansion, start inserting before the memcmp call. 1916 if (LoadCmpBlocks.empty()) 1917 Builder.SetInsertPoint(CI); 1918 else 1919 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]); 1920 1921 Value *Cmp = nullptr; 1922 // If we have multiple loads per block, we need to generate a composite 1923 // comparison using xor+or. The type for the combinations is the largest load 1924 // type. 1925 IntegerType *const MaxLoadType = 1926 NumLoads == 1 ? nullptr 1927 : IntegerType::get(CI->getContext(), MaxLoadSize * 8); 1928 for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) { 1929 const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex]; 1930 1931 IntegerType *LoadSizeType = 1932 IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8); 1933 1934 Value *Source1 = CI->getArgOperand(0); 1935 Value *Source2 = CI->getArgOperand(1); 1936 1937 // Cast source to LoadSizeType*. 1938 if (Source1->getType() != LoadSizeType) 1939 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 1940 if (Source2->getType() != LoadSizeType) 1941 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 1942 1943 // Get the base address using a GEP. 1944 if (CurLoadEntry.Offset != 0) { 1945 Source1 = Builder.CreateGEP( 1946 LoadSizeType, Source1, 1947 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 1948 Source2 = Builder.CreateGEP( 1949 LoadSizeType, Source2, 1950 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 1951 } 1952 1953 // Get a constant or load a value for each source address. 
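// (If a source pointer is itself a constant -- e.g. it points into a
// constant global -- the load can be constant-folded away rather than
// emitted.)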
1954 Value *LoadSrc1 = nullptr; 1955 if (auto *Source1C = dyn_cast<Constant>(Source1)) 1956 LoadSrc1 = ConstantFoldLoadFromConstPtr(Source1C, LoadSizeType, DL); 1957 if (!LoadSrc1) 1958 LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 1959 1960 Value *LoadSrc2 = nullptr; 1961 if (auto *Source2C = dyn_cast<Constant>(Source2)) 1962 LoadSrc2 = ConstantFoldLoadFromConstPtr(Source2C, LoadSizeType, DL); 1963 if (!LoadSrc2) 1964 LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 1965 1966 if (NumLoads != 1) { 1967 if (LoadSizeType != MaxLoadType) { 1968 LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType); 1969 LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType); 1970 } 1971 // If we have multiple loads per block, we need to generate a composite 1972 // comparison using xor+or. 1973 Diff = Builder.CreateXor(LoadSrc1, LoadSrc2); 1974 Diff = Builder.CreateZExt(Diff, MaxLoadType); 1975 XorList.push_back(Diff); 1976 } else { 1977 // If there's only one load per block, we just compare the loaded values. 1978 Cmp = Builder.CreateICmpNE(LoadSrc1, LoadSrc2); 1979 } 1980 } 1981 1982 auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> { 1983 std::vector<Value *> OutList; 1984 for (unsigned i = 0; i < InList.size() - 1; i = i + 2) { 1985 Value *Or = Builder.CreateOr(InList[i], InList[i + 1]); 1986 OutList.push_back(Or); 1987 } 1988 if (InList.size() % 2 != 0) 1989 OutList.push_back(InList.back()); 1990 return OutList; 1991 }; 1992 1993 if (!Cmp) { 1994 // Pairwise OR the XOR results. 1995 OrList = pairWiseOr(XorList); 1996 1997 // Pairwise OR the OR results until one result left. 1998 while (OrList.size() != 1) { 1999 OrList = pairWiseOr(OrList); 2000 } 2001 Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0)); 2002 } 2003 2004 return Cmp; 2005 } 2006 2007 void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex, 2008 unsigned &LoadIndex) { 2009 Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex); 2010 2011 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1)) 2012 ? EndBlock 2013 : LoadCmpBlocks[BlockIndex + 1]; 2014 // Early exit branch if difference found to ResultBlock. Otherwise, 2015 // continue to next LoadCmpBlock or EndBlock. 2016 BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp); 2017 Builder.Insert(CmpBr); 2018 2019 // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0 2020 // since early exit to ResultBlock was not taken (no difference was found in 2021 // any of the bytes). 2022 if (BlockIndex == LoadCmpBlocks.size() - 1) { 2023 Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0); 2024 PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]); 2025 } 2026 } 2027 2028 // This function creates the IR instructions for loading and comparing using the 2029 // given LoadSize. It loads the number of bytes specified by LoadSize from each 2030 // source of the memcmp parameters. It then compares the loaded values to see 2031 // if there is a difference. If a difference is found, it branches 2032 // with an early exit to the ResultBlock for calculating which source was 2033 // larger. Otherwise, it falls through to either the next LoadCmpBlock or 2034 // the EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled with 2035 // a special case through emitLoadCompareByteBlock. The special handling can 2036 // simply subtract the loaded values and add it to the result phi node.
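// Roughly, for an 8-byte block on a little-endian target, the emitted IR
// looks like the following sketch (value names and the successor block are
// illustrative only):
//   %lhs = load i64, i64* %src1.cast
//   %rhs = load i64, i64* %src2.cast
//   %lhs.bs = call i64 @llvm.bswap.i64(i64 %lhs)
//   %rhs.bs = call i64 @llvm.bswap.i64(i64 %rhs)
//   %eq = icmp eq i64 %lhs.bs, %rhs.bs
//   br i1 %eq, label %loadbb1, label %res_block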
2037 void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) { 2038 // There is one load per block in this case, BlockIndex == LoadIndex. 2039 const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex]; 2040 2041 if (CurLoadEntry.LoadSize == 1) { 2042 MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, 2043 CurLoadEntry.getGEPIndex()); 2044 return; 2045 } 2046 2047 Type *LoadSizeType = 2048 IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8); 2049 Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8); 2050 assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type"); 2051 2052 Value *Source1 = CI->getArgOperand(0); 2053 Value *Source2 = CI->getArgOperand(1); 2054 2055 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]); 2056 // Cast source to LoadSizeType*. 2057 if (Source1->getType() != LoadSizeType) 2058 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 2059 if (Source2->getType() != LoadSizeType) 2060 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 2061 2062 // Get the base address using a GEP. 2063 if (CurLoadEntry.Offset != 0) { 2064 Source1 = Builder.CreateGEP( 2065 LoadSizeType, Source1, 2066 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 2067 Source2 = Builder.CreateGEP( 2068 LoadSizeType, Source2, 2069 ConstantInt::get(LoadSizeType, CurLoadEntry.getGEPIndex())); 2070 } 2071 2072 // Load LoadSizeType from the base address. 2073 Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 2074 Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 2075 2076 if (DL.isLittleEndian()) { 2077 Function *Bswap = Intrinsic::getDeclaration(CI->getModule(), 2078 Intrinsic::bswap, LoadSizeType); 2079 LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1); 2080 LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2); 2081 } 2082 2083 if (LoadSizeType != MaxLoadType) { 2084 LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType); 2085 LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType); 2086 } 2087 2088 // Add the loaded values to the phi nodes for calculating memcmp result only 2089 // if result is not used in a zero equality. 2090 if (!IsUsedForZeroCmp) { 2091 ResBlock.PhiSrc1->addIncoming(LoadSrc1, LoadCmpBlocks[BlockIndex]); 2092 ResBlock.PhiSrc2->addIncoming(LoadSrc2, LoadCmpBlocks[BlockIndex]); 2093 } 2094 2095 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, LoadSrc1, LoadSrc2); 2096 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1)) 2097 ? EndBlock 2098 : LoadCmpBlocks[BlockIndex + 1]; 2099 // Early exit branch if difference found to ResultBlock. Otherwise, continue 2100 // to next LoadCmpBlock or EndBlock. 2101 BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp); 2102 Builder.Insert(CmpBr); 2103 2104 // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0 2105 // since early exit to ResultBlock was not taken (no difference was found in 2106 // any of the bytes). 2107 if (BlockIndex == LoadCmpBlocks.size() - 1) { 2108 Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0); 2109 PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]); 2110 } 2111 } 2112 2113 // This function populates the ResultBlock with a sequence to calculate the 2114 // memcmp result. It compares the two loaded source values and returns -1 if 2115 // src1 < src2 and 1 if src1 > src2. 2116 void MemCmpExpansion::emitMemCmpResultBlock() { 2117 // Special case: if memcmp result is used in a zero equality, result does not 2118 // need to be calculated and can simply return 1. 
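// (Any nonzero value works here; a zero-equality caller only ever tests the
// memcmp result against 0.)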
2119 if (IsUsedForZeroCmp) { 2120 BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt(); 2121 Builder.SetInsertPoint(ResBlock.BB, InsertPt); 2122 Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1); 2123 PhiRes->addIncoming(Res, ResBlock.BB); 2124 BranchInst *NewBr = BranchInst::Create(EndBlock); 2125 Builder.Insert(NewBr); 2126 return; 2127 } 2128 BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt(); 2129 Builder.SetInsertPoint(ResBlock.BB, InsertPt); 2130 2131 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1, 2132 ResBlock.PhiSrc2); 2133 2134 Value *Res = 2135 Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1), 2136 ConstantInt::get(Builder.getInt32Ty(), 1)); 2137 2138 BranchInst *NewBr = BranchInst::Create(EndBlock); 2139 Builder.Insert(NewBr); 2140 PhiRes->addIncoming(Res, ResBlock.BB); 2141 } 2142 2143 void MemCmpExpansion::setupResultBlockPHINodes() { 2144 Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8); 2145 Builder.SetInsertPoint(ResBlock.BB); 2146 // Note: this assumes one load per block. 2147 ResBlock.PhiSrc1 = 2148 Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1"); 2149 ResBlock.PhiSrc2 = 2150 Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2"); 2151 } 2152 2153 void MemCmpExpansion::setupEndBlockPHINodes() { 2154 Builder.SetInsertPoint(&EndBlock->front()); 2155 PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res"); 2156 } 2157 2158 Value *MemCmpExpansion::getMemCmpExpansionZeroCase() { 2159 unsigned LoadIndex = 0; 2160 // This loop populates each of the LoadCmpBlocks with the IR sequence to 2161 // handle multiple loads per block. 2162 for (unsigned I = 0; I < getNumBlocks(); ++I) { 2163 emitLoadCompareBlockMultipleLoads(I, LoadIndex); 2164 } 2165 2166 emitMemCmpResultBlock(); 2167 return PhiRes; 2168 } 2169 2170 /// A memcmp expansion that compares equality with 0 and only has one block of 2171 /// load and compare can bypass the compare, branch, and phi IR that is required 2172 /// in the general case. 2173 Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() { 2174 unsigned LoadIndex = 0; 2175 Value *Cmp = getCompareLoadPairs(0, LoadIndex); 2176 assert(LoadIndex == getNumLoads() && "some entries were not consumed"); 2177 return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext())); 2178 } 2179 2180 /// A memcmp expansion that only has one block of load and compare can bypass 2181 /// the compare, branch, and phi IR that is required in the general case. 2182 Value *MemCmpExpansion::getMemCmpOneBlock() { 2183 assert(NumLoadsPerBlock == 1 && "Only handles one load pair per block"); 2184 2185 Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8); 2186 Value *Source1 = CI->getArgOperand(0); 2187 Value *Source2 = CI->getArgOperand(1); 2188 2189 // Cast source to LoadSizeType*. 2190 if (Source1->getType() != LoadSizeType) 2191 Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); 2192 if (Source2->getType() != LoadSizeType) 2193 Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); 2194 2195 // Load LoadSizeType from the base address. 
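// (On little-endian targets the loaded values are byte-swapped below so that
// comparing or subtracting them as unsigned integers matches memcmp's
// byte-wise ordering.)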
2196 Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); 2197 Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); 2198 2199 if (DL.isLittleEndian() && Size != 1) { 2200 Function *Bswap = Intrinsic::getDeclaration(CI->getModule(), 2201 Intrinsic::bswap, LoadSizeType); 2202 LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1); 2203 LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2); 2204 } 2205 2206 if (Size < 4) { 2207 // The i8 and i16 cases don't need compares. We zext the loaded values and 2208 // subtract them to get the suitable negative, zero, or positive i32 result. 2209 LoadSrc1 = Builder.CreateZExt(LoadSrc1, Builder.getInt32Ty()); 2210 LoadSrc2 = Builder.CreateZExt(LoadSrc2, Builder.getInt32Ty()); 2211 return Builder.CreateSub(LoadSrc1, LoadSrc2); 2212 } 2213 2214 // The result of memcmp is negative, zero, or positive, so produce that by 2215 // subtracting 2 extended compare bits: sub (ugt, ult). 2216 // If a target prefers to use selects to get -1/0/1, they should be able 2217 // to transform this later. The inverse transform (going from selects to math) 2218 // may not be possible in the DAG because the selects got converted into 2219 // branches before we got there. 2220 Value *CmpUGT = Builder.CreateICmpUGT(LoadSrc1, LoadSrc2); 2221 Value *CmpULT = Builder.CreateICmpULT(LoadSrc1, LoadSrc2); 2222 Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty()); 2223 Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty()); 2224 return Builder.CreateSub(ZextUGT, ZextULT); 2225 } 2226 2227 // This function expands the memcmp call into an inline expansion and returns 2228 // the memcmp result. 2229 Value *MemCmpExpansion::getMemCmpExpansion() { 2230 // A memcmp with zero-comparison with only one block of load and compare does 2231 // not need to set up any extra blocks. This case could be handled in the DAG, 2232 // but since we have all of the machinery to flexibly expand any memcpy here, 2233 // we choose to handle this case too to avoid fragmented lowering. 2234 if ((!IsUsedForZeroCmp && NumLoadsPerBlock != 1) || getNumBlocks() != 1) { 2235 BasicBlock *StartBlock = CI->getParent(); 2236 EndBlock = StartBlock->splitBasicBlock(CI, "endblock"); 2237 setupEndBlockPHINodes(); 2238 createResultBlock(); 2239 2240 // If return value of memcmp is not used in a zero equality, we need to 2241 // calculate which source was larger. The calculation requires the 2242 // two loaded source values of each load compare block. 2243 // These will be saved in the phi nodes created by setupResultBlockPHINodes. 2244 if (!IsUsedForZeroCmp) setupResultBlockPHINodes(); 2245 2246 // Create the number of required load compare basic blocks. 2247 createLoadCmpBlocks(); 2248 2249 // Update the terminator added by splitBasicBlock to branch to the first 2250 // LoadCmpBlock. 2251 StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]); 2252 } 2253 2254 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 2255 2256 if (IsUsedForZeroCmp) 2257 return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock() 2258 : getMemCmpExpansionZeroCase(); 2259 2260 // TODO: Handle more than one load pair per block in getMemCmpOneBlock(). 2261 if (getNumBlocks() == 1 && NumLoadsPerBlock == 1) return getMemCmpOneBlock(); 2262 2263 for (unsigned I = 0; I < getNumBlocks(); ++I) { 2264 emitLoadCompareBlock(I); 2265 } 2266 2267 emitMemCmpResultBlock(); 2268 return PhiRes; 2269 } 2270 2271 // This function checks to see if an expansion of memcmp can be generated. 
2272 // It checks for constant compare size that is less than the max inline size. 2273 // If an expansion cannot occur, returns false to leave as a library call. 2274 // Otherwise, the library call is replaced with a new IR instruction sequence. 2275 /// We want to transform: 2276 /// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15) 2277 /// To: 2278 /// loadbb: 2279 /// %0 = bitcast i32* %buffer2 to i8* 2280 /// %1 = bitcast i32* %buffer1 to i8* 2281 /// %2 = bitcast i8* %1 to i64* 2282 /// %3 = bitcast i8* %0 to i64* 2283 /// %4 = load i64, i64* %2 2284 /// %5 = load i64, i64* %3 2285 /// %6 = call i64 @llvm.bswap.i64(i64 %4) 2286 /// %7 = call i64 @llvm.bswap.i64(i64 %5) 2287 /// %8 = sub i64 %6, %7 2288 /// %9 = icmp ne i64 %8, 0 2289 /// br i1 %9, label %res_block, label %loadbb1 2290 /// res_block: ; preds = %loadbb2, 2291 /// %loadbb1, %loadbb 2292 /// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ] 2293 /// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ] 2294 /// %10 = icmp ult i64 %phi.src1, %phi.src2 2295 /// %11 = select i1 %10, i32 -1, i32 1 2296 /// br label %endblock 2297 /// loadbb1: ; preds = %loadbb 2298 /// %12 = bitcast i32* %buffer2 to i8* 2299 /// %13 = bitcast i32* %buffer1 to i8* 2300 /// %14 = bitcast i8* %13 to i32* 2301 /// %15 = bitcast i8* %12 to i32* 2302 /// %16 = getelementptr i32, i32* %14, i32 2 2303 /// %17 = getelementptr i32, i32* %15, i32 2 2304 /// %18 = load i32, i32* %16 2305 /// %19 = load i32, i32* %17 2306 /// %20 = call i32 @llvm.bswap.i32(i32 %18) 2307 /// %21 = call i32 @llvm.bswap.i32(i32 %19) 2308 /// %22 = zext i32 %20 to i64 2309 /// %23 = zext i32 %21 to i64 2310 /// %24 = sub i64 %22, %23 2311 /// %25 = icmp ne i64 %24, 0 2312 /// br i1 %25, label %res_block, label %loadbb2 2313 /// loadbb2: ; preds = %loadbb1 2314 /// %26 = bitcast i32* %buffer2 to i8* 2315 /// %27 = bitcast i32* %buffer1 to i8* 2316 /// %28 = bitcast i8* %27 to i16* 2317 /// %29 = bitcast i8* %26 to i16* 2318 /// %30 = getelementptr i16, i16* %28, i16 6 2319 /// %31 = getelementptr i16, i16* %29, i16 6 2320 /// %32 = load i16, i16* %30 2321 /// %33 = load i16, i16* %31 2322 /// %34 = call i16 @llvm.bswap.i16(i16 %32) 2323 /// %35 = call i16 @llvm.bswap.i16(i16 %33) 2324 /// %36 = zext i16 %34 to i64 2325 /// %37 = zext i16 %35 to i64 2326 /// %38 = sub i64 %36, %37 2327 /// %39 = icmp ne i64 %38, 0 2328 /// br i1 %39, label %res_block, label %loadbb3 2329 /// loadbb3: ; preds = %loadbb2 2330 /// %40 = bitcast i32* %buffer2 to i8* 2331 /// %41 = bitcast i32* %buffer1 to i8* 2332 /// %42 = getelementptr i8, i8* %41, i8 14 2333 /// %43 = getelementptr i8, i8* %40, i8 14 2334 /// %44 = load i8, i8* %42 2335 /// %45 = load i8, i8* %43 2336 /// %46 = zext i8 %44 to i32 2337 /// %47 = zext i8 %45 to i32 2338 /// %48 = sub i32 %46, %47 2339 /// br label %endblock 2340 /// endblock: ; preds = %res_block, 2341 /// %loadbb3 2342 /// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ] 2343 /// ret i32 %phi.res 2344 static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI, 2345 const TargetLowering *TLI, const DataLayout *DL) { 2346 NumMemCmpCalls++; 2347 2348 // Early exit from expansion if -Oz. 2349 if (CI->getFunction()->optForMinSize()) 2350 return false; 2351 2352 // Early exit from expansion if size is not a constant. 
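// (A runtime-variable size would need a loop rather than a fixed load
// sequence, which this expansion does not emit, so such calls are left to the
// library.)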
2353 ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2)); 2354 if (!SizeCast) { 2355 NumMemCmpNotConstant++; 2356 return false; 2357 } 2358 const uint64_t SizeVal = SizeCast->getZExtValue(); 2359 2360 if (SizeVal == 0) { 2361 return false; 2362 } 2363 2364 // TTI call to check if target would like to expand memcmp. Also, get the 2365 // available load sizes. 2366 const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI); 2367 const auto *const Options = TTI->enableMemCmpExpansion(IsUsedForZeroCmp); 2368 if (!Options) return false; 2369 2370 const unsigned MaxNumLoads = 2371 TLI->getMaxExpandSizeMemcmp(CI->getFunction()->optForSize()); 2372 2373 MemCmpExpansion Expansion(CI, SizeVal, *Options, MaxNumLoads, 2374 IsUsedForZeroCmp, MemCmpNumLoadsPerBlock, *DL); 2375 2376 // Don't expand if this will require more loads than desired by the target. 2377 if (Expansion.getNumLoads() == 0) { 2378 NumMemCmpGreaterThanMax++; 2379 return false; 2380 } 2381 2382 NumMemCmpInlined++; 2383 2384 Value *Res = Expansion.getMemCmpExpansion(); 2385 2386 // Replace call with result of expansion and erase call. 2387 CI->replaceAllUsesWith(Res); 2388 CI->eraseFromParent(); 2389 2390 return true; 2391 } 2392 2393 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { 2394 BasicBlock *BB = CI->getParent(); 2395 2396 // Lower inline assembly if we can. 2397 // If we found an inline asm expession, and if the target knows how to 2398 // lower it to normal LLVM code, do so now. 2399 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 2400 if (TLI->ExpandInlineAsm(CI)) { 2401 // Avoid invalidating the iterator. 2402 CurInstIterator = BB->begin(); 2403 // Avoid processing instructions out of order, which could cause 2404 // reuse before a value is defined. 2405 SunkAddrs.clear(); 2406 return true; 2407 } 2408 // Sink address computing for memory operands into the block. 2409 if (optimizeInlineAsmInst(CI)) 2410 return true; 2411 } 2412 2413 // Align the pointer arguments to this call if the target thinks it's a good 2414 // idea 2415 unsigned MinSize, PrefAlign; 2416 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 2417 for (auto &Arg : CI->arg_operands()) { 2418 // We want to align both objects whose address is used directly and 2419 // objects whose address is used in casts and GEPs, though it only makes 2420 // sense for GEPs if the offset is a multiple of the desired alignment and 2421 // if size - offset meets the size threshold. 2422 if (!Arg->getType()->isPointerTy()) 2423 continue; 2424 APInt Offset(DL->getPointerSizeInBits( 2425 cast<PointerType>(Arg->getType())->getAddressSpace()), 2426 0); 2427 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 2428 uint64_t Offset2 = Offset.getLimitedValue(); 2429 if ((Offset2 & (PrefAlign-1)) != 0) 2430 continue; 2431 AllocaInst *AI; 2432 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 2433 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 2434 AI->setAlignment(PrefAlign); 2435 // Global variables can only be aligned if they are defined in this 2436 // object (i.e. they are uniquely initialized in this object), and 2437 // over-aligning global variables that have an explicit section is 2438 // forbidden. 
2439 GlobalVariable *GV; 2440 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 2441 GV->getPointerAlignment(*DL) < PrefAlign && 2442 DL->getTypeAllocSize(GV->getValueType()) >= 2443 MinSize + Offset2) 2444 GV->setAlignment(PrefAlign); 2445 } 2446 // If this is a memcpy (or similar) then we may be able to improve the 2447 // alignment 2448 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 2449 unsigned Align = getKnownAlignment(MI->getDest(), *DL); 2450 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) 2451 Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL)); 2452 if (Align > MI->getAlignment()) 2453 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align)); 2454 } 2455 } 2456 2457 // If we have a cold call site, try to sink addressing computation into the 2458 // cold block. This interacts with our handling for loads and stores to 2459 // ensure that we can fold all uses of a potential addressing computation 2460 // into their uses. TODO: generalize this to work over profiling data 2461 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 2462 for (auto &Arg : CI->arg_operands()) { 2463 if (!Arg->getType()->isPointerTy()) 2464 continue; 2465 unsigned AS = Arg->getType()->getPointerAddressSpace(); 2466 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 2467 } 2468 2469 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 2470 if (II) { 2471 switch (II->getIntrinsicID()) { 2472 default: break; 2473 case Intrinsic::objectsize: { 2474 // Lower all uses of llvm.objectsize.* 2475 ConstantInt *RetVal = 2476 lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); 2477 // Substituting this can cause recursive simplifications, which can 2478 // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case 2479 // this 2480 // happens. 2481 Value *CurValue = &*CurInstIterator; 2482 WeakTrackingVH IterHandle(CurValue); 2483 2484 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 2485 2486 // If the iterator instruction was recursively deleted, start over at the 2487 // start of the block. 2488 if (IterHandle != CurValue) { 2489 CurInstIterator = BB->begin(); 2490 SunkAddrs.clear(); 2491 } 2492 return true; 2493 } 2494 case Intrinsic::aarch64_stlxr: 2495 case Intrinsic::aarch64_stxr: { 2496 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 2497 if (!ExtVal || !ExtVal->hasOneUse() || 2498 ExtVal->getParent() == CI->getParent()) 2499 return false; 2500 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 2501 ExtVal->moveBefore(CI); 2502 // Mark this instruction as "inserted by CGP", so that other 2503 // optimizations don't touch it. 2504 InsertedInsts.insert(ExtVal); 2505 return true; 2506 } 2507 case Intrinsic::invariant_group_barrier: 2508 II->replaceAllUsesWith(II->getArgOperand(0)); 2509 II->eraseFromParent(); 2510 return true; 2511 2512 case Intrinsic::cttz: 2513 case Intrinsic::ctlz: 2514 // If counting zeros is expensive, try to avoid it. 2515 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 2516 } 2517 2518 if (TLI) { 2519 SmallVector<Value*, 2> PtrOps; 2520 Type *AccessTy; 2521 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 2522 while (!PtrOps.empty()) { 2523 Value *PtrVal = PtrOps.pop_back_val(); 2524 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 2525 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 2526 return true; 2527 } 2528 } 2529 } 2530 2531 // From here on out we're working with named functions. 
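// (Indirect calls have no callee Function to inspect, so the library-call
// simplifications below cannot be applied to them.)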
2532 if (!CI->getCalledFunction()) return false; 2533 2534 // Lower all default uses of _chk calls. This is very similar 2535 // to what InstCombineCalls does, but here we are only lowering calls 2536 // to fortified library functions (e.g. __memcpy_chk) that have the default 2537 // "don't know" as the objectsize. Anything else should be left alone. 2538 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 2539 if (Value *V = Simplifier.optimizeCall(CI)) { 2540 CI->replaceAllUsesWith(V); 2541 CI->eraseFromParent(); 2542 return true; 2543 } 2544 2545 LibFunc Func; 2546 if (TLInfo->getLibFunc(ImmutableCallSite(CI), Func) && 2547 Func == LibFunc_memcmp && expandMemCmp(CI, TTI, TLI, DL)) { 2548 ModifiedDT = true; 2549 return true; 2550 } 2551 return false; 2552 } 2553 2554 /// Look for opportunities to duplicate return instructions to the predecessor 2555 /// to enable tail call optimizations. The case it is currently looking for is: 2556 /// @code 2557 /// bb0: 2558 /// %tmp0 = tail call i32 @f0() 2559 /// br label %return 2560 /// bb1: 2561 /// %tmp1 = tail call i32 @f1() 2562 /// br label %return 2563 /// bb2: 2564 /// %tmp2 = tail call i32 @f2() 2565 /// br label %return 2566 /// return: 2567 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 2568 /// ret i32 %retval 2569 /// @endcode 2570 /// 2571 /// => 2572 /// 2573 /// @code 2574 /// bb0: 2575 /// %tmp0 = tail call i32 @f0() 2576 /// ret i32 %tmp0 2577 /// bb1: 2578 /// %tmp1 = tail call i32 @f1() 2579 /// ret i32 %tmp1 2580 /// bb2: 2581 /// %tmp2 = tail call i32 @f2() 2582 /// ret i32 %tmp2 2583 /// @endcode 2584 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { 2585 if (!TLI) 2586 return false; 2587 2588 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 2589 if (!RetI) 2590 return false; 2591 2592 PHINode *PN = nullptr; 2593 BitCastInst *BCI = nullptr; 2594 Value *V = RetI->getReturnValue(); 2595 if (V) { 2596 BCI = dyn_cast<BitCastInst>(V); 2597 if (BCI) 2598 V = BCI->getOperand(0); 2599 2600 PN = dyn_cast<PHINode>(V); 2601 if (!PN) 2602 return false; 2603 } 2604 2605 if (PN && PN->getParent() != BB) 2606 return false; 2607 2608 // Make sure there are no instructions between the PHI and return, or that the 2609 // return is the first instruction in the block. 2610 if (PN) { 2611 BasicBlock::iterator BI = BB->begin(); 2612 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); 2613 if (&*BI == BCI) 2614 // Also skip over the bitcast. 2615 ++BI; 2616 if (&*BI != RetI) 2617 return false; 2618 } else { 2619 BasicBlock::iterator BI = BB->begin(); 2620 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 2621 if (&*BI != RetI) 2622 return false; 2623 } 2624 2625 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 2626 /// call. 2627 const Function *F = BB->getParent(); 2628 SmallVector<CallInst*, 4> TailCalls; 2629 if (PN) { 2630 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2631 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 2632 // Make sure the phi value is indeed produced by the tail call. 
2633 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && 2634 TLI->mayBeEmittedAsTailCall(CI) && 2635 attributesPermitTailCall(F, CI, RetI, *TLI)) 2636 TailCalls.push_back(CI); 2637 } 2638 } else { 2639 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2640 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 2641 if (!VisitedBBs.insert(*PI).second) 2642 continue; 2643 2644 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 2645 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 2646 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 2647 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 2648 if (RI == RE) 2649 continue; 2650 2651 CallInst *CI = dyn_cast<CallInst>(&*RI); 2652 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2653 attributesPermitTailCall(F, CI, RetI, *TLI)) 2654 TailCalls.push_back(CI); 2655 } 2656 } 2657 2658 bool Changed = false; 2659 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { 2660 CallInst *CI = TailCalls[i]; 2661 CallSite CS(CI); 2662 2663 // Conservatively require the attributes of the call to match those of the 2664 // return. Ignore noalias because it doesn't affect the call sequence. 2665 AttributeList CalleeAttrs = CS.getAttributes(); 2666 if (AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex) 2667 .removeAttribute(Attribute::NoAlias) != 2668 AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex) 2669 .removeAttribute(Attribute::NoAlias)) 2670 continue; 2671 2672 // Make sure the call instruction is followed by an unconditional branch to 2673 // the return block. 2674 BasicBlock *CallBB = CI->getParent(); 2675 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); 2676 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2677 continue; 2678 2679 // Duplicate the return into CallBB. 2680 (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); 2681 ModifiedDT = Changed = true; 2682 ++NumRetsDup; 2683 } 2684 2685 // If we eliminated all predecessors of the block, delete the block now. 2686 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 2687 BB->eraseFromParent(); 2688 2689 return Changed; 2690 } 2691 2692 //===----------------------------------------------------------------------===// 2693 // Memory Optimization 2694 //===----------------------------------------------------------------------===// 2695 2696 namespace { 2697 2698 /// This is an extended version of TargetLowering::AddrMode 2699 /// which holds actual Value*'s for register values. 2700 struct ExtAddrMode : public TargetLowering::AddrMode { 2701 Value *BaseReg = nullptr; 2702 Value *ScaledReg = nullptr; 2703 Value *OriginalValue = nullptr; 2704 2705 enum FieldName { 2706 NoField = 0x00, 2707 BaseRegField = 0x01, 2708 BaseGVField = 0x02, 2709 BaseOffsField = 0x04, 2710 ScaledRegField = 0x08, 2711 ScaleField = 0x10, 2712 MultipleFields = 0xff 2713 }; 2714 2715 ExtAddrMode() = default; 2716 2717 void print(raw_ostream &OS) const; 2718 void dump() const; 2719 2720 FieldName compare(const ExtAddrMode &other) { 2721 // First check that the types are the same on each field, as differing types 2722 // is something we can't cope with later on. 
2723 if (BaseReg && other.BaseReg && 2724 BaseReg->getType() != other.BaseReg->getType()) 2725 return MultipleFields; 2726 if (BaseGV && other.BaseGV && 2727 BaseGV->getType() != other.BaseGV->getType()) 2728 return MultipleFields; 2729 if (ScaledReg && other.ScaledReg && 2730 ScaledReg->getType() != other.ScaledReg->getType()) 2731 return MultipleFields; 2732 2733 // Check each field to see if it differs. 2734 unsigned Result = NoField; 2735 if (BaseReg != other.BaseReg) 2736 Result |= BaseRegField; 2737 if (BaseGV != other.BaseGV) 2738 Result |= BaseGVField; 2739 if (BaseOffs != other.BaseOffs) 2740 Result |= BaseOffsField; 2741 if (ScaledReg != other.ScaledReg) 2742 Result |= ScaledRegField; 2743 // Don't count 0 as being a different scale, because that actually means 2744 // unscaled (which will already be counted by having no ScaledReg). 2745 if (Scale && other.Scale && Scale != other.Scale) 2746 Result |= ScaleField; 2747 2748 if (countPopulation(Result) > 1) 2749 return MultipleFields; 2750 else 2751 return static_cast<FieldName>(Result); 2752 } 2753 2754 // AddrModes with a baseReg or gv where the reg/gv is 2755 // the only populated field are trivial. 2756 bool isTrivial() { 2757 if (BaseGV && !BaseOffs && !Scale && !BaseReg) 2758 return true; 2759 2760 if (!BaseGV && !BaseOffs && !Scale && BaseReg) 2761 return true; 2762 2763 return false; 2764 } 2765 }; 2766 2767 } // end anonymous namespace 2768 2769 #ifndef NDEBUG 2770 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2771 AM.print(OS); 2772 return OS; 2773 } 2774 #endif 2775 2776 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2777 void ExtAddrMode::print(raw_ostream &OS) const { 2778 bool NeedPlus = false; 2779 OS << "["; 2780 if (BaseGV) { 2781 OS << (NeedPlus ? " + " : "") 2782 << "GV:"; 2783 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2784 NeedPlus = true; 2785 } 2786 2787 if (BaseOffs) { 2788 OS << (NeedPlus ? " + " : "") 2789 << BaseOffs; 2790 NeedPlus = true; 2791 } 2792 2793 if (BaseReg) { 2794 OS << (NeedPlus ? " + " : "") 2795 << "Base:"; 2796 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2797 NeedPlus = true; 2798 } 2799 if (Scale) { 2800 OS << (NeedPlus ? " + " : "") 2801 << Scale << "*"; 2802 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2803 } 2804 2805 OS << ']'; 2806 } 2807 2808 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2809 print(dbgs()); 2810 dbgs() << '\n'; 2811 } 2812 #endif 2813 2814 namespace { 2815 2816 /// \brief This class provides transaction based operation on the IR. 2817 /// Every change made through this class is recorded in the internal state and 2818 /// can be undone (rollback) until commit is called. 2819 class TypePromotionTransaction { 2820 /// \brief This represents the common interface of the individual transaction. 2821 /// Each class implements the logic for doing one specific modification on 2822 /// the IR via the TypePromotionTransaction. 2823 class TypePromotionAction { 2824 protected: 2825 /// The Instruction modified. 2826 Instruction *Inst; 2827 2828 public: 2829 /// \brief Constructor of the action. 2830 /// The constructor performs the related action on the IR. 2831 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2832 2833 virtual ~TypePromotionAction() = default; 2834 2835 /// \brief Undo the modification done by this action. 2836 /// When this method is called, the IR must be in the same state as it was 2837 /// before this action was applied. 
2838 /// \pre Undoing the action works if and only if the IR is in the exact same 2839 /// state as it was directly after this action was applied. 2840 virtual void undo() = 0; 2841 2842 /// \brief Advocate every change made by this action. 2843 /// When the results on the IR of the action are to be kept, it is important 2844 /// to call this function, otherwise hidden information may be kept forever. 2845 virtual void commit() { 2846 // Nothing to be done, this action is not doing anything. 2847 } 2848 }; 2849 2850 /// \brief Utility to remember the position of an instruction. 2851 class InsertionHandler { 2852 /// Position of an instruction. 2853 /// Either an instruction: 2854 /// - Is the first in a basic block: BB is used. 2855 /// - Has a previous instructon: PrevInst is used. 2856 union { 2857 Instruction *PrevInst; 2858 BasicBlock *BB; 2859 } Point; 2860 2861 /// Remember whether or not the instruction had a previous instruction. 2862 bool HasPrevInstruction; 2863 2864 public: 2865 /// \brief Record the position of \p Inst. 2866 InsertionHandler(Instruction *Inst) { 2867 BasicBlock::iterator It = Inst->getIterator(); 2868 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2869 if (HasPrevInstruction) 2870 Point.PrevInst = &*--It; 2871 else 2872 Point.BB = Inst->getParent(); 2873 } 2874 2875 /// \brief Insert \p Inst at the recorded position. 2876 void insert(Instruction *Inst) { 2877 if (HasPrevInstruction) { 2878 if (Inst->getParent()) 2879 Inst->removeFromParent(); 2880 Inst->insertAfter(Point.PrevInst); 2881 } else { 2882 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2883 if (Inst->getParent()) 2884 Inst->moveBefore(Position); 2885 else 2886 Inst->insertBefore(Position); 2887 } 2888 } 2889 }; 2890 2891 /// \brief Move an instruction before another. 2892 class InstructionMoveBefore : public TypePromotionAction { 2893 /// Original position of the instruction. 2894 InsertionHandler Position; 2895 2896 public: 2897 /// \brief Move \p Inst before \p Before. 2898 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2899 : TypePromotionAction(Inst), Position(Inst) { 2900 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); 2901 Inst->moveBefore(Before); 2902 } 2903 2904 /// \brief Move the instruction back to its original position. 2905 void undo() override { 2906 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2907 Position.insert(Inst); 2908 } 2909 }; 2910 2911 /// \brief Set the operand of an instruction with a new value. 2912 class OperandSetter : public TypePromotionAction { 2913 /// Original operand of the instruction. 2914 Value *Origin; 2915 2916 /// Index of the modified instruction. 2917 unsigned Idx; 2918 2919 public: 2920 /// \brief Set \p Idx operand of \p Inst with \p NewVal. 2921 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2922 : TypePromotionAction(Inst), Idx(Idx) { 2923 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2924 << "for:" << *Inst << "\n" 2925 << "with:" << *NewVal << "\n"); 2926 Origin = Inst->getOperand(Idx); 2927 Inst->setOperand(Idx, NewVal); 2928 } 2929 2930 /// \brief Restore the original value of the instruction. 2931 void undo() override { 2932 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2933 << "for: " << *Inst << "\n" 2934 << "with: " << *Origin << "\n"); 2935 Inst->setOperand(Idx, Origin); 2936 } 2937 }; 2938 2939 /// \brief Hide the operands of an instruction. 2940 /// Do as if this instruction was not using any of its operands. 
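/// (The operands are replaced with undef placeholders so that the original
/// values no longer list this instruction among their users; undo() restores
/// the saved operands.)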
2941 class OperandsHider : public TypePromotionAction { 2942 /// The list of original operands. 2943 SmallVector<Value *, 4> OriginalValues; 2944 2945 public: 2946 /// \brief Remove \p Inst from the uses of the operands of \p Inst. 2947 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2948 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2949 unsigned NumOpnds = Inst->getNumOperands(); 2950 OriginalValues.reserve(NumOpnds); 2951 for (unsigned It = 0; It < NumOpnds; ++It) { 2952 // Save the current operand. 2953 Value *Val = Inst->getOperand(It); 2954 OriginalValues.push_back(Val); 2955 // Set a dummy one. 2956 // We could use OperandSetter here, but that would imply an overhead 2957 // that we are not willing to pay. 2958 Inst->setOperand(It, UndefValue::get(Val->getType())); 2959 } 2960 } 2961 2962 /// \brief Restore the original list of uses. 2963 void undo() override { 2964 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2965 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2966 Inst->setOperand(It, OriginalValues[It]); 2967 } 2968 }; 2969 2970 /// \brief Build a truncate instruction. 2971 class TruncBuilder : public TypePromotionAction { 2972 Value *Val; 2973 2974 public: 2975 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty 2976 /// result. 2977 /// trunc Opnd to Ty. 2978 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2979 IRBuilder<> Builder(Opnd); 2980 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2981 DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2982 } 2983 2984 /// \brief Get the built value. 2985 Value *getBuiltValue() { return Val; } 2986 2987 /// \brief Remove the built instruction. 2988 void undo() override { 2989 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2990 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2991 IVal->eraseFromParent(); 2992 } 2993 }; 2994 2995 /// \brief Build a sign extension instruction. 2996 class SExtBuilder : public TypePromotionAction { 2997 Value *Val; 2998 2999 public: 3000 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty 3001 /// result. 3002 /// sext Opnd to Ty. 3003 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 3004 : TypePromotionAction(InsertPt) { 3005 IRBuilder<> Builder(InsertPt); 3006 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 3007 DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 3008 } 3009 3010 /// \brief Get the built value. 3011 Value *getBuiltValue() { return Val; } 3012 3013 /// \brief Remove the built instruction. 3014 void undo() override { 3015 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 3016 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 3017 IVal->eraseFromParent(); 3018 } 3019 }; 3020 3021 /// \brief Build a zero extension instruction. 3022 class ZExtBuilder : public TypePromotionAction { 3023 Value *Val; 3024 3025 public: 3026 /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty 3027 /// result. 3028 /// zext Opnd to Ty. 3029 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 3030 : TypePromotionAction(InsertPt) { 3031 IRBuilder<> Builder(InsertPt); 3032 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 3033 DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 3034 } 3035 3036 /// \brief Get the built value. 3037 Value *getBuiltValue() { return Val; } 3038 3039 /// \brief Remove the built instruction. 
3040 void undo() override {
3041 DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3042 if (Instruction *IVal = dyn_cast<Instruction>(Val))
3043 IVal->eraseFromParent();
3044 }
3045 };
3046 
3047 /// \brief Mutate an instruction to another type.
3048 class TypeMutator : public TypePromotionAction {
3049 /// Record the original type.
3050 Type *OrigTy;
3051 
3052 public:
3053 /// \brief Mutate the type of \p Inst into \p NewTy.
3054 TypeMutator(Instruction *Inst, Type *NewTy)
3055 : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3056 DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3057 << "\n");
3058 Inst->mutateType(NewTy);
3059 }
3060 
3061 /// \brief Mutate the instruction back to its original type.
3062 void undo() override {
3063 DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3064 << "\n");
3065 Inst->mutateType(OrigTy);
3066 }
3067 };
3068 
3069 /// \brief Replace the uses of an instruction with another value.
3070 class UsesReplacer : public TypePromotionAction {
3071 /// Helper structure to keep track of the replaced uses.
3072 struct InstructionAndIdx {
3073 /// The instruction that uses the replaced instruction.
3074 Instruction *Inst;
3075 
3076 /// The operand index at which the replaced instruction is used in Inst.
3077 unsigned Idx;
3078 
3079 InstructionAndIdx(Instruction *Inst, unsigned Idx)
3080 : Inst(Inst), Idx(Idx) {}
3081 };
3082 
3083 /// Keep track of the original uses (pair Instruction, Index).
3084 SmallVector<InstructionAndIdx, 4> OriginalUses;
3085 
3086 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3087 
3088 public:
3089 /// \brief Replace all the uses of \p Inst with \p New.
3090 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
3091 DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3092 << "\n");
3093 // Record the original uses.
3094 for (Use &U : Inst->uses()) {
3095 Instruction *UserI = cast<Instruction>(U.getUser());
3096 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3097 }
3098 // Now, we can replace the uses.
3099 Inst->replaceAllUsesWith(New);
3100 }
3101 
3102 /// \brief Reassign the original uses of Inst back to Inst.
3103 void undo() override {
3104 DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3105 for (use_iterator UseIt = OriginalUses.begin(),
3106 EndIt = OriginalUses.end();
3107 UseIt != EndIt; ++UseIt) {
3108 UseIt->Inst->setOperand(UseIt->Idx, Inst);
3109 }
3110 }
3111 };
3112 
3113 /// \brief Remove an instruction from the IR.
3114 class InstructionRemover : public TypePromotionAction {
3115 /// Original position of the instruction.
3116 InsertionHandler Inserter;
3117 
3118 /// Helper structure to hide all the links to the instruction. In other
3119 /// words, this helps to behave as if the instruction was removed.
3120 OperandsHider Hider;
3121 
3122 /// Keep track of the uses replaced, if any.
3123 UsesReplacer *Replacer = nullptr;
3124 
3125 /// Keep track of instructions removed.
3126 SetOfInstrs &RemovedInsts;
3127 
3128 public:
3129 /// \brief Remove all references to \p Inst and optionally replace all its
3130 /// uses with \p New.
3131 /// \p RemovedInsts Keep track of the instructions removed by this Action.
3132 /// \pre If !Inst->use_empty(), then New != nullptr
3133 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3134 Value *New = nullptr)
3135 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3136 RemovedInsts(RemovedInsts) {
3137 if (New)
3138 Replacer = new UsesReplacer(Inst, New);
3139 DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3140 RemovedInsts.insert(Inst);
3141 // The instructions removed here will be freed after completing
3142 // optimizeBlock() for all blocks as we need to keep track of the
3143 // removed instructions during promotion.
3144 Inst->removeFromParent();
3145 }
3146 
3147 ~InstructionRemover() override { delete Replacer; }
3148 
3149 /// \brief Resurrect the instruction and reassign it to the proper uses if
3150 /// a new value was provided when building this action.
3151 void undo() override {
3152 DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3153 Inserter.insert(Inst);
3154 if (Replacer)
3155 Replacer->undo();
3156 Hider.undo();
3157 RemovedInsts.erase(Inst);
3158 }
3159 };
3160 
3161 public:
3162 /// Restoration point.
3163 /// The restoration point is a pointer to an action instead of an iterator
3164 /// because the iterator may be invalidated but not the pointer.
3165 using ConstRestorationPt = const TypePromotionAction *;
3166 
3167 TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3168 : RemovedInsts(RemovedInsts) {}
3169 
3170 /// Advocate every change made in this transaction.
3171 void commit();
3172 
3173 /// Undo all the changes made after the given point.
3174 void rollback(ConstRestorationPt Point);
3175 
3176 /// Get the current restoration point.
3177 ConstRestorationPt getRestorationPoint() const;
3178 
3179 /// \name API for IR modification with state keeping to support rollback.
3180 /// @{
3181 /// Same as Instruction::setOperand.
3182 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3183 
3184 /// Same as Instruction::eraseFromParent.
3185 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3186 
3187 /// Same as Value::replaceAllUsesWith.
3188 void replaceAllUsesWith(Instruction *Inst, Value *New);
3189 
3190 /// Same as Value::mutateType.
3191 void mutateType(Instruction *Inst, Type *NewTy);
3192 
3193 /// Same as IRBuilder::CreateTrunc.
3194 Value *createTrunc(Instruction *Opnd, Type *Ty);
3195 
3196 /// Same as IRBuilder::CreateSExt.
3197 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3198 
3199 /// Same as IRBuilder::CreateZExt.
3200 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3201 
3202 /// Same as Instruction::moveBefore.
3203 void moveBefore(Instruction *Inst, Instruction *Before);
3204 /// @}
3205 
3206 private:
3207 /// The ordered list of actions made so far.
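/// Rollback undoes these actions in reverse order of insertion, popping
/// them from the back of this vector.
/// Illustrative usage sketch (added comment, not part of the original
/// source; I, WideTy and Profitable are hypothetical placeholders), using
/// only the public API declared above:
///   TypePromotionTransaction TPT(RemovedInsts);
///   TypePromotionTransaction::ConstRestorationPt Pt =
///       TPT.getRestorationPoint();
///   TPT.mutateType(I, WideTy);   // speculative rewrite
///   if (Profitable)
///     TPT.commit();              // keep every change
///   else
///     TPT.rollback(Pt);          // undo everything made after Pt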
3208 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 3209 3210 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 3211 3212 SetOfInstrs &RemovedInsts; 3213 }; 3214 3215 } // end anonymous namespace 3216 3217 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 3218 Value *NewVal) { 3219 Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>( 3220 Inst, Idx, NewVal)); 3221 } 3222 3223 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 3224 Value *NewVal) { 3225 Actions.push_back( 3226 llvm::make_unique<TypePromotionTransaction::InstructionRemover>( 3227 Inst, RemovedInsts, NewVal)); 3228 } 3229 3230 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 3231 Value *New) { 3232 Actions.push_back( 3233 llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 3234 } 3235 3236 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 3237 Actions.push_back( 3238 llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 3239 } 3240 3241 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 3242 Type *Ty) { 3243 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 3244 Value *Val = Ptr->getBuiltValue(); 3245 Actions.push_back(std::move(Ptr)); 3246 return Val; 3247 } 3248 3249 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 3250 Value *Opnd, Type *Ty) { 3251 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 3252 Value *Val = Ptr->getBuiltValue(); 3253 Actions.push_back(std::move(Ptr)); 3254 return Val; 3255 } 3256 3257 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 3258 Value *Opnd, Type *Ty) { 3259 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 3260 Value *Val = Ptr->getBuiltValue(); 3261 Actions.push_back(std::move(Ptr)); 3262 return Val; 3263 } 3264 3265 void TypePromotionTransaction::moveBefore(Instruction *Inst, 3266 Instruction *Before) { 3267 Actions.push_back( 3268 llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 3269 Inst, Before)); 3270 } 3271 3272 TypePromotionTransaction::ConstRestorationPt 3273 TypePromotionTransaction::getRestorationPoint() const { 3274 return !Actions.empty() ? Actions.back().get() : nullptr; 3275 } 3276 3277 void TypePromotionTransaction::commit() { 3278 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 3279 ++It) 3280 (*It)->commit(); 3281 Actions.clear(); 3282 } 3283 3284 void TypePromotionTransaction::rollback( 3285 TypePromotionTransaction::ConstRestorationPt Point) { 3286 while (!Actions.empty() && Point != Actions.back().get()) { 3287 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 3288 Curr->undo(); 3289 } 3290 } 3291 3292 namespace { 3293 3294 /// \brief A helper class for matching addressing modes. 3295 /// 3296 /// This encapsulates the logic for matching the target-legal addressing modes. 3297 class AddressingModeMatcher { 3298 SmallVectorImpl<Instruction*> &AddrModeInsts; 3299 const TargetLowering &TLI; 3300 const TargetRegisterInfo &TRI; 3301 const DataLayout &DL; 3302 3303 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 3304 /// the memory instruction that we're computing this address for. 3305 Type *AccessTy; 3306 unsigned AddrSpace; 3307 Instruction *MemoryInst; 3308 3309 /// This is the addressing mode that we're building up. This is 3310 /// part of the return value of this addressing mode matching stuff. 
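/// (Added clarification, not in the original source.) An ExtAddrMode
/// describes an address of the form
///   BaseGV + BaseReg + Scale * ScaledReg + BaseOffs
/// and matching tries to fill in these fields so that the target can fold
/// the whole computation into the memory operation.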
3311 ExtAddrMode &AddrMode; 3312 3313 /// The instructions inserted by other CodeGenPrepare optimizations. 3314 const SetOfInstrs &InsertedInsts; 3315 3316 /// A map from the instructions to their type before promotion. 3317 InstrToOrigTy &PromotedInsts; 3318 3319 /// The ongoing transaction where every action should be registered. 3320 TypePromotionTransaction &TPT; 3321 3322 /// This is set to true when we should not do profitability checks. 3323 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 3324 bool IgnoreProfitability; 3325 3326 AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI, 3327 const TargetLowering &TLI, 3328 const TargetRegisterInfo &TRI, 3329 Type *AT, unsigned AS, 3330 Instruction *MI, ExtAddrMode &AM, 3331 const SetOfInstrs &InsertedInsts, 3332 InstrToOrigTy &PromotedInsts, 3333 TypePromotionTransaction &TPT) 3334 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 3335 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 3336 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 3337 PromotedInsts(PromotedInsts), TPT(TPT) { 3338 IgnoreProfitability = false; 3339 } 3340 3341 public: 3342 /// Find the maximal addressing mode that a load/store of V can fold, 3343 /// give an access type of AccessTy. This returns a list of involved 3344 /// instructions in AddrModeInsts. 3345 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 3346 /// optimizations. 3347 /// \p PromotedInsts maps the instructions to their type before promotion. 3348 /// \p The ongoing transaction where every action should be registered. 3349 static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS, 3350 Instruction *MemoryInst, 3351 SmallVectorImpl<Instruction*> &AddrModeInsts, 3352 const TargetLowering &TLI, 3353 const TargetRegisterInfo &TRI, 3354 const SetOfInstrs &InsertedInsts, 3355 InstrToOrigTy &PromotedInsts, 3356 TypePromotionTransaction &TPT) { 3357 ExtAddrMode Result; 3358 3359 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, 3360 AccessTy, AS, 3361 MemoryInst, Result, InsertedInsts, 3362 PromotedInsts, TPT).matchAddr(V, 0); 3363 (void)Success; assert(Success && "Couldn't select *anything*?"); 3364 return Result; 3365 } 3366 3367 private: 3368 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 3369 bool matchAddr(Value *V, unsigned Depth); 3370 bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, 3371 bool *MovedAway = nullptr); 3372 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 3373 ExtAddrMode &AMBefore, 3374 ExtAddrMode &AMAfter); 3375 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 3376 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 3377 Value *PromotedOperand) const; 3378 }; 3379 3380 /// \brief A helper class for combining addressing modes. 3381 class AddressingModeCombiner { 3382 private: 3383 /// The addressing modes we've collected. 3384 SmallVector<ExtAddrMode, 16> AddrModes; 3385 3386 /// The field in which the AddrModes differ, when we have more than one. 3387 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; 3388 3389 /// Are the AddrModes that we have all just equal to their original values? 3390 bool AllAddrModesTrivial = true; 3391 3392 public: 3393 /// \brief Get the combined AddrMode 3394 const ExtAddrMode &getAddrMode() const { 3395 return AddrModes[0]; 3396 } 3397 3398 /// \brief Add a new AddrMode if it's compatible with the AddrModes we already 3399 /// have. 
3400 /// \return True iff we succeeded in doing so. 3401 bool addNewAddrMode(ExtAddrMode &NewAddrMode) { 3402 // Take note of if we have any non-trivial AddrModes, as we need to detect 3403 // when all AddrModes are trivial as then we would introduce a phi or select 3404 // which just duplicates what's already there. 3405 AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); 3406 3407 // If this is the first addrmode then everything is fine. 3408 if (AddrModes.empty()) { 3409 AddrModes.emplace_back(NewAddrMode); 3410 return true; 3411 } 3412 3413 // Figure out how different this is from the other address modes, which we 3414 // can do just by comparing against the first one given that we only care 3415 // about the cumulative difference. 3416 ExtAddrMode::FieldName ThisDifferentField = 3417 AddrModes[0].compare(NewAddrMode); 3418 if (DifferentField == ExtAddrMode::NoField) 3419 DifferentField = ThisDifferentField; 3420 else if (DifferentField != ThisDifferentField) 3421 DifferentField = ExtAddrMode::MultipleFields; 3422 3423 // If this AddrMode is the same as all the others then everything is fine 3424 // (which should only happen when there is actually only one AddrMode). 3425 if (DifferentField == ExtAddrMode::NoField) { 3426 assert(AddrModes.size() == 1); 3427 return true; 3428 } 3429 3430 // If NewAddrMode differs in only one dimension then we can handle it by 3431 // inserting a phi/select later on. 3432 if (DifferentField != ExtAddrMode::MultipleFields) { 3433 AddrModes.emplace_back(NewAddrMode); 3434 return true; 3435 } 3436 3437 // We couldn't combine NewAddrMode with the rest, so return failure. 3438 AddrModes.clear(); 3439 return false; 3440 } 3441 3442 /// \brief Combine the addressing modes we've collected into a single 3443 /// addressing mode. 3444 /// \return True iff we successfully combined them or we only had one so 3445 /// didn't need to combine them anyway. 3446 bool combineAddrModes() { 3447 // If we have no AddrModes then they can't be combined. 3448 if (AddrModes.size() == 0) 3449 return false; 3450 3451 // A single AddrMode can trivially be combined. 3452 if (AddrModes.size() == 1) 3453 return true; 3454 3455 // If the AddrModes we collected are all just equal to the value they are 3456 // derived from then combining them wouldn't do anything useful. 3457 if (AllAddrModesTrivial) 3458 return false; 3459 3460 // TODO: Combine multiple AddrModes by inserting a select or phi for the 3461 // field in which the AddrModes differ. 3462 return false; 3463 } 3464 }; 3465 3466 } // end anonymous namespace 3467 3468 /// Try adding ScaleReg*Scale to the current addressing mode. 3469 /// Return true and update AddrMode if this addr mode is legal for the target, 3470 /// false if not. 3471 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 3472 unsigned Depth) { 3473 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 3474 // mode. Just process that directly. 3475 if (Scale == 1) 3476 return matchAddr(ScaleReg, Depth); 3477 3478 // If the scale is 0, it takes nothing to add this. 3479 if (Scale == 0) 3480 return true; 3481 3482 // If we already have a scale of this value, we can add to it, otherwise, we 3483 // need an available scale field. 3484 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 3485 return false; 3486 3487 ExtAddrMode TestAddrMode = AddrMode; 3488 3489 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 3490 // [A+B + A*7] -> [B+A*8]. 
3491 TestAddrMode.Scale += Scale;
3492 TestAddrMode.ScaledReg = ScaleReg;
3493 
3494 // If the new address isn't legal, bail out.
3495 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3496 return false;
3497 
3498 // It was legal, so commit it.
3499 AddrMode = TestAddrMode;
3500 
3501 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
3502 // to see if ScaleReg is actually X+C. If so, we can turn this into adding
3503 // X*Scale + C*Scale to addr mode.
3504 ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3505 if (isa<Instruction>(ScaleReg) && // not a constant expr.
3506 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
3507 TestAddrMode.ScaledReg = AddLHS;
3508 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
3509 
3510 // If this addressing mode is legal, commit it and remember that we folded
3511 // this instruction.
3512 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3513 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3514 AddrMode = TestAddrMode;
3515 return true;
3516 }
3517 }
3518 
3519 // Otherwise, not (x+c)*scale, just return what we have.
3520 return true;
3521 }
3522 
3523 /// This is a little filter, which returns true if an addressing computation
3524 /// involving I might be folded into a load/store accessing it.
3525 /// This doesn't need to be perfect, but needs to accept at least
3526 /// the set of instructions that matchOperationAddr can.
3527 static bool MightBeFoldableInst(Instruction *I) {
3528 switch (I->getOpcode()) {
3529 case Instruction::BitCast:
3530 case Instruction::AddrSpaceCast:
3531 // Don't touch identity bitcasts.
3532 if (I->getType() == I->getOperand(0)->getType())
3533 return false;
3534 return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
3535 case Instruction::PtrToInt:
3536 // PtrToInt is always a noop, as we know that the int type is pointer sized.
3537 return true;
3538 case Instruction::IntToPtr:
3539 // We know the input is intptr_t, so this is foldable.
3540 return true;
3541 case Instruction::Add:
3542 return true;
3543 case Instruction::Mul:
3544 case Instruction::Shl:
3545 // Can only handle X*C and X << C.
3546 return isa<ConstantInt>(I->getOperand(1));
3547 case Instruction::GetElementPtr:
3548 return true;
3549 default:
3550 return false;
3551 }
3552 }
3553 
3554 /// \brief Check whether or not \p Val is a legal instruction for \p TLI.
3555 /// \note \p Val is assumed to be the product of some type promotion.
3556 /// Therefore, if \p Val maps to an undefined ISD opcode in \p TLI, it is
3557 /// assumed to be legal, as the non-promoted value would have had the same state.
3558 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
3559 const DataLayout &DL, Value *Val) {
3560 Instruction *PromotedInst = dyn_cast<Instruction>(Val);
3561 if (!PromotedInst)
3562 return false;
3563 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
3564 // If the ISDOpcode is undefined, it was undefined before the promotion.
3565 if (!ISDOpcode)
3566 return true;
3567 // Otherwise, check if the promoted instruction is legal or not.
3568 return TLI.isOperationLegalOrCustom(
3569 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
3570 }
3571 
3572 namespace {
3573 
3574 /// \brief Helper class to perform type promotion.
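/// For illustration (this example is an addition, not part of the original
/// source), a typical promotion rewrites
///   %add = add nsw i32 %opnd, 1
///   %idx = sext i32 %add to i64
/// into
///   %promoted = sext i32 %opnd to i64
///   %idx = add nsw i64 %promoted, 1
/// so the extension no longer sits between the arithmetic and its users.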
3575 class TypePromotionHelper { 3576 /// \brief Utility function to check whether or not a sign or zero extension 3577 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 3578 /// either using the operands of \p Inst or promoting \p Inst. 3579 /// The type of the extension is defined by \p IsSExt. 3580 /// In other words, check if: 3581 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 3582 /// #1 Promotion applies: 3583 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 3584 /// #2 Operand reuses: 3585 /// ext opnd1 to ConsideredExtType. 3586 /// \p PromotedInsts maps the instructions to their type before promotion. 3587 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 3588 const InstrToOrigTy &PromotedInsts, bool IsSExt); 3589 3590 /// \brief Utility function to determine if \p OpIdx should be promoted when 3591 /// promoting \p Inst. 3592 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 3593 return !(isa<SelectInst>(Inst) && OpIdx == 0); 3594 } 3595 3596 /// \brief Utility function to promote the operand of \p Ext when this 3597 /// operand is a promotable trunc or sext or zext. 3598 /// \p PromotedInsts maps the instructions to their type before promotion. 3599 /// \p CreatedInstsCost[out] contains the cost of all instructions 3600 /// created to promote the operand of Ext. 3601 /// Newly added extensions are inserted in \p Exts. 3602 /// Newly added truncates are inserted in \p Truncs. 3603 /// Should never be called directly. 3604 /// \return The promoted value which is used instead of Ext. 3605 static Value *promoteOperandForTruncAndAnyExt( 3606 Instruction *Ext, TypePromotionTransaction &TPT, 3607 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3608 SmallVectorImpl<Instruction *> *Exts, 3609 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 3610 3611 /// \brief Utility function to promote the operand of \p Ext when this 3612 /// operand is promotable and is not a supported trunc or sext. 3613 /// \p PromotedInsts maps the instructions to their type before promotion. 3614 /// \p CreatedInstsCost[out] contains the cost of all the instructions 3615 /// created to promote the operand of Ext. 3616 /// Newly added extensions are inserted in \p Exts. 3617 /// Newly added truncates are inserted in \p Truncs. 3618 /// Should never be called directly. 3619 /// \return The promoted value which is used instead of Ext. 3620 static Value *promoteOperandForOther(Instruction *Ext, 3621 TypePromotionTransaction &TPT, 3622 InstrToOrigTy &PromotedInsts, 3623 unsigned &CreatedInstsCost, 3624 SmallVectorImpl<Instruction *> *Exts, 3625 SmallVectorImpl<Instruction *> *Truncs, 3626 const TargetLowering &TLI, bool IsSExt); 3627 3628 /// \see promoteOperandForOther. 3629 static Value *signExtendOperandForOther( 3630 Instruction *Ext, TypePromotionTransaction &TPT, 3631 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3632 SmallVectorImpl<Instruction *> *Exts, 3633 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3634 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3635 Exts, Truncs, TLI, true); 3636 } 3637 3638 /// \see promoteOperandForOther. 
3639 static Value *zeroExtendOperandForOther(
3640 Instruction *Ext, TypePromotionTransaction &TPT,
3641 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3642 SmallVectorImpl<Instruction *> *Exts,
3643 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3644 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
3645 Exts, Truncs, TLI, false);
3646 }
3647 
3648 public:
3649 /// Type for the utility function that promotes the operand of Ext.
3650 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
3651 InstrToOrigTy &PromotedInsts,
3652 unsigned &CreatedInstsCost,
3653 SmallVectorImpl<Instruction *> *Exts,
3654 SmallVectorImpl<Instruction *> *Truncs,
3655 const TargetLowering &TLI);
3656 
3657 /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
3658 /// action to promote the operand of \p Ext instead of using Ext.
3659 /// \return NULL if no promotable action is possible with the current
3660 /// sign extension.
3661 /// \p InsertedInsts keeps track of all the instructions inserted by the
3662 /// other CodeGenPrepare optimizations. This information is important
3663 /// because we do not want to promote these instructions as CodeGenPrepare
3664 /// will reinsert them later, thus creating an infinite loop: create/remove.
3665 /// \p PromotedInsts maps the instructions to their type before promotion.
3666 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
3667 const TargetLowering &TLI,
3668 const InstrToOrigTy &PromotedInsts);
3669 };
3670 
3671 } // end anonymous namespace
3672 
3673 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
3674 Type *ConsideredExtType,
3675 const InstrToOrigTy &PromotedInsts,
3676 bool IsSExt) {
3677 // The promotion helper does not know how to deal with vector types yet.
3678 // To be able to fix that, we would need to fix the places where we
3679 // statically extend, e.g., constants and such.
3680 if (Inst->getType()->isVectorTy())
3681 return false;
3682 
3683 // We can always get through zext.
3684 if (isa<ZExtInst>(Inst))
3685 return true;
3686 
3687 // sext(sext) is ok too.
3688 if (IsSExt && isa<SExtInst>(Inst))
3689 return true;
3690 
3691 // We can get through a binary operator if it is legal. In other words, the
3692 // binary operator must have a nuw or nsw flag.
3693 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
3694 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
3695 ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
3696 (IsSExt && BinOp->hasNoSignedWrap())))
3697 return true;
3698 
3699 // Check if we can do the following simplification.
3700 // ext(trunc(opnd)) --> ext(opnd)
3701 if (!isa<TruncInst>(Inst))
3702 return false;
3703 
3704 Value *OpndVal = Inst->getOperand(0);
3705 // Check if we can use this operand in the extension.
3706 // If the type is larger than the result type of the extension, we cannot.
3707 if (!OpndVal->getType()->isIntegerTy() ||
3708 OpndVal->getType()->getIntegerBitWidth() >
3709 ConsideredExtType->getIntegerBitWidth())
3710 return false;
3711 
3712 // If the operand of the truncate is not an instruction, we will not have
3713 // any information on the dropped bits.
3714 // (Actually we could for constants but it is not worth the extra logic).
3715 Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
3716 if (!Opnd)
3717 return false;
3718 
3719 // Check if the source of the truncate is narrow enough.
3720 // I.e., check that trunc just drops extended bits of the same kind as
3721 // the extension.
3722 // #1 Get the type of the operand and check the kind of the extended bits.
3723 const Type *OpndType;
3724 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
3725 if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
3726 OpndType = It->second.getPointer();
3727 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
3728 OpndType = Opnd->getOperand(0)->getType();
3729 else
3730 return false;
3731 
3732 // #2 Check that the truncate just drops extended bits.
3733 return Inst->getType()->getIntegerBitWidth() >=
3734 OpndType->getIntegerBitWidth();
3735 }
3736 
3737 TypePromotionHelper::Action TypePromotionHelper::getAction(
3738 Instruction *Ext, const SetOfInstrs &InsertedInsts,
3739 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
3740 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3741 "Unexpected instruction type");
3742 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
3743 Type *ExtTy = Ext->getType();
3744 bool IsSExt = isa<SExtInst>(Ext);
3745 // If the operand of the extension is not an instruction, we cannot
3746 // get through.
3747 // If it is, check whether we can get through.
3748 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
3749 return nullptr;
3750 
3751 // Do not promote if the operand has been added by CodeGenPrepare.
3752 // Otherwise, it means we are undoing an optimization that is likely to be
3753 // redone, thus causing a potential infinite loop.
3754 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
3755 return nullptr;
3756 
3757 // SExt, ZExt or Trunc instructions.
3758 // Return the related handler.
3759 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
3760 isa<ZExtInst>(ExtOpnd))
3761 return promoteOperandForTruncAndAnyExt;
3762 
3763 // Regular instruction.
3764 // Abort early if we will have to insert non-free instructions.
3765 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
3766 return nullptr;
3767 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
3768 }
3769 
3770 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
3771 Instruction *SExt, TypePromotionTransaction &TPT,
3772 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3773 SmallVectorImpl<Instruction *> *Exts,
3774 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3775 // By construction, the operand of SExt is an instruction. Otherwise we cannot
3776 // get through it and this method should not be called.
3777 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
3778 Value *ExtVal = SExt;
3779 bool HasMergedNonFreeExt = false;
3780 if (isa<ZExtInst>(SExtOpnd)) {
3781 // Replace s|zext(zext(opnd))
3782 // => zext(opnd).
3783 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
3784 Value *ZExt =
3785 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
3786 TPT.replaceAllUsesWith(SExt, ZExt);
3787 TPT.eraseInstruction(SExt);
3788 ExtVal = ZExt;
3789 } else {
3790 // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
3791 // => z|sext(opnd).
3792 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
3793 }
3794 CreatedInstsCost = 0;
3795 
3796 // Remove dead code.
3797 if (SExtOpnd->use_empty())
3798 TPT.eraseInstruction(SExtOpnd);
3799 
3800 // Check if the extension is still needed.
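// For example (illustrative comment, not in the original source), after the
// operand rewrite above, sext i32 (trunc i64 %x to i32) to i64 becomes an
// extension whose source already has the destination type; the code below
// then drops that extension and reuses %x directly.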
3801 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 3802 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 3803 if (ExtInst) { 3804 if (Exts) 3805 Exts->push_back(ExtInst); 3806 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 3807 } 3808 return ExtVal; 3809 } 3810 3811 // At this point we have: ext ty opnd to ty. 3812 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 3813 Value *NextVal = ExtInst->getOperand(0); 3814 TPT.eraseInstruction(ExtInst, NextVal); 3815 return NextVal; 3816 } 3817 3818 Value *TypePromotionHelper::promoteOperandForOther( 3819 Instruction *Ext, TypePromotionTransaction &TPT, 3820 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3821 SmallVectorImpl<Instruction *> *Exts, 3822 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 3823 bool IsSExt) { 3824 // By construction, the operand of Ext is an instruction. Otherwise we cannot 3825 // get through it and this method should not be called. 3826 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 3827 CreatedInstsCost = 0; 3828 if (!ExtOpnd->hasOneUse()) { 3829 // ExtOpnd will be promoted. 3830 // All its uses, but Ext, will need to use a truncated value of the 3831 // promoted version. 3832 // Create the truncate now. 3833 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 3834 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 3835 // Insert it just after the definition. 3836 ITrunc->moveAfter(ExtOpnd); 3837 if (Truncs) 3838 Truncs->push_back(ITrunc); 3839 } 3840 3841 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 3842 // Restore the operand of Ext (which has been replaced by the previous call 3843 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 3844 TPT.setOperand(Ext, 0, ExtOpnd); 3845 } 3846 3847 // Get through the Instruction: 3848 // 1. Update its type. 3849 // 2. Replace the uses of Ext by Inst. 3850 // 3. Extend each operand that needs to be extended. 3851 3852 // Remember the original type of the instruction before promotion. 3853 // This is useful to know that the high bits are sign extended bits. 3854 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>( 3855 ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt))); 3856 // Step #1. 3857 TPT.mutateType(ExtOpnd, Ext->getType()); 3858 // Step #2. 3859 TPT.replaceAllUsesWith(Ext, ExtOpnd); 3860 // Step #3. 3861 Instruction *ExtForOpnd = Ext; 3862 3863 DEBUG(dbgs() << "Propagate Ext to operands\n"); 3864 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 3865 ++OpIdx) { 3866 DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 3867 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 3868 !shouldExtOperand(ExtOpnd, OpIdx)) { 3869 DEBUG(dbgs() << "No need to propagate\n"); 3870 continue; 3871 } 3872 // Check if we can statically extend the operand. 3873 Value *Opnd = ExtOpnd->getOperand(OpIdx); 3874 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 3875 DEBUG(dbgs() << "Statically extend\n"); 3876 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 3877 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 3878 : Cst->getValue().zext(BitWidth); 3879 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 3880 continue; 3881 } 3882 // UndefValue are typed, so we have to statically sign extend them. 
3883 if (isa<UndefValue>(Opnd)) {
3884 DEBUG(dbgs() << "Statically extend\n");
3885 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
3886 continue;
3887 }
3888 
3889 // Otherwise we have to explicitly sign extend the operand.
3890 // Check if Ext has already been reused to extend another operand.
3891 if (!ExtForOpnd) {
3892 // If so, create a new extension.
3893 DEBUG(dbgs() << "More operands to ext\n");
3894 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
3895 : TPT.createZExt(Ext, Opnd, Ext->getType());
3896 if (!isa<Instruction>(ValForExtOpnd)) {
3897 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
3898 continue;
3899 }
3900 ExtForOpnd = cast<Instruction>(ValForExtOpnd);
3901 }
3902 if (Exts)
3903 Exts->push_back(ExtForOpnd);
3904 TPT.setOperand(ExtForOpnd, 0, Opnd);
3905 
3906 // Move the sign extension before the insertion point.
3907 TPT.moveBefore(ExtForOpnd, ExtOpnd);
3908 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
3909 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
3910 // If more sexts are required, new instructions will have to be created.
3911 ExtForOpnd = nullptr;
3912 }
3913 if (ExtForOpnd == Ext) {
3914 DEBUG(dbgs() << "Extension is useless now\n");
3915 TPT.eraseInstruction(Ext);
3916 }
3917 return ExtOpnd;
3918 }
3919 
3920 /// Check whether or not promoting an instruction to a wider type is profitable.
3921 /// \p NewCost gives the cost of extension instructions created by the
3922 /// promotion.
3923 /// \p OldCost gives the cost of extension instructions before the promotion
3924 /// plus the number of instructions that have been
3925 /// matched in the addressing mode thanks to the promotion.
3926 /// \p PromotedOperand is the value that has been promoted.
3927 /// \return True if the promotion is profitable, false otherwise.
3928 bool AddressingModeMatcher::isPromotionProfitable(
3929 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
3930 DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
3931 // The cost of the new extensions is greater than the cost of the
3932 // old extension plus what we folded.
3933 // This is not profitable.
3934 if (NewCost > OldCost)
3935 return false;
3936 if (NewCost < OldCost)
3937 return true;
3938 // The promotion is neutral but it may help folding the sign extension into
3939 // loads, for instance.
3940 // Check that we did not create an illegal instruction.
3941 return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
3942 }
3943 
3944 /// Given an instruction or constant expr, see if we can fold the operation
3945 /// into the addressing mode. If so, update the addressing mode and return
3946 /// true, otherwise return false without modifying AddrMode.
3947 /// If \p MovedAway is not NULL, it contains the information about whether or
3948 /// not AddrInst has to be folded into the addressing mode on success.
3949 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
3950 /// because it has been moved away.
3951 /// Thus AddrInst must not be added to the matched instructions.
3952 /// This state can happen when AddrInst is a sext, since it may be moved away.
3953 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
3954 /// not be referenced anymore.
3955 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
3956 unsigned Depth,
3957 bool *MovedAway) {
3958 // Avoid exponential behavior on extremely deep expression trees.
3959 if (Depth >= 5) return false; 3960 3961 // By default, all matched instructions stay in place. 3962 if (MovedAway) 3963 *MovedAway = false; 3964 3965 switch (Opcode) { 3966 case Instruction::PtrToInt: 3967 // PtrToInt is always a noop, as we know that the int type is pointer sized. 3968 return matchAddr(AddrInst->getOperand(0), Depth); 3969 case Instruction::IntToPtr: { 3970 auto AS = AddrInst->getType()->getPointerAddressSpace(); 3971 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 3972 // This inttoptr is a no-op if the integer type is pointer sized. 3973 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 3974 return matchAddr(AddrInst->getOperand(0), Depth); 3975 return false; 3976 } 3977 case Instruction::BitCast: 3978 // BitCast is always a noop, and we can handle it as long as it is 3979 // int->int or pointer->pointer (we don't want int<->fp or something). 3980 if ((AddrInst->getOperand(0)->getType()->isPointerTy() || 3981 AddrInst->getOperand(0)->getType()->isIntegerTy()) && 3982 // Don't touch identity bitcasts. These were probably put here by LSR, 3983 // and we don't want to mess around with them. Assume it knows what it 3984 // is doing. 3985 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 3986 return matchAddr(AddrInst->getOperand(0), Depth); 3987 return false; 3988 case Instruction::AddrSpaceCast: { 3989 unsigned SrcAS 3990 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 3991 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 3992 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 3993 return matchAddr(AddrInst->getOperand(0), Depth); 3994 return false; 3995 } 3996 case Instruction::Add: { 3997 // Check to see if we can merge in the RHS then the LHS. If so, we win. 3998 ExtAddrMode BackupAddrMode = AddrMode; 3999 unsigned OldSize = AddrModeInsts.size(); 4000 // Start a transaction at this point. 4001 // The LHS may match but not the RHS. 4002 // Therefore, we need a higher level restoration point to undo partially 4003 // matched operation. 4004 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4005 TPT.getRestorationPoint(); 4006 4007 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 4008 matchAddr(AddrInst->getOperand(0), Depth+1)) 4009 return true; 4010 4011 // Restore the old addr mode info. 4012 AddrMode = BackupAddrMode; 4013 AddrModeInsts.resize(OldSize); 4014 TPT.rollback(LastKnownGood); 4015 4016 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 4017 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 4018 matchAddr(AddrInst->getOperand(1), Depth+1)) 4019 return true; 4020 4021 // Otherwise we definitely can't merge the ADD in. 4022 AddrMode = BackupAddrMode; 4023 AddrModeInsts.resize(OldSize); 4024 TPT.rollback(LastKnownGood); 4025 break; 4026 } 4027 //case Instruction::Or: 4028 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 4029 //break; 4030 case Instruction::Mul: 4031 case Instruction::Shl: { 4032 // Can only handle X*C and X << C. 4033 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 4034 if (!RHS || RHS->getBitWidth() > 64) 4035 return false; 4036 int64_t Scale = RHS->getSExtValue(); 4037 if (Opcode == Instruction::Shl) 4038 Scale = 1LL << Scale; 4039 4040 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 4041 } 4042 case Instruction::GetElementPtr: { 4043 // Scan the GEP. We check it if it contains constant offsets and at most 4044 // one variable offset. 
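// Illustrative example (added comment, not in the original source;
// %struct.S, %base and %i are hypothetical): for
//   %p = getelementptr inbounds %struct.S, %struct.S* %base, i64 %i, i32 1
// the constant struct index contributes a fixed offset taken from the
// StructLayout, while %i contributes the single allowed variable index,
// scaled by the allocation size of %struct.S.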
4045 int VariableOperand = -1; 4046 unsigned VariableScale = 0; 4047 4048 int64_t ConstantOffset = 0; 4049 gep_type_iterator GTI = gep_type_begin(AddrInst); 4050 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 4051 if (StructType *STy = GTI.getStructTypeOrNull()) { 4052 const StructLayout *SL = DL.getStructLayout(STy); 4053 unsigned Idx = 4054 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 4055 ConstantOffset += SL->getElementOffset(Idx); 4056 } else { 4057 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); 4058 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 4059 ConstantOffset += CI->getSExtValue()*TypeSize; 4060 } else if (TypeSize) { // Scales of zero don't do anything. 4061 // We only allow one variable index at the moment. 4062 if (VariableOperand != -1) 4063 return false; 4064 4065 // Remember the variable index. 4066 VariableOperand = i; 4067 VariableScale = TypeSize; 4068 } 4069 } 4070 } 4071 4072 // A common case is for the GEP to only do a constant offset. In this case, 4073 // just add it to the disp field and check validity. 4074 if (VariableOperand == -1) { 4075 AddrMode.BaseOffs += ConstantOffset; 4076 if (ConstantOffset == 0 || 4077 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 4078 // Check to see if we can fold the base pointer in too. 4079 if (matchAddr(AddrInst->getOperand(0), Depth+1)) 4080 return true; 4081 } 4082 AddrMode.BaseOffs -= ConstantOffset; 4083 return false; 4084 } 4085 4086 // Save the valid addressing mode in case we can't match. 4087 ExtAddrMode BackupAddrMode = AddrMode; 4088 unsigned OldSize = AddrModeInsts.size(); 4089 4090 // See if the scale and offset amount is valid for this target. 4091 AddrMode.BaseOffs += ConstantOffset; 4092 4093 // Match the base operand of the GEP. 4094 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 4095 // If it couldn't be matched, just stuff the value in a register. 4096 if (AddrMode.HasBaseReg) { 4097 AddrMode = BackupAddrMode; 4098 AddrModeInsts.resize(OldSize); 4099 return false; 4100 } 4101 AddrMode.HasBaseReg = true; 4102 AddrMode.BaseReg = AddrInst->getOperand(0); 4103 } 4104 4105 // Match the remaining variable portion of the GEP. 4106 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 4107 Depth)) { 4108 // If it couldn't be matched, try stuffing the base into a register 4109 // instead of matching it, and retrying the match of the scale. 4110 AddrMode = BackupAddrMode; 4111 AddrModeInsts.resize(OldSize); 4112 if (AddrMode.HasBaseReg) 4113 return false; 4114 AddrMode.HasBaseReg = true; 4115 AddrMode.BaseReg = AddrInst->getOperand(0); 4116 AddrMode.BaseOffs += ConstantOffset; 4117 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 4118 VariableScale, Depth)) { 4119 // If even that didn't work, bail. 4120 AddrMode = BackupAddrMode; 4121 AddrModeInsts.resize(OldSize); 4122 return false; 4123 } 4124 } 4125 4126 return true; 4127 } 4128 case Instruction::SExt: 4129 case Instruction::ZExt: { 4130 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 4131 if (!Ext) 4132 return false; 4133 4134 // Try to move this ext out of the way of the addressing mode. 4135 // Ask for a method for doing so. 
4136 TypePromotionHelper::Action TPH = 4137 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 4138 if (!TPH) 4139 return false; 4140 4141 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4142 TPT.getRestorationPoint(); 4143 unsigned CreatedInstsCost = 0; 4144 unsigned ExtCost = !TLI.isExtFree(Ext); 4145 Value *PromotedOperand = 4146 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 4147 // SExt has been moved away. 4148 // Thus either it will be rematched later in the recursive calls or it is 4149 // gone. Anyway, we must not fold it into the addressing mode at this point. 4150 // E.g., 4151 // op = add opnd, 1 4152 // idx = ext op 4153 // addr = gep base, idx 4154 // is now: 4155 // promotedOpnd = ext opnd <- no match here 4156 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 4157 // addr = gep base, op <- match 4158 if (MovedAway) 4159 *MovedAway = true; 4160 4161 assert(PromotedOperand && 4162 "TypePromotionHelper should have filtered out those cases"); 4163 4164 ExtAddrMode BackupAddrMode = AddrMode; 4165 unsigned OldSize = AddrModeInsts.size(); 4166 4167 if (!matchAddr(PromotedOperand, Depth) || 4168 // The total of the new cost is equal to the cost of the created 4169 // instructions. 4170 // The total of the old cost is equal to the cost of the extension plus 4171 // what we have saved in the addressing mode. 4172 !isPromotionProfitable(CreatedInstsCost, 4173 ExtCost + (AddrModeInsts.size() - OldSize), 4174 PromotedOperand)) { 4175 AddrMode = BackupAddrMode; 4176 AddrModeInsts.resize(OldSize); 4177 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 4178 TPT.rollback(LastKnownGood); 4179 return false; 4180 } 4181 return true; 4182 } 4183 } 4184 return false; 4185 } 4186 4187 /// If we can, try to add the value of 'Addr' into the current addressing mode. 4188 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 4189 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 4190 /// for the target. 4191 /// 4192 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 4193 // Start a transaction at this point that we will rollback if the matching 4194 // fails. 4195 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4196 TPT.getRestorationPoint(); 4197 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 4198 // Fold in immediates if legal for the target. 4199 AddrMode.BaseOffs += CI->getSExtValue(); 4200 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4201 return true; 4202 AddrMode.BaseOffs -= CI->getSExtValue(); 4203 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 4204 // If this is a global variable, try to fold it into the addressing mode. 4205 if (!AddrMode.BaseGV) { 4206 AddrMode.BaseGV = GV; 4207 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4208 return true; 4209 AddrMode.BaseGV = nullptr; 4210 } 4211 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 4212 ExtAddrMode BackupAddrMode = AddrMode; 4213 unsigned OldSize = AddrModeInsts.size(); 4214 4215 // Check to see if it is possible to fold this operation. 4216 bool MovedAway = false; 4217 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 4218 // This instruction may have been moved away. If so, there is nothing 4219 // to check here. 4220 if (MovedAway) 4221 return true; 4222 // Okay, it's possible to fold this. Check to see if it is actually 4223 // *profitable* to do so. 
We use a simple cost model to avoid increasing 4224 // register pressure too much. 4225 if (I->hasOneUse() || 4226 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 4227 AddrModeInsts.push_back(I); 4228 return true; 4229 } 4230 4231 // It isn't profitable to do this, roll back. 4232 //cerr << "NOT FOLDING: " << *I; 4233 AddrMode = BackupAddrMode; 4234 AddrModeInsts.resize(OldSize); 4235 TPT.rollback(LastKnownGood); 4236 } 4237 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 4238 if (matchOperationAddr(CE, CE->getOpcode(), Depth)) 4239 return true; 4240 TPT.rollback(LastKnownGood); 4241 } else if (isa<ConstantPointerNull>(Addr)) { 4242 // Null pointer gets folded without affecting the addressing mode. 4243 return true; 4244 } 4245 4246 // Worse case, the target should support [reg] addressing modes. :) 4247 if (!AddrMode.HasBaseReg) { 4248 AddrMode.HasBaseReg = true; 4249 AddrMode.BaseReg = Addr; 4250 // Still check for legality in case the target supports [imm] but not [i+r]. 4251 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4252 return true; 4253 AddrMode.HasBaseReg = false; 4254 AddrMode.BaseReg = nullptr; 4255 } 4256 4257 // If the base register is already taken, see if we can do [r+r]. 4258 if (AddrMode.Scale == 0) { 4259 AddrMode.Scale = 1; 4260 AddrMode.ScaledReg = Addr; 4261 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4262 return true; 4263 AddrMode.Scale = 0; 4264 AddrMode.ScaledReg = nullptr; 4265 } 4266 // Couldn't match. 4267 TPT.rollback(LastKnownGood); 4268 return false; 4269 } 4270 4271 /// Check to see if all uses of OpVal by the specified inline asm call are due 4272 /// to memory operands. If so, return true, otherwise return false. 4273 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 4274 const TargetLowering &TLI, 4275 const TargetRegisterInfo &TRI) { 4276 const Function *F = CI->getFunction(); 4277 TargetLowering::AsmOperandInfoVector TargetConstraints = 4278 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, 4279 ImmutableCallSite(CI)); 4280 4281 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4282 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4283 4284 // Compute the constraint code and ConstraintType to use. 4285 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 4286 4287 // If this asm operand is our Value*, and if it isn't an indirect memory 4288 // operand, we can't fold it! 4289 if (OpInfo.CallOperandVal == OpVal && 4290 (OpInfo.ConstraintType != TargetLowering::C_Memory || 4291 !OpInfo.isIndirect)) 4292 return false; 4293 } 4294 4295 return true; 4296 } 4297 4298 // Max number of memory uses to look at before aborting the search to conserve 4299 // compile time. 4300 static constexpr int MaxMemoryUsesToScan = 20; 4301 4302 /// Recursively walk all the uses of I until we find a memory use. 4303 /// If we find an obviously non-foldable instruction, return true. 4304 /// Add the ultimately found memory instructions to MemoryUses. 4305 static bool FindAllMemoryUses( 4306 Instruction *I, 4307 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 4308 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, 4309 const TargetRegisterInfo &TRI, int SeenInsts = 0) { 4310 // If we already considered this instruction, we're done. 4311 if (!ConsideredInsts.insert(I).second) 4312 return false; 4313 4314 // If this is an obviously unfoldable instruction, bail out. 
4315 if (!MightBeFoldableInst(I)) 4316 return true; 4317 4318 const bool OptSize = I->getFunction()->optForSize(); 4319 4320 // Loop over all the uses, recursively processing them. 4321 for (Use &U : I->uses()) { 4322 // Conservatively return true if we're seeing a large number or a deep chain 4323 // of users. This avoids excessive compilation times in pathological cases. 4324 if (SeenInsts++ >= MaxMemoryUsesToScan) 4325 return true; 4326 4327 Instruction *UserI = cast<Instruction>(U.getUser()); 4328 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 4329 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 4330 continue; 4331 } 4332 4333 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 4334 unsigned opNo = U.getOperandNo(); 4335 if (opNo != StoreInst::getPointerOperandIndex()) 4336 return true; // Storing addr, not into addr. 4337 MemoryUses.push_back(std::make_pair(SI, opNo)); 4338 continue; 4339 } 4340 4341 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 4342 unsigned opNo = U.getOperandNo(); 4343 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 4344 return true; // Storing addr, not into addr. 4345 MemoryUses.push_back(std::make_pair(RMW, opNo)); 4346 continue; 4347 } 4348 4349 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 4350 unsigned opNo = U.getOperandNo(); 4351 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 4352 return true; // Storing addr, not into addr. 4353 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 4354 continue; 4355 } 4356 4357 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 4358 // If this is a cold call, we can sink the addressing calculation into 4359 // the cold path. See optimizeCallInst 4360 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 4361 continue; 4362 4363 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 4364 if (!IA) return true; 4365 4366 // If this is a memory operand, we're cool, otherwise bail out. 4367 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 4368 return true; 4369 continue; 4370 } 4371 4372 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, 4373 SeenInsts)) 4374 return true; 4375 } 4376 4377 return false; 4378 } 4379 4380 /// Return true if Val is already known to be live at the use site that we're 4381 /// folding it into. If so, there is no cost to include it in the addressing 4382 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 4383 /// instruction already. 4384 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 4385 Value *KnownLive2) { 4386 // If Val is either of the known-live values, we know it is live! 4387 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 4388 return true; 4389 4390 // All values other than instructions and arguments (e.g. constants) are live. 4391 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 4392 4393 // If Val is a constant sized alloca in the entry block, it is live, this is 4394 // true because it is just a reference to the stack/frame pointer, which is 4395 // live for the whole function. 4396 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 4397 if (AI->isStaticAlloca()) 4398 return true; 4399 4400 // Check to see if this value is already used in the memory instruction's 4401 // block. If so, it's already live into the block at the very least, so we 4402 // can reasonably fold it. 
4403 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 4404 } 4405 4406 /// It is possible for the addressing mode of the machine to fold the specified 4407 /// instruction into a load or store that ultimately uses it. 4408 /// However, the specified instruction has multiple uses. 4409 /// Given this, it may actually increase register pressure to fold it 4410 /// into the load. For example, consider this code: 4411 /// 4412 /// X = ... 4413 /// Y = X+1 4414 /// use(Y) -> nonload/store 4415 /// Z = Y+1 4416 /// load Z 4417 /// 4418 /// In this case, Y has multiple uses, and can be folded into the load of Z 4419 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 4420 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 4421 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 4422 /// number of computations either. 4423 /// 4424 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 4425 /// X was live across 'load Z' for other reasons, we actually *would* want to 4426 /// fold the addressing mode in the Z case. This would make Y die earlier. 4427 bool AddressingModeMatcher:: 4428 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 4429 ExtAddrMode &AMAfter) { 4430 if (IgnoreProfitability) return true; 4431 4432 // AMBefore is the addressing mode before this instruction was folded into it, 4433 // and AMAfter is the addressing mode after the instruction was folded. Get 4434 // the set of registers referenced by AMAfter and subtract out those 4435 // referenced by AMBefore: this is the set of values which folding in this 4436 // address extends the lifetime of. 4437 // 4438 // Note that there are only two potential values being referenced here, 4439 // BaseReg and ScaleReg (global addresses are always available, as are any 4440 // folded immediates). 4441 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 4442 4443 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 4444 // lifetime wasn't extended by adding this instruction. 4445 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4446 BaseReg = nullptr; 4447 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4448 ScaledReg = nullptr; 4449 4450 // If folding this instruction (and it's subexprs) didn't extend any live 4451 // ranges, we're ok with it. 4452 if (!BaseReg && !ScaledReg) 4453 return true; 4454 4455 // If all uses of this instruction can have the address mode sunk into them, 4456 // we can remove the addressing mode and effectively trade one live register 4457 // for another (at worst.) In this context, folding an addressing mode into 4458 // the use is just a particularly nice way of sinking it. 4459 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 4460 SmallPtrSet<Instruction*, 16> ConsideredInsts; 4461 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI)) 4462 return false; // Has a non-memory, non-foldable use! 4463 4464 // Now that we know that all uses of this instruction are part of a chain of 4465 // computation involving only operations that could theoretically be folded 4466 // into a memory use, loop over each of these memory operation uses and see 4467 // if they could *actually* fold the instruction. The assumption is that 4468 // addressing modes are cheap and that duplicating the computation involved 4469 // many times is worthwhile, even on a fastpath. 
For sinking candidates 4470 // (i.e. cold call sites), this serves as a way to prevent excessive code 4471 // growth since most architectures have some reasonable small and fast way to 4472 // compute an effective address. (i.e LEA on x86) 4473 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 4474 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 4475 Instruction *User = MemoryUses[i].first; 4476 unsigned OpNo = MemoryUses[i].second; 4477 4478 // Get the access type of this use. If the use isn't a pointer, we don't 4479 // know what it accesses. 4480 Value *Address = User->getOperand(OpNo); 4481 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); 4482 if (!AddrTy) 4483 return false; 4484 Type *AddressAccessTy = AddrTy->getElementType(); 4485 unsigned AS = AddrTy->getAddressSpace(); 4486 4487 // Do a match against the root of this address, ignoring profitability. This 4488 // will tell us if the addressing mode for the memory operation will 4489 // *actually* cover the shared instruction. 4490 ExtAddrMode Result; 4491 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4492 TPT.getRestorationPoint(); 4493 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, 4494 AddressAccessTy, AS, 4495 MemoryInst, Result, InsertedInsts, 4496 PromotedInsts, TPT); 4497 Matcher.IgnoreProfitability = true; 4498 bool Success = Matcher.matchAddr(Address, 0); 4499 (void)Success; assert(Success && "Couldn't select *anything*?"); 4500 4501 // The match was to check the profitability, the changes made are not 4502 // part of the original matcher. Therefore, they should be dropped 4503 // otherwise the original matcher will not present the right state. 4504 TPT.rollback(LastKnownGood); 4505 4506 // If the match didn't cover I, then it won't be shared by it. 4507 if (!is_contained(MatchedAddrModeInsts, I)) 4508 return false; 4509 4510 MatchedAddrModeInsts.clear(); 4511 } 4512 4513 return true; 4514 } 4515 4516 /// Return true if the specified values are defined in a 4517 /// different basic block than BB. 4518 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 4519 if (Instruction *I = dyn_cast<Instruction>(V)) 4520 return I->getParent() != BB; 4521 return false; 4522 } 4523 4524 /// Sink addressing mode computation immediate before MemoryInst if doing so 4525 /// can be done without increasing register pressure. The need for the 4526 /// register pressure constraint means this can end up being an all or nothing 4527 /// decision for all uses of the same addressing computation. 4528 /// 4529 /// Load and Store Instructions often have addressing modes that can do 4530 /// significant amounts of computation. As such, instruction selection will try 4531 /// to get the load or store to do as much computation as possible for the 4532 /// program. The problem is that isel can only see within a single block. As 4533 /// such, we sink as much legal addressing mode work into the block as possible. 4534 /// 4535 /// This method is used to optimize both load/store and inline asms with memory 4536 /// operands. It's also used to sink addressing computations feeding into cold 4537 /// call sites into their (cold) basic block. 4538 /// 4539 /// The motivation for handling sinking into cold blocks is that doing so can 4540 /// both enable other address mode sinking (by satisfying the register pressure 4541 /// constraint above), and reduce register pressure globally (by removing the 4542 /// addressing mode computation from the fast path entirely.). 
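/// As a rough illustration of the effect (the IR below is made up for this
/// comment, not taken from a test case), an address computed in a different
/// block than its memory access:
/// \code
///   entry:
///     %addr = getelementptr inbounds i32, i32* %base, i64 %idx
///     br label %use
///   use:
///     %val = load i32, i32* %addr
/// \endcode
/// is rematerialized as a new "sunkaddr" sequence next to the memory
/// instruction, so instruction selection can fold it into the load's
/// addressing mode:
/// \code
///   use:
///     %sunkaddr = getelementptr inbounds i32, i32* %base, i64 %idx
///     %val = load i32, i32* %sunkaddr
/// \endcode
/// The exact shape of the sunk sequence depends on the target and on whether
/// the GEP-based sinking path below is used.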
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing modes obtained from the non-PHI/select roots of
  // the graph are compatible.
  bool PhiOrSelectSeen = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  AddressingModeCombiner AddrModes;
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // We allow traversing cyclic Phi nodes.
    // If this loop succeeds, then every way of reaching the address through
    // the Phi nodes computes an address of the form
    //    BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index are
    // exactly the same Values in all cases.
    // It means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same value as they had in the address computation
    // represented as a Phi. So we can safely sink the address computation to
    // the memory instruction.
    if (!Visited.insert(V).second)
      continue;

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : P->incoming_values())
        worklist.push_back(IncValue);
      PhiOrSelectSeen = true;
      continue;
    }
    // Similar for select.
    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
      worklist.push_back(SI->getFalseValue());
      worklist.push_back(SI->getTrueValue());
      PhiOrSelectSeen = true;
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed. Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    AddrModeInsts.clear();
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
        InsertedInsts, PromotedInsts, TPT);
    NewAddrMode.OriginalValue = V;

    if (!AddrModes.addNewAddrMode(NewAddrMode))
      break;
  }

  // Try to combine the AddrModes we've collected. If we couldn't collect any,
  // or we have multiple but either couldn't combine them or combining them
  // wouldn't do anything useful, bail out now.
  if (!AddrModes.combineAddrModes()) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  TPT.commit();

  // Get the combined AddrMode (or the only AddrMode, if we only had one).
  ExtAddrMode AddrMode = AddrModes.getAddrMode();

  // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a Phi node then it is definitely not local, and if we saw a
  // select then we want to push the address calculation past it even if it's
  // already in this BB.
4622 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { 4623 return IsNonLocalValue(V, MemoryInst->getParent()); 4624 })) { 4625 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); 4626 return false; 4627 } 4628 4629 // Insert this computation right after this user. Since our caller is 4630 // scanning from the top of the BB to the bottom, reuse of the expr are 4631 // guaranteed to happen later. 4632 IRBuilder<> Builder(MemoryInst); 4633 4634 // Now that we determined the addressing expression we want to use and know 4635 // that we have to sink it into this block. Check to see if we have already 4636 // done this for some other load/store instr in this block. If so, reuse the 4637 // computation. 4638 Value *&SunkAddr = SunkAddrs[Addr]; 4639 if (SunkAddr) { 4640 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " 4641 << *MemoryInst << "\n"); 4642 if (SunkAddr->getType() != Addr->getType()) 4643 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4644 } else if (AddrSinkUsingGEPs || 4645 (!AddrSinkUsingGEPs.getNumOccurrences() && TM && 4646 SubtargetInfo->useAA())) { 4647 // By default, we use the GEP-based method when AA is used later. This 4648 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 4649 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 4650 << *MemoryInst << "\n"); 4651 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4652 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 4653 4654 // First, find the pointer. 4655 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 4656 ResultPtr = AddrMode.BaseReg; 4657 AddrMode.BaseReg = nullptr; 4658 } 4659 4660 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 4661 // We can't add more than one pointer together, nor can we scale a 4662 // pointer (both of which seem meaningless). 4663 if (ResultPtr || AddrMode.Scale != 1) 4664 return false; 4665 4666 ResultPtr = AddrMode.ScaledReg; 4667 AddrMode.Scale = 0; 4668 } 4669 4670 // It is only safe to sign extend the BaseReg if we know that the math 4671 // required to create it did not overflow before we extend it. Since 4672 // the original IR value was tossed in favor of a constant back when 4673 // the AddrMode was created we need to bail out gracefully if widths 4674 // do not match instead of extending it. 4675 // 4676 // (See below for code to add the scale.) 4677 if (AddrMode.Scale) { 4678 Type *ScaledRegTy = AddrMode.ScaledReg->getType(); 4679 if (cast<IntegerType>(IntPtrTy)->getBitWidth() > 4680 cast<IntegerType>(ScaledRegTy)->getBitWidth()) 4681 return false; 4682 } 4683 4684 if (AddrMode.BaseGV) { 4685 if (ResultPtr) 4686 return false; 4687 4688 ResultPtr = AddrMode.BaseGV; 4689 } 4690 4691 // If the real base value actually came from an inttoptr, then the matcher 4692 // will look through it and provide only the integer value. In that case, 4693 // use it here. 
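    // For instance (illustrative values, not from a test case), if the
    // original address was built as
    //   %a = add i64 %x, %y
    //   %p = inttoptr i64 %a to i32*
    // the matcher may report only the integer value %a (with no pointer base),
    // and the code below recreates the int-to-pointer cast next to the memory
    // instruction.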
4694 if (!DL->isNonIntegralPointerType(Addr->getType())) { 4695 if (!ResultPtr && AddrMode.BaseReg) { 4696 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), 4697 "sunkaddr"); 4698 AddrMode.BaseReg = nullptr; 4699 } else if (!ResultPtr && AddrMode.Scale == 1) { 4700 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), 4701 "sunkaddr"); 4702 AddrMode.Scale = 0; 4703 } 4704 } 4705 4706 if (!ResultPtr && 4707 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 4708 SunkAddr = Constant::getNullValue(Addr->getType()); 4709 } else if (!ResultPtr) { 4710 return false; 4711 } else { 4712 Type *I8PtrTy = 4713 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 4714 Type *I8Ty = Builder.getInt8Ty(); 4715 4716 // Start with the base register. Do this first so that subsequent address 4717 // matching finds it last, which will prevent it from trying to match it 4718 // as the scaled value in case it happens to be a mul. That would be 4719 // problematic if we've sunk a different mul for the scale, because then 4720 // we'd end up sinking both muls. 4721 if (AddrMode.BaseReg) { 4722 Value *V = AddrMode.BaseReg; 4723 if (V->getType() != IntPtrTy) 4724 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4725 4726 ResultIndex = V; 4727 } 4728 4729 // Add the scale value. 4730 if (AddrMode.Scale) { 4731 Value *V = AddrMode.ScaledReg; 4732 if (V->getType() == IntPtrTy) { 4733 // done. 4734 } else { 4735 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < 4736 cast<IntegerType>(V->getType())->getBitWidth() && 4737 "We can't transform if ScaledReg is too narrow"); 4738 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4739 } 4740 4741 if (AddrMode.Scale != 1) 4742 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4743 "sunkaddr"); 4744 if (ResultIndex) 4745 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 4746 else 4747 ResultIndex = V; 4748 } 4749 4750 // Add in the Base Offset if present. 4751 if (AddrMode.BaseOffs) { 4752 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4753 if (ResultIndex) { 4754 // We need to add this separately from the scale above to help with 4755 // SDAG consecutive load/store merging. 4756 if (ResultPtr->getType() != I8PtrTy) 4757 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4758 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4759 } 4760 4761 ResultIndex = V; 4762 } 4763 4764 if (!ResultIndex) { 4765 SunkAddr = ResultPtr; 4766 } else { 4767 if (ResultPtr->getType() != I8PtrTy) 4768 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4769 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4770 } 4771 4772 if (SunkAddr->getType() != Addr->getType()) 4773 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4774 } 4775 } else { 4776 // We'd require a ptrtoint/inttoptr down the line, which we can't do for 4777 // non-integral pointers, so in that case bail out now. 4778 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; 4779 Type *ScaleTy = AddrMode.Scale ? 
AddrMode.ScaledReg->getType() : nullptr; 4780 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); 4781 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); 4782 if (DL->isNonIntegralPointerType(Addr->getType()) || 4783 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || 4784 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || 4785 (AddrMode.BaseGV && 4786 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) 4787 return false; 4788 4789 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 4790 << *MemoryInst << "\n"); 4791 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4792 Value *Result = nullptr; 4793 4794 // Start with the base register. Do this first so that subsequent address 4795 // matching finds it last, which will prevent it from trying to match it 4796 // as the scaled value in case it happens to be a mul. That would be 4797 // problematic if we've sunk a different mul for the scale, because then 4798 // we'd end up sinking both muls. 4799 if (AddrMode.BaseReg) { 4800 Value *V = AddrMode.BaseReg; 4801 if (V->getType()->isPointerTy()) 4802 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4803 if (V->getType() != IntPtrTy) 4804 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4805 Result = V; 4806 } 4807 4808 // Add the scale value. 4809 if (AddrMode.Scale) { 4810 Value *V = AddrMode.ScaledReg; 4811 if (V->getType() == IntPtrTy) { 4812 // done. 4813 } else if (V->getType()->isPointerTy()) { 4814 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4815 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 4816 cast<IntegerType>(V->getType())->getBitWidth()) { 4817 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4818 } else { 4819 // It is only safe to sign extend the BaseReg if we know that the math 4820 // required to create it did not overflow before we extend it. Since 4821 // the original IR value was tossed in favor of a constant back when 4822 // the AddrMode was created we need to bail out gracefully if widths 4823 // do not match instead of extending it. 4824 Instruction *I = dyn_cast_or_null<Instruction>(Result); 4825 if (I && (Result != AddrMode.BaseReg)) 4826 I->eraseFromParent(); 4827 return false; 4828 } 4829 if (AddrMode.Scale != 1) 4830 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4831 "sunkaddr"); 4832 if (Result) 4833 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4834 else 4835 Result = V; 4836 } 4837 4838 // Add in the BaseGV if present. 4839 if (AddrMode.BaseGV) { 4840 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 4841 if (Result) 4842 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4843 else 4844 Result = V; 4845 } 4846 4847 // Add in the Base Offset if present. 4848 if (AddrMode.BaseOffs) { 4849 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4850 if (Result) 4851 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4852 else 4853 Result = V; 4854 } 4855 4856 if (!Result) 4857 SunkAddr = Constant::getNullValue(Addr->getType()); 4858 else 4859 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 4860 } 4861 4862 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 4863 4864 // If we have no uses, recursively delete the value and all dead instructions 4865 // using it. 4866 if (Repl->use_empty()) { 4867 // This can cause recursive deletion, which can invalidate our iterator. 4868 // Use a WeakTrackingVH to hold onto it in case this happens. 
4869 Value *CurValue = &*CurInstIterator; 4870 WeakTrackingVH IterHandle(CurValue); 4871 BasicBlock *BB = CurInstIterator->getParent(); 4872 4873 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 4874 4875 if (IterHandle != CurValue) { 4876 // If the iterator instruction was recursively deleted, start over at the 4877 // start of the block. 4878 CurInstIterator = BB->begin(); 4879 SunkAddrs.clear(); 4880 } 4881 } 4882 ++NumMemoryInsts; 4883 return true; 4884 } 4885 4886 /// If there are any memory operands, use OptimizeMemoryInst to sink their 4887 /// address computing into the block when possible / profitable. 4888 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 4889 bool MadeChange = false; 4890 4891 const TargetRegisterInfo *TRI = 4892 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); 4893 TargetLowering::AsmOperandInfoVector TargetConstraints = 4894 TLI->ParseConstraints(*DL, TRI, CS); 4895 unsigned ArgNo = 0; 4896 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4897 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4898 4899 // Compute the constraint code and ConstraintType to use. 4900 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 4901 4902 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 4903 OpInfo.isIndirect) { 4904 Value *OpVal = CS->getArgOperand(ArgNo++); 4905 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 4906 } else if (OpInfo.Type == InlineAsm::isInput) 4907 ArgNo++; 4908 } 4909 4910 return MadeChange; 4911 } 4912 4913 /// \brief Check if all the uses of \p Val are equivalent (or free) zero or 4914 /// sign extensions. 4915 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 4916 assert(!Val->use_empty() && "Input must have at least one use"); 4917 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 4918 bool IsSExt = isa<SExtInst>(FirstUser); 4919 Type *ExtTy = FirstUser->getType(); 4920 for (const User *U : Val->users()) { 4921 const Instruction *UI = cast<Instruction>(U); 4922 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 4923 return false; 4924 Type *CurTy = UI->getType(); 4925 // Same input and output types: Same instruction after CSE. 4926 if (CurTy == ExtTy) 4927 continue; 4928 4929 // If IsSExt is true, we are in this situation: 4930 // a = Val 4931 // b = sext ty1 a to ty2 4932 // c = sext ty1 a to ty3 4933 // Assuming ty2 is shorter than ty3, this could be turned into: 4934 // a = Val 4935 // b = sext ty1 a to ty2 4936 // c = sext ty2 b to ty3 4937 // However, the last sext is not free. 4938 if (IsSExt) 4939 return false; 4940 4941 // This is a ZExt, maybe this is free to extend from one type to another. 4942 // In that case, we would not account for a different use. 4943 Type *NarrowTy; 4944 Type *LargeTy; 4945 if (ExtTy->getScalarType()->getIntegerBitWidth() > 4946 CurTy->getScalarType()->getIntegerBitWidth()) { 4947 NarrowTy = CurTy; 4948 LargeTy = ExtTy; 4949 } else { 4950 NarrowTy = ExtTy; 4951 LargeTy = CurTy; 4952 } 4953 4954 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 4955 return false; 4956 } 4957 // All uses are the same or can be derived from one another for free. 4958 return true; 4959 } 4960 4961 /// \brief Try to speculatively promote extensions in \p Exts and continue 4962 /// promoting through newly promoted operands recursively as far as doing so is 4963 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 
4964 /// When some promotion happened, \p TPT contains the proper state to revert 4965 /// them. 4966 /// 4967 /// \return true if some promotion happened, false otherwise. 4968 bool CodeGenPrepare::tryToPromoteExts( 4969 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 4970 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 4971 unsigned CreatedInstsCost) { 4972 bool Promoted = false; 4973 4974 // Iterate over all the extensions to try to promote them. 4975 for (auto I : Exts) { 4976 // Early check if we directly have ext(load). 4977 if (isa<LoadInst>(I->getOperand(0))) { 4978 ProfitablyMovedExts.push_back(I); 4979 continue; 4980 } 4981 4982 // Check whether or not we want to do any promotion. The reason we have 4983 // this check inside the for loop is to catch the case where an extension 4984 // is directly fed by a load because in such case the extension can be moved 4985 // up without any promotion on its operands. 4986 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) 4987 return false; 4988 4989 // Get the action to perform the promotion. 4990 TypePromotionHelper::Action TPH = 4991 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 4992 // Check if we can promote. 4993 if (!TPH) { 4994 // Save the current extension as we cannot move up through its operand. 4995 ProfitablyMovedExts.push_back(I); 4996 continue; 4997 } 4998 4999 // Save the current state. 5000 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5001 TPT.getRestorationPoint(); 5002 SmallVector<Instruction *, 4> NewExts; 5003 unsigned NewCreatedInstsCost = 0; 5004 unsigned ExtCost = !TLI->isExtFree(I); 5005 // Promote. 5006 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, 5007 &NewExts, nullptr, *TLI); 5008 assert(PromotedVal && 5009 "TypePromotionHelper should have filtered out those cases"); 5010 5011 // We would be able to merge only one extension in a load. 5012 // Therefore, if we have more than 1 new extension we heuristically 5013 // cut this search path, because it means we degrade the code quality. 5014 // With exactly 2, the transformation is neutral, because we will merge 5015 // one extension but leave one. However, we optimistically keep going, 5016 // because the new extension may be removed too. 5017 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; 5018 // FIXME: It would be possible to propagate a negative value instead of 5019 // conservatively ceiling it to 0. 5020 TotalCreatedInstsCost = 5021 std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); 5022 if (!StressExtLdPromotion && 5023 (TotalCreatedInstsCost > 1 || 5024 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { 5025 // This promotion is not profitable, rollback to the previous state, and 5026 // save the current extension in ProfitablyMovedExts as the latest 5027 // speculative promotion turned out to be unprofitable. 5028 TPT.rollback(LastKnownGood); 5029 ProfitablyMovedExts.push_back(I); 5030 continue; 5031 } 5032 // Continue promoting NewExts as far as doing so is profitable. 
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable, roll
    // back and save the current extension (I) as the last profitable
    // extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}

/// Merge redundant sexts when one dominates the other.
bool CodeGenPrepare::mergeSExts(Function &F) {
  DominatorTree DT(F);
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (DT.dominates(Inst, Pt)) {
          Pt->replaceAllUsesWith(Inst);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!DT.dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          continue;
        Inst->replaceAllUsesWith(Pt);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}

/// Return true if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  return TLI->isExtLoad(LI, Inst, *DL);
}

/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used in memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension; it may be modified during the process if
/// some promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  // ExtLoad formation and address type promotion infrastructure requires TLI
  // to be effective.
  if (!TLI)
    return false;

  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operation for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);

  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);

  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *ExtFedByLoad;

  // Try to promote a chain of computation if it allows forming an extended
  // load.
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->moveAfter(LI);
    // CGP does not check if the zext would be speculatively executed when
    // moved to the same basic block as the load. Preserving its original
    // location would pessimize the debugging experience, as well as negatively
    // impact the quality of sample pgo. We don't want to use "line 0" as that
    // has a size cost in the line-table section and logically the zext can be
    // seen as part of the load. Therefore we conservatively reuse the same
    // debug location for the load and the zext.
    ExtFedByLoad->setDebugLoc(LI->getDebugLoc());
    ++NumExtsMoved;
    Inst = ExtFedByLoad;
    return true;
  }

  // Continue promoting SExts if the target considers address type promotion
  // worthwhile.
  if (ATPConsiderable &&
      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
                                  HasPromoted, TPT, SpeculativelyMovedExts))
    return true;

  TPT.rollback(LastKnownGood);
  return false;
}

// Perform address type promotion if doing so is profitable.
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value.
However, if 5221 // AllowPromotionWithoutCommonHeader == true, we expect promoting the 5222 // extension is just profitable. 5223 bool CodeGenPrepare::performAddressTypePromotion( 5224 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 5225 bool HasPromoted, TypePromotionTransaction &TPT, 5226 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 5227 bool Promoted = false; 5228 SmallPtrSet<Instruction *, 1> UnhandledExts; 5229 bool AllSeenFirst = true; 5230 for (auto I : SpeculativelyMovedExts) { 5231 Value *HeadOfChain = I->getOperand(0); 5232 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 5233 SeenChainsForSExt.find(HeadOfChain); 5234 // If there is an unhandled SExt which has the same header, try to promote 5235 // it as well. 5236 if (AlreadySeen != SeenChainsForSExt.end()) { 5237 if (AlreadySeen->second != nullptr) 5238 UnhandledExts.insert(AlreadySeen->second); 5239 AllSeenFirst = false; 5240 } 5241 } 5242 5243 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 5244 SpeculativelyMovedExts.size() == 1)) { 5245 TPT.commit(); 5246 if (HasPromoted) 5247 Promoted = true; 5248 for (auto I : SpeculativelyMovedExts) { 5249 Value *HeadOfChain = I->getOperand(0); 5250 SeenChainsForSExt[HeadOfChain] = nullptr; 5251 ValToSExtendedUses[HeadOfChain].push_back(I); 5252 } 5253 // Update Inst as promotion happen. 5254 Inst = SpeculativelyMovedExts.pop_back_val(); 5255 } else { 5256 // This is the first chain visited from the header, keep the current chain 5257 // as unhandled. Defer to promote this until we encounter another SExt 5258 // chain derived from the same header. 5259 for (auto I : SpeculativelyMovedExts) { 5260 Value *HeadOfChain = I->getOperand(0); 5261 SeenChainsForSExt[HeadOfChain] = Inst; 5262 } 5263 return false; 5264 } 5265 5266 if (!AllSeenFirst && !UnhandledExts.empty()) 5267 for (auto VisitedSExt : UnhandledExts) { 5268 if (RemovedInsts.count(VisitedSExt)) 5269 continue; 5270 TypePromotionTransaction TPT(RemovedInsts); 5271 SmallVector<Instruction *, 1> Exts; 5272 SmallVector<Instruction *, 2> Chains; 5273 Exts.push_back(VisitedSExt); 5274 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 5275 TPT.commit(); 5276 if (HasPromoted) 5277 Promoted = true; 5278 for (auto I : Chains) { 5279 Value *HeadOfChain = I->getOperand(0); 5280 // Mark this as handled. 5281 SeenChainsForSExt[HeadOfChain] = nullptr; 5282 ValToSExtendedUses[HeadOfChain].push_back(I); 5283 } 5284 } 5285 return Promoted; 5286 } 5287 5288 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 5289 BasicBlock *DefBB = I->getParent(); 5290 5291 // If the result of a {s|z}ext and its source are both live out, rewrite all 5292 // other uses of the source with result of extension. 5293 Value *Src = I->getOperand(0); 5294 if (Src->hasOneUse()) 5295 return false; 5296 5297 // Only do this xform if truncating is free. 5298 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 5299 return false; 5300 5301 // Only safe to perform the optimization if the source is also defined in 5302 // this block. 5303 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 5304 return false; 5305 5306 bool DefIsLiveOut = false; 5307 for (User *U : I->users()) { 5308 Instruction *UI = cast<Instruction>(U); 5309 5310 // Figure out which BB this ext is used in. 5311 BasicBlock *UserBB = UI->getParent(); 5312 if (UserBB == DefBB) continue; 5313 DefIsLiveOut = true; 5314 break; 5315 } 5316 if (!DefIsLiveOut) 5317 return false; 5318 5319 // Make sure none of the uses are PHI nodes. 
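  // As an illustration (names assumed), when both values are live out of this
  // block:
  //   %x  = add i32 %a, %b
  //   %sx = sext i32 %x to i64
  // a use of %x in another block is rewritten to use a (free) truncate of the
  // extension instead, so only %sx has to stay live across the block boundary:
  //   %t = trunc i64 %sx to i32
  // The loop below first rejects out-of-block PHI, load and store users, where
  // the extra trunc could end up introducing reloads.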
5320 for (User *U : Src->users()) { 5321 Instruction *UI = cast<Instruction>(U); 5322 BasicBlock *UserBB = UI->getParent(); 5323 if (UserBB == DefBB) continue; 5324 // Be conservative. We don't want this xform to end up introducing 5325 // reloads just before load / store instructions. 5326 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 5327 return false; 5328 } 5329 5330 // InsertedTruncs - Only insert one trunc in each block once. 5331 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 5332 5333 bool MadeChange = false; 5334 for (Use &U : Src->uses()) { 5335 Instruction *User = cast<Instruction>(U.getUser()); 5336 5337 // Figure out which BB this ext is used in. 5338 BasicBlock *UserBB = User->getParent(); 5339 if (UserBB == DefBB) continue; 5340 5341 // Both src and def are live in this block. Rewrite the use. 5342 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 5343 5344 if (!InsertedTrunc) { 5345 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 5346 assert(InsertPt != UserBB->end()); 5347 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 5348 InsertedInsts.insert(InsertedTrunc); 5349 } 5350 5351 // Replace a use of the {s|z}ext source with a use of the result. 5352 U = InsertedTrunc; 5353 ++NumExtUses; 5354 MadeChange = true; 5355 } 5356 5357 return MadeChange; 5358 } 5359 5360 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 5361 // just after the load if the target can fold this into one extload instruction, 5362 // with the hope of eliminating some of the other later "and" instructions using 5363 // the loaded value. "and"s that are made trivially redundant by the insertion 5364 // of the new "and" are removed by this function, while others (e.g. those whose 5365 // path from the load goes through a phi) are left for isel to potentially 5366 // remove. 5367 // 5368 // For example: 5369 // 5370 // b0: 5371 // x = load i32 5372 // ... 5373 // b1: 5374 // y = and x, 0xff 5375 // z = use y 5376 // 5377 // becomes: 5378 // 5379 // b0: 5380 // x = load i32 5381 // x' = and x, 0xff 5382 // ... 5383 // b1: 5384 // z = use x' 5385 // 5386 // whereas: 5387 // 5388 // b0: 5389 // x1 = load i32 5390 // ... 5391 // b1: 5392 // x2 = load i32 5393 // ... 5394 // b2: 5395 // x = phi x1, x2 5396 // y = and x, 0xff 5397 // 5398 // becomes (after a call to optimizeLoadExt for each load): 5399 // 5400 // b0: 5401 // x1 = load i32 5402 // x1' = and x1, 0xff 5403 // ... 5404 // b1: 5405 // x2 = load i32 5406 // x2' = and x2, 0xff 5407 // ... 5408 // b2: 5409 // x = phi x1', x2' 5410 // y = and x, 0xff 5411 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 5412 if (!Load->isSimple() || 5413 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) 5414 return false; 5415 5416 // Skip loads we've already transformed. 5417 if (Load->hasOneUse() && 5418 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 5419 return false; 5420 5421 // Look at all uses of Load, looking through phis, to determine how many bits 5422 // of the loaded value are needed. 
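  // Worked example (illustrative only): if the only uses of an i32 load %ld,
  // looking through phis, are masks like
  //   %m = and i32 %ld, 255
  // then DemandBits becomes 0xff, ActiveBits is 8 and WidestAndBits is 0xff,
  // so on a target where the corresponding i8 zero-extending load is legal a
  // new "and %ld, 255" is placed right after the load and the masks applied
  // directly to the load are removed as redundant.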
5423 SmallVector<Instruction *, 8> WorkList; 5424 SmallPtrSet<Instruction *, 16> Visited; 5425 SmallVector<Instruction *, 8> AndsToMaybeRemove; 5426 for (auto *U : Load->users()) 5427 WorkList.push_back(cast<Instruction>(U)); 5428 5429 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 5430 unsigned BitWidth = LoadResultVT.getSizeInBits(); 5431 APInt DemandBits(BitWidth, 0); 5432 APInt WidestAndBits(BitWidth, 0); 5433 5434 while (!WorkList.empty()) { 5435 Instruction *I = WorkList.back(); 5436 WorkList.pop_back(); 5437 5438 // Break use-def graph loops. 5439 if (!Visited.insert(I).second) 5440 continue; 5441 5442 // For a PHI node, push all of its users. 5443 if (auto *Phi = dyn_cast<PHINode>(I)) { 5444 for (auto *U : Phi->users()) 5445 WorkList.push_back(cast<Instruction>(U)); 5446 continue; 5447 } 5448 5449 switch (I->getOpcode()) { 5450 case Instruction::And: { 5451 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 5452 if (!AndC) 5453 return false; 5454 APInt AndBits = AndC->getValue(); 5455 DemandBits |= AndBits; 5456 // Keep track of the widest and mask we see. 5457 if (AndBits.ugt(WidestAndBits)) 5458 WidestAndBits = AndBits; 5459 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 5460 AndsToMaybeRemove.push_back(I); 5461 break; 5462 } 5463 5464 case Instruction::Shl: { 5465 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 5466 if (!ShlC) 5467 return false; 5468 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 5469 DemandBits.setLowBits(BitWidth - ShiftAmt); 5470 break; 5471 } 5472 5473 case Instruction::Trunc: { 5474 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 5475 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 5476 DemandBits.setLowBits(TruncBitWidth); 5477 break; 5478 } 5479 5480 default: 5481 return false; 5482 } 5483 } 5484 5485 uint32_t ActiveBits = DemandBits.getActiveBits(); 5486 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 5487 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 5488 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 5489 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 5490 // followed by an AND. 5491 // TODO: Look into removing this restriction by fixing backends to either 5492 // return false for isLoadExtLegal for i1 or have them select this pattern to 5493 // a single instruction. 5494 // 5495 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 5496 // mask, since these are the only ands that will be removed by isel. 5497 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 5498 WidestAndBits != DemandBits) 5499 return false; 5500 5501 LLVMContext &Ctx = Load->getType()->getContext(); 5502 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 5503 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 5504 5505 // Reject cases that won't be matched as extloads. 5506 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 5507 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 5508 return false; 5509 5510 IRBuilder<> Builder(Load->getNextNode()); 5511 auto *NewAnd = dyn_cast<Instruction>( 5512 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 5513 // Mark this instruction as "inserted by CGP", so that other 5514 // optimizations don't touch it. 5515 InsertedInsts.insert(NewAnd); 5516 5517 // Replace all uses of load with new and (except for the use of load in the 5518 // new and itself). 
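  // Note: replaceAllUsesWith also rewrites the load operand inside NewAnd
  // itself (leaving it momentarily self-referential), which is why the operand
  // is pointed back at the load right afterwards.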
5519 Load->replaceAllUsesWith(NewAnd); 5520 NewAnd->setOperand(0, Load); 5521 5522 // Remove any and instructions that are now redundant. 5523 for (auto *And : AndsToMaybeRemove) 5524 // Check that the and mask is the same as the one we decided to put on the 5525 // new and. 5526 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 5527 And->replaceAllUsesWith(NewAnd); 5528 if (&*CurInstIterator == And) 5529 CurInstIterator = std::next(And->getIterator()); 5530 And->eraseFromParent(); 5531 ++NumAndUses; 5532 } 5533 5534 ++NumAndsAdded; 5535 return true; 5536 } 5537 5538 /// Check if V (an operand of a select instruction) is an expensive instruction 5539 /// that is only used once. 5540 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 5541 auto *I = dyn_cast<Instruction>(V); 5542 // If it's safe to speculatively execute, then it should not have side 5543 // effects; therefore, it's safe to sink and possibly *not* execute. 5544 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 5545 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; 5546 } 5547 5548 /// Returns true if a SelectInst should be turned into an explicit branch. 5549 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 5550 const TargetLowering *TLI, 5551 SelectInst *SI) { 5552 // If even a predictable select is cheap, then a branch can't be cheaper. 5553 if (!TLI->isPredictableSelectExpensive()) 5554 return false; 5555 5556 // FIXME: This should use the same heuristics as IfConversion to determine 5557 // whether a select is better represented as a branch. 5558 5559 // If metadata tells us that the select condition is obviously predictable, 5560 // then we want to replace the select with a branch. 5561 uint64_t TrueWeight, FalseWeight; 5562 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 5563 uint64_t Max = std::max(TrueWeight, FalseWeight); 5564 uint64_t Sum = TrueWeight + FalseWeight; 5565 if (Sum != 0) { 5566 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 5567 if (Probability > TLI->getPredictableBranchThreshold()) 5568 return true; 5569 } 5570 } 5571 5572 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 5573 5574 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 5575 // comparison condition. If the compare has more than one use, there's 5576 // probably another cmov or setcc around, so it's not worth emitting a branch. 5577 if (!Cmp || !Cmp->hasOneUse()) 5578 return false; 5579 5580 // If either operand of the select is expensive and only needed on one side 5581 // of the select, we should form a branch. 5582 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 5583 sinkSelectOperand(TTI, SI->getFalseValue())) 5584 return true; 5585 5586 return false; 5587 } 5588 5589 /// If \p isTrue is true, return the true value of \p SI, otherwise return 5590 /// false value of \p SI. If the true/false value of \p SI is defined by any 5591 /// select instructions in \p Selects, look through the defining select 5592 /// instruction until the true/false value is not defined in \p Selects. 5593 static Value *getTrueOrFalseValue( 5594 SelectInst *SI, bool isTrue, 5595 const SmallPtrSet<const Instruction *, 2> &Selects) { 5596 Value *V; 5597 5598 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 5599 DefSI = dyn_cast<SelectInst>(V)) { 5600 assert(DefSI->getCondition() == SI->getCondition() && 5601 "The condition of DefSI does not match with SI"); 5602 V = (isTrue ? 
DefSI->getTrueValue() : DefSI->getFalseValue()); 5603 } 5604 return V; 5605 } 5606 5607 /// If we have a SelectInst that will likely profit from branch prediction, 5608 /// turn it into a branch. 5609 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 5610 // Find all consecutive select instructions that share the same condition. 5611 SmallVector<SelectInst *, 2> ASI; 5612 ASI.push_back(SI); 5613 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 5614 It != SI->getParent()->end(); ++It) { 5615 SelectInst *I = dyn_cast<SelectInst>(&*It); 5616 if (I && SI->getCondition() == I->getCondition()) { 5617 ASI.push_back(I); 5618 } else { 5619 break; 5620 } 5621 } 5622 5623 SelectInst *LastSI = ASI.back(); 5624 // Increment the current iterator to skip all the rest of select instructions 5625 // because they will be either "not lowered" or "all lowered" to branch. 5626 CurInstIterator = std::next(LastSI->getIterator()); 5627 5628 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 5629 5630 // Can we convert the 'select' to CF ? 5631 if (DisableSelectToBranch || OptSize || !TLI || VectorCond || 5632 SI->getMetadata(LLVMContext::MD_unpredictable)) 5633 return false; 5634 5635 TargetLowering::SelectSupportKind SelectKind; 5636 if (VectorCond) 5637 SelectKind = TargetLowering::VectorMaskSelect; 5638 else if (SI->getType()->isVectorTy()) 5639 SelectKind = TargetLowering::ScalarCondVectorVal; 5640 else 5641 SelectKind = TargetLowering::ScalarValSelect; 5642 5643 if (TLI->isSelectSupported(SelectKind) && 5644 !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) 5645 return false; 5646 5647 ModifiedDT = true; 5648 5649 // Transform a sequence like this: 5650 // start: 5651 // %cmp = cmp uge i32 %a, %b 5652 // %sel = select i1 %cmp, i32 %c, i32 %d 5653 // 5654 // Into: 5655 // start: 5656 // %cmp = cmp uge i32 %a, %b 5657 // br i1 %cmp, label %select.true, label %select.false 5658 // select.true: 5659 // br label %select.end 5660 // select.false: 5661 // br label %select.end 5662 // select.end: 5663 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 5664 // 5665 // In addition, we may sink instructions that produce %c or %d from 5666 // the entry block into the destination(s) of the new branch. 5667 // If the true or false blocks do not contain a sunken instruction, that 5668 // block and its branch may be optimized away. In that case, one side of the 5669 // first branch will point directly to select.end, and the corresponding PHI 5670 // predecessor block will be the start block. 5671 5672 // First, we split the block containing the select into 2 blocks. 5673 BasicBlock *StartBlock = SI->getParent(); 5674 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 5675 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 5676 5677 // Delete the unconditional branch that was just created by the split. 5678 StartBlock->getTerminator()->eraseFromParent(); 5679 5680 // These are the new basic blocks for the conditional branch. 5681 // At least one will become an actual new basic block. 5682 BasicBlock *TrueBlock = nullptr; 5683 BasicBlock *FalseBlock = nullptr; 5684 BranchInst *TrueBranch = nullptr; 5685 BranchInst *FalseBranch = nullptr; 5686 5687 // Sink expensive instructions into the conditional blocks to avoid executing 5688 // them speculatively. 
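  // For example (illustrative, assuming the target's cost model rates the
  // division as expensive and it is safe to speculate):
  //   %div = sdiv i32 %a, 7
  //   %sel = select i1 %cmp, i32 %div, i32 0
  // the sdiv is moved into the "select.true.sink" block so it only executes
  // when %cmp is true.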
5689 for (SelectInst *SI : ASI) { 5690 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 5691 if (TrueBlock == nullptr) { 5692 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 5693 EndBlock->getParent(), EndBlock); 5694 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 5695 } 5696 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 5697 TrueInst->moveBefore(TrueBranch); 5698 } 5699 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 5700 if (FalseBlock == nullptr) { 5701 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 5702 EndBlock->getParent(), EndBlock); 5703 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5704 } 5705 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 5706 FalseInst->moveBefore(FalseBranch); 5707 } 5708 } 5709 5710 // If there was nothing to sink, then arbitrarily choose the 'false' side 5711 // for a new input value to the PHI. 5712 if (TrueBlock == FalseBlock) { 5713 assert(TrueBlock == nullptr && 5714 "Unexpected basic block transform while optimizing select"); 5715 5716 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 5717 EndBlock->getParent(), EndBlock); 5718 BranchInst::Create(EndBlock, FalseBlock); 5719 } 5720 5721 // Insert the real conditional branch based on the original condition. 5722 // If we did not create a new block for one of the 'true' or 'false' paths 5723 // of the condition, it means that side of the branch goes to the end block 5724 // directly and the path originates from the start block from the point of 5725 // view of the new PHI. 5726 BasicBlock *TT, *FT; 5727 if (TrueBlock == nullptr) { 5728 TT = EndBlock; 5729 FT = FalseBlock; 5730 TrueBlock = StartBlock; 5731 } else if (FalseBlock == nullptr) { 5732 TT = TrueBlock; 5733 FT = EndBlock; 5734 FalseBlock = StartBlock; 5735 } else { 5736 TT = TrueBlock; 5737 FT = FalseBlock; 5738 } 5739 IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); 5740 5741 SmallPtrSet<const Instruction *, 2> INS; 5742 INS.insert(ASI.begin(), ASI.end()); 5743 // Use reverse iterator because later select may use the value of the 5744 // earlier select, and we need to propagate value through earlier select 5745 // to get the PHI operand. 5746 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 5747 SelectInst *SI = *It; 5748 // The select itself is replaced with a PHI Node. 5749 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 5750 PN->takeName(SI); 5751 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 5752 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 5753 5754 SI->replaceAllUsesWith(PN); 5755 SI->eraseFromParent(); 5756 INS.erase(SI); 5757 ++NumSelectsExpanded; 5758 } 5759 5760 // Instruct OptimizeBlock to skip to the next block. 5761 CurInstIterator = StartBlock->end(); 5762 return true; 5763 } 5764 5765 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 5766 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 5767 int SplatElem = -1; 5768 for (unsigned i = 0; i < Mask.size(); ++i) { 5769 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 5770 return false; 5771 SplatElem = Mask[i]; 5772 } 5773 5774 return true; 5775 } 5776 5777 /// Some targets have expensive vector shifts if the lanes aren't all the same 5778 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 5779 /// it's often worth sinking a shufflevector splat down to its use so that 5780 /// codegen can spot all lanes are identical. 
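/// A sketch of the rewrite (illustrative IR, not from a test case):
/// \code
///   def:
///     %splat = shufflevector <4 x i32> %amt, <4 x i32> undef,
///                            <4 x i32> zeroinitializer
///     ...
///   use:
///     %shl = shl <4 x i32> %x, %splat
/// \endcode
/// becomes
/// \code
///   use:
///     %splat.sunk = shufflevector <4 x i32> %amt, <4 x i32> undef,
///                                 <4 x i32> zeroinitializer
///     %shl = shl <4 x i32> %x, %splat.sunk
/// \endcode
/// so the shift and its uniform amount end up in the same block for isel; the
/// original shuffle is deleted once it has no remaining uses.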
5781 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 5782 BasicBlock *DefBB = SVI->getParent(); 5783 5784 // Only do this xform if variable vector shifts are particularly expensive. 5785 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 5786 return false; 5787 5788 // We only expect better codegen by sinking a shuffle if we can recognise a 5789 // constant splat. 5790 if (!isBroadcastShuffle(SVI)) 5791 return false; 5792 5793 // InsertedShuffles - Only insert a shuffle in each block once. 5794 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 5795 5796 bool MadeChange = false; 5797 for (User *U : SVI->users()) { 5798 Instruction *UI = cast<Instruction>(U); 5799 5800 // Figure out which BB this ext is used in. 5801 BasicBlock *UserBB = UI->getParent(); 5802 if (UserBB == DefBB) continue; 5803 5804 // For now only apply this when the splat is used by a shift instruction. 5805 if (!UI->isShift()) continue; 5806 5807 // Everything checks out, sink the shuffle if the user's block doesn't 5808 // already have a copy. 5809 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 5810 5811 if (!InsertedShuffle) { 5812 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 5813 assert(InsertPt != UserBB->end()); 5814 InsertedShuffle = 5815 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 5816 SVI->getOperand(2), "", &*InsertPt); 5817 } 5818 5819 UI->replaceUsesOfWith(SVI, InsertedShuffle); 5820 MadeChange = true; 5821 } 5822 5823 // If we removed all uses, nuke the shuffle. 5824 if (SVI->use_empty()) { 5825 SVI->eraseFromParent(); 5826 MadeChange = true; 5827 } 5828 5829 return MadeChange; 5830 } 5831 5832 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 5833 if (!TLI || !DL) 5834 return false; 5835 5836 Value *Cond = SI->getCondition(); 5837 Type *OldType = Cond->getType(); 5838 LLVMContext &Context = Cond->getContext(); 5839 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 5840 unsigned RegWidth = RegType.getSizeInBits(); 5841 5842 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 5843 return false; 5844 5845 // If the register width is greater than the type width, expand the condition 5846 // of the switch instruction and each case constant to the width of the 5847 // register. By widening the type of the switch condition, subsequent 5848 // comparisons (for case comparisons) will not need to be extended to the 5849 // preferred register width, so we will potentially eliminate N-1 extends, 5850 // where N is the number of cases in the switch. 5851 auto *NewType = Type::getIntNTy(Context, RegWidth); 5852 5853 // Zero-extend the switch condition and case constants unless the switch 5854 // condition is a function argument that is already being sign-extended. 5855 // In that case, we can avoid an unnecessary mask/extension by sign-extending 5856 // everything instead. 5857 Instruction::CastOps ExtType = Instruction::ZExt; 5858 if (auto *Arg = dyn_cast<Argument>(Cond)) 5859 if (Arg->hasSExtAttr()) 5860 ExtType = Instruction::SExt; 5861 5862 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 5863 ExtInst->insertBefore(SI); 5864 SI->setCondition(ExtInst); 5865 for (auto Case : SI->cases()) { 5866 APInt NarrowConst = Case.getCaseValue()->getValue(); 5867 APInt WideConst = (ExtType == Instruction::ZExt) ? 
                      NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
  }

  return true;
}


namespace {

/// \brief Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;

  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;

  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;

  /// Instruction that will be combined with the transition.
  Instruction *CombineInst = nullptr;

  /// \brief The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// \brief Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 0;
  }

  /// \brief Return the index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 1;
  }

  /// \brief Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// \brief Promote \p ToBePromoted by moving \p Def downward through it.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// \brief Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
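  /// Roughly (an illustrative sketch; actual numbers come from TTI): for
  ///   a = vector_op <2 x i32>
  ///   b = extractelement <2 x i32> a, i32 0
  ///   c = add i32 b, 7
  ///   store i32 c
  /// the scalar side pays for the extract plus the scalar add, while the
  /// promoted side pays for a vector add plus the cost of combining the store
  /// with the new extract; promotion is chosen only when the latter sum is
  /// strictly smaller.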
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    unsigned Align = ST->getAlignment();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            Align)) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the vector-to-scalar
    // transition.
    // The vector chain has to account for the combining cost.
    uint64_t ScalarCost =
        TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
    uint64_t VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueKind Arg0OVK =
          IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                         : TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Arg1OVK =
          !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                          : TargetTransformInfo::OK_AnyValue;
      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               Arg0OVK, Arg1OVK);
    }
    DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
                 << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// \brief Generate a constant vector with \p Val with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
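      // For example, promoting a constant 7 through a transition that extracts
      // lane 1 of a <4 x i32> ideally yields <undef, 7, undef, undef>; if the
      // extract index is not a constant we cannot place the value, so we fall
      // back to the splat <7, 7, 7, 7>.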
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    unsigned End = getTransitionType()->getVectorNumElements();
    if (UseSplat)
      return ConstantVector::getSplat(End, Val);

    SmallVector<Constant *, 4> ConstVec;
    UndefValue *UndefVal = UndefValue::get(Val->getType());
    for (unsigned Idx = 0; Idx != End; ++Idx) {
      if (Idx == ExtractIdx)
        ConstVec.push_back(Val);
      else
        ConstVec.push_back(UndefVal);
    }
    return ConstantVector::get(ConstVec);
  }

  /// \brief Check if promoting the operand at \p OperandIdx in \p Use to a
  /// vector type can trigger undefined behavior.
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost) {
    assert(Transition && "Do not know how to promote null");
  }

  /// \brief Check if we can promote \p ToBePromoted to \p Type.
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// \brief Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, because we may otherwise create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// \brief Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// \brief Record \p ToBePromoted as part of the chain to be promoted.
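  /// Enqueued instructions are not modified here; they are only rewritten, in
  /// order, once promote() has decided the whole chain is profitable.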
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// \brief Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// \brief Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};

} // end anonymous namespace

void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->moveAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}

/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = std::numeric_limits<unsigned>::max();
  if (DisableStoreExtract || !TLI ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
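  // (Typically an extractelement feeding a chain of scalar operations that
  // ends in a store, as in the VectorPromoteHelper example above.)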
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it to a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      DEBUG(dbgs() << "Instruction to promote is in a different block ("
                   << ToBePromoted->getParent()->getName()
                   << ") than the transition (" << Parent->getName() << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      DEBUG(dbgs() << "Assume " << *Inst << '\n'
                   << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}

/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
///            (shl (zext I to i64), 32)), addr)  -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into
/// hoo.
/// void goo(const std::pair<int, float> &);
/// hoo() {
///   ...
///   goo(std::make_pair(tmp, ftmp));
///   ...
/// }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
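  // (Here "simple" means an integer whose store size matches its bit width and
  // that can be split into two equal integer halves, as checked below.)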
  Type *StoreType = SI.getValueOperand()->getType();
  if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
      DL.getTypeSizeInBits(SplitStoreType))
    return false;

  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  //  or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of OR and the first operand of SHL to have only
  // one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers with size no larger than
  // HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before the bitcast
  // as the input to the target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split the store.
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so it may be merged with the split stores by the DAG combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = Builder.CreateBitCast(
        SI.getOperand(1),
        SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
    if (Upper)
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
    Builder.CreateAlignedStore(
        V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}

// Return true if the GEP has two operands, the first operand is of a
// sequential type, and the second operand is a constant.
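// E.g., a single-index GEP such as "%p = getelementptr i32, i32* %base, i64 4".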
static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
  gep_type_iterator I = gep_type_begin(*GEP);
  return GEP->getNumOperands() == 2 &&
         I.isSequential() &&
         isa<ConstantInt>(GEP->getOperand(1));
}

// Try unmerging GEPs to reduce liveness interference (register pressure)
// across IndirectBr edges. Since IndirectBr edges tend to touch on many
// blocks, reducing liveness interference across those edges benefits global
// register allocation. Currently handles only certain cases.
//
// For example, unmerge %GEPI and %UGEPI as below.
//
// ---------- BEFORE ----------
// SrcBlock:
//   ...
//   %GEPIOp = ...
//   ...
//   %GEPI = gep %GEPIOp, Idx
//   ...
//   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
//   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it is used by
//      %UGEPI)
//
// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPIOp, UIdx
//   ...
// ---------------------------
//
// ---------- AFTER ----------
// SrcBlock:
//   ... (same as above)
//   (* %GEPI is still alive on the indirectbr edges)
//   (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
//      unmerging)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPI, (UIdx-Idx)
//   ...
// ---------------------------
//
// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
// no longer alive on them.
//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
// of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), so as
// not to disable further simplifications and optimizations as a result of GEP
// merging.
//
// Note this unmerging may increase the length of the data flow critical path
// (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
// between the register pressure and the length of the data-flow critical path.
// Restricting this to the uncommon IndirectBr case would minimize the impact
// of a potentially longer critical path, if any, and the impact on compile
// time.
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType())
      > TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEPI is used outside the block, meaning it's alive on the
  // IndirectBr edge(s).
  if (find_if(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock) {
            return true;
          }
        }
        return false;
      }) == GEPI->users().end())
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to see whether unmerging would make GEPIOp not
  // alive on IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI) continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that's fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp
    // is the pointer operand to it. If so, record it in the vector. If not,
    // give up.
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType())
        > TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.size() == 0)
    return false;
  // Check the materializing cost of (UIdx-Idx).
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType());
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx =
        ConstantInt::get(GEPIIdx->getType(),
                         UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on IndirectBr edges).
  assert(find_if(GEPIOp->users(), [&](User *Usr) {
           return cast<Instruction>(Usr)->getParent() != SrcBlock;
         }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
  return true;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      /// Sink a zext or sext into its user blocks if the target type doesn't
      /// fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI, TLI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      bool Modified = optimizeLoadExt(LI);
      unsigned AS = LI->getPointerAddressSpace();
      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
      return Modified;
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
      EnableAndCmpSinking && TLI)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
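    // A GEP with all-zero indices computes the same address as its pointer
    // operand (e.g. "getelementptr %struct.S, %struct.S* %p, i32 0, i32 0"),
    // so it can be replaced by a bitcast to the GEP's result type.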
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then ISel may not be able to
// handle it properly. ISel will drop llvm.dbg.value if it cannot find a node
// corresponding to the value.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
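      // (For example, a dbg.value whose value operand is the alloca of a local
      // variable describes that variable's address and is left in place.)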
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
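    // TmpBB will hold the second compare (Cond2) and a second conditional
    // branch to the original successors.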
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block: use the first condition directly in the
    // branch instruction and remove the no longer needed and/or instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch comes now from
    // the newly generated BB (TmpBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions target
    // now the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
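      // For example, with original weights A = 3 and B = 5 this choice gives
      // Br1 weights {3, 13} (A and A+2B) and Br2 weights {3, 10} (A and 2B):
      // TrueProb for BB1 (3/16) plus FalseProb for BB1 times TrueProb for
      // TmpBB (13/16 * 3/13 = 3/16) matches the original TrueProb of 3/8.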
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}