1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass munges the code in the input function to better prepare it for 10 // SelectionDAG-based code generation. This works around limitations in it's 11 // basic-block-at-a-time approach. It should eventually be removed. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/PointerIntPair.h" 19 #include "llvm/ADT/STLExtras.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/Analysis/BlockFrequencyInfo.h" 24 #include "llvm/Analysis/BranchProbabilityInfo.h" 25 #include "llvm/Analysis/ConstantFolding.h" 26 #include "llvm/Analysis/InstructionSimplify.h" 27 #include "llvm/Analysis/LoopInfo.h" 28 #include "llvm/Analysis/MemoryBuiltins.h" 29 #include "llvm/Analysis/ProfileSummaryInfo.h" 30 #include "llvm/Analysis/TargetLibraryInfo.h" 31 #include "llvm/Analysis/TargetTransformInfo.h" 32 #include "llvm/Transforms/Utils/Local.h" 33 #include "llvm/Analysis/ValueTracking.h" 34 #include "llvm/CodeGen/Analysis.h" 35 #include "llvm/CodeGen/ISDOpcodes.h" 36 #include "llvm/CodeGen/SelectionDAGNodes.h" 37 #include "llvm/CodeGen/TargetLowering.h" 38 #include "llvm/CodeGen/TargetPassConfig.h" 39 #include "llvm/CodeGen/TargetSubtargetInfo.h" 40 #include "llvm/CodeGen/ValueTypes.h" 41 #include "llvm/Config/llvm-config.h" 42 #include "llvm/IR/Argument.h" 43 #include "llvm/IR/Attributes.h" 44 #include "llvm/IR/BasicBlock.h" 45 #include "llvm/IR/CallSite.h" 46 #include "llvm/IR/Constant.h" 47 #include "llvm/IR/Constants.h" 48 #include "llvm/IR/DataLayout.h" 49 #include "llvm/IR/DerivedTypes.h" 50 #include "llvm/IR/Dominators.h" 51 #include "llvm/IR/Function.h" 52 #include "llvm/IR/GetElementPtrTypeIterator.h" 53 #include "llvm/IR/GlobalValue.h" 54 #include "llvm/IR/GlobalVariable.h" 55 #include "llvm/IR/IRBuilder.h" 56 #include "llvm/IR/InlineAsm.h" 57 #include "llvm/IR/InstrTypes.h" 58 #include "llvm/IR/Instruction.h" 59 #include "llvm/IR/Instructions.h" 60 #include "llvm/IR/IntrinsicInst.h" 61 #include "llvm/IR/Intrinsics.h" 62 #include "llvm/IR/LLVMContext.h" 63 #include "llvm/IR/MDBuilder.h" 64 #include "llvm/IR/Module.h" 65 #include "llvm/IR/Operator.h" 66 #include "llvm/IR/PatternMatch.h" 67 #include "llvm/IR/Statepoint.h" 68 #include "llvm/IR/Type.h" 69 #include "llvm/IR/Use.h" 70 #include "llvm/IR/User.h" 71 #include "llvm/IR/Value.h" 72 #include "llvm/IR/ValueHandle.h" 73 #include "llvm/IR/ValueMap.h" 74 #include "llvm/Pass.h" 75 #include "llvm/Support/BlockFrequency.h" 76 #include "llvm/Support/BranchProbability.h" 77 #include "llvm/Support/Casting.h" 78 #include "llvm/Support/CommandLine.h" 79 #include "llvm/Support/Compiler.h" 80 #include "llvm/Support/Debug.h" 81 #include "llvm/Support/ErrorHandling.h" 82 #include "llvm/Support/MachineValueType.h" 83 #include "llvm/Support/MathExtras.h" 84 #include "llvm/Support/raw_ostream.h" 85 #include "llvm/Target/TargetMachine.h" 86 #include "llvm/Target/TargetOptions.h" 87 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 88 #include "llvm/Transforms/Utils/BypassSlowDivision.h" 89 #include "llvm/Transforms/Utils/SimplifyLibCalls.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <limits> 95 #include <memory> 96 #include <utility> 97 #include <vector> 98 99 using namespace llvm; 100 using namespace llvm::PatternMatch; 101 102 #define DEBUG_TYPE "codegenprepare" 103 104 STATISTIC(NumBlocksElim, "Number of blocks eliminated"); 105 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); 106 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); 107 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " 108 "sunken Cmps"); 109 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " 110 "of sunken Casts"); 111 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " 112 "computations were sunk"); 113 STATISTIC(NumMemoryInstsPhiCreated, 114 "Number of phis created when address " 115 "computations were sunk to memory instructions"); 116 STATISTIC(NumMemoryInstsSelectCreated, 117 "Number of select created when address " 118 "computations were sunk to memory instructions"); 119 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); 120 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); 121 STATISTIC(NumAndsAdded, 122 "Number of and mask instructions added to form ext loads"); 123 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); 124 STATISTIC(NumRetsDup, "Number of return instructions duplicated"); 125 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); 126 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); 127 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); 128 129 static cl::opt<bool> DisableBranchOpts( 130 "disable-cgp-branch-opts", cl::Hidden, cl::init(false), 131 cl::desc("Disable branch optimizations in CodeGenPrepare")); 132 133 static cl::opt<bool> 134 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), 135 cl::desc("Disable GC optimizations in CodeGenPrepare")); 136 137 static cl::opt<bool> DisableSelectToBranch( 138 "disable-cgp-select2branch", cl::Hidden, cl::init(false), 139 cl::desc("Disable select to branch conversion.")); 140 141 static cl::opt<bool> AddrSinkUsingGEPs( 142 "addr-sink-using-gep", cl::Hidden, cl::init(true), 143 cl::desc("Address sinking in CGP using GEPs.")); 144 145 static cl::opt<bool> EnableAndCmpSinking( 146 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 147 cl::desc("Enable sinkinig and/cmp into branches.")); 148 149 static cl::opt<bool> DisableStoreExtract( 150 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 151 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 152 153 static cl::opt<bool> StressStoreExtract( 154 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 155 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 156 157 static cl::opt<bool> DisableExtLdPromotion( 158 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 159 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 160 "CodeGenPrepare")); 161 162 static cl::opt<bool> StressExtLdPromotion( 163 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 164 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 165 "optimization in CodeGenPrepare")); 166 167 static cl::opt<bool> DisablePreheaderProtect( 168 "disable-preheader-prot", cl::Hidden, cl::init(false), 169 cl::desc("Disable protection against removing loop preheaders")); 170 171 static cl::opt<bool> ProfileGuidedSectionPrefix( 172 "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore, 173 cl::desc("Use profile info to add section prefix for hot/cold functions")); 174 175 static cl::opt<unsigned> FreqRatioToSkipMerge( 176 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), 177 cl::desc("Skip merging empty blocks if (frequency of empty block) / " 178 "(frequency of destination block) is greater than this ratio")); 179 180 static cl::opt<bool> ForceSplitStore( 181 "force-split-store", cl::Hidden, cl::init(false), 182 cl::desc("Force store splitting no matter what the target query says.")); 183 184 static cl::opt<bool> 185 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden, 186 cl::desc("Enable merging of redundant sexts when one is dominating" 187 " the other."), cl::init(true)); 188 189 static cl::opt<bool> DisableComplexAddrModes( 190 "disable-complex-addr-modes", cl::Hidden, cl::init(false), 191 cl::desc("Disables combining addressing modes with different parts " 192 "in optimizeMemoryInst.")); 193 194 static cl::opt<bool> 195 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false), 196 cl::desc("Allow creation of Phis in Address sinking.")); 197 198 static cl::opt<bool> 199 AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true), 200 cl::desc("Allow creation of selects in Address sinking.")); 201 202 static cl::opt<bool> AddrSinkCombineBaseReg( 203 "addr-sink-combine-base-reg", cl::Hidden, cl::init(true), 204 cl::desc("Allow combining of BaseReg field in Address sinking.")); 205 206 static cl::opt<bool> AddrSinkCombineBaseGV( 207 "addr-sink-combine-base-gv", cl::Hidden, cl::init(true), 208 cl::desc("Allow combining of BaseGV field in Address sinking.")); 209 210 static cl::opt<bool> AddrSinkCombineBaseOffs( 211 "addr-sink-combine-base-offs", cl::Hidden, cl::init(true), 212 cl::desc("Allow combining of BaseOffs field in Address sinking.")); 213 214 static cl::opt<bool> AddrSinkCombineScaledReg( 215 "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), 216 cl::desc("Allow combining of ScaledReg field in Address sinking.")); 217 218 static cl::opt<bool> 219 EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden, 220 cl::init(true), 221 cl::desc("Enable splitting large offset of GEP.")); 222 223 namespace { 224 225 enum ExtType { 226 ZeroExtension, // Zero extension has been seen. 227 SignExtension, // Sign extension has been seen. 228 BothExtension // This extension type is used if we saw sext after 229 // ZeroExtension had been set, or if we saw zext after 230 // SignExtension had been set. It makes the type 231 // information of a promoted instruction invalid. 232 }; 233 234 using SetOfInstrs = SmallPtrSet<Instruction *, 16>; 235 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>; 236 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>; 237 using SExts = SmallVector<Instruction *, 16>; 238 using ValueToSExts = DenseMap<Value *, SExts>; 239 240 class TypePromotionTransaction; 241 242 class CodeGenPrepare : public FunctionPass { 243 const TargetMachine *TM = nullptr; 244 const TargetSubtargetInfo *SubtargetInfo; 245 const TargetLowering *TLI = nullptr; 246 const TargetRegisterInfo *TRI; 247 const TargetTransformInfo *TTI = nullptr; 248 const TargetLibraryInfo *TLInfo; 249 const LoopInfo *LI; 250 std::unique_ptr<BlockFrequencyInfo> BFI; 251 std::unique_ptr<BranchProbabilityInfo> BPI; 252 253 /// As we scan instructions optimizing them, this is the next instruction 254 /// to optimize. Transforms that can invalidate this should update it. 255 BasicBlock::iterator CurInstIterator; 256 257 /// Keeps track of non-local addresses that have been sunk into a block. 258 /// This allows us to avoid inserting duplicate code for blocks with 259 /// multiple load/stores of the same address. The usage of WeakTrackingVH 260 /// enables SunkAddrs to be treated as a cache whose entries can be 261 /// invalidated if a sunken address computation has been erased. 262 ValueMap<Value*, WeakTrackingVH> SunkAddrs; 263 264 /// Keeps track of all instructions inserted for the current function. 265 SetOfInstrs InsertedInsts; 266 267 /// Keeps track of the type of the related instruction before their 268 /// promotion for the current function. 269 InstrToOrigTy PromotedInsts; 270 271 /// Keep track of instructions removed during promotion. 272 SetOfInstrs RemovedInsts; 273 274 /// Keep track of sext chains based on their initial value. 275 DenseMap<Value *, Instruction *> SeenChainsForSExt; 276 277 /// Keep track of GEPs accessing the same data structures such as structs or 278 /// arrays that are candidates to be split later because of their large 279 /// size. 280 MapVector< 281 AssertingVH<Value>, 282 SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>> 283 LargeOffsetGEPMap; 284 285 /// Keep track of new GEP base after splitting the GEPs having large offset. 286 SmallSet<AssertingVH<Value>, 2> NewGEPBases; 287 288 /// Map serial numbers to Large offset GEPs. 289 DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID; 290 291 /// Keep track of SExt promoted. 292 ValueToSExts ValToSExtendedUses; 293 294 /// True if optimizing for size. 295 bool OptSize; 296 297 /// DataLayout for the Function being processed. 298 const DataLayout *DL = nullptr; 299 300 public: 301 static char ID; // Pass identification, replacement for typeid 302 303 CodeGenPrepare() : FunctionPass(ID) { 304 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); 305 } 306 307 bool runOnFunction(Function &F) override; 308 309 StringRef getPassName() const override { return "CodeGen Prepare"; } 310 311 void getAnalysisUsage(AnalysisUsage &AU) const override { 312 // FIXME: When we can selectively preserve passes, preserve the domtree. 313 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 314 AU.addRequired<TargetLibraryInfoWrapperPass>(); 315 AU.addRequired<TargetTransformInfoWrapperPass>(); 316 AU.addRequired<LoopInfoWrapperPass>(); 317 } 318 319 private: 320 template <typename F> 321 void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) { 322 // Substituting can cause recursive simplifications, which can invalidate 323 // our iterator. Use a WeakTrackingVH to hold onto it in case this 324 // happens. 325 Value *CurValue = &*CurInstIterator; 326 WeakTrackingVH IterHandle(CurValue); 327 328 f(); 329 330 // If the iterator instruction was recursively deleted, start over at the 331 // start of the block. 332 if (IterHandle != CurValue) { 333 CurInstIterator = BB->begin(); 334 SunkAddrs.clear(); 335 } 336 } 337 338 bool eliminateFallThrough(Function &F); 339 bool eliminateMostlyEmptyBlocks(Function &F); 340 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB); 341 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; 342 void eliminateMostlyEmptyBlock(BasicBlock *BB); 343 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB, 344 bool isPreheader); 345 bool optimizeBlock(BasicBlock &BB, DominatorTree &DT, bool &ModifiedDT); 346 bool optimizeInst(Instruction *I, DominatorTree &DT, bool &ModifiedDT); 347 bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 348 Type *AccessTy, unsigned AddrSpace); 349 bool optimizeInlineAsmInst(CallInst *CS); 350 bool optimizeCallInst(CallInst *CI, bool &ModifiedDT); 351 bool optimizeExt(Instruction *&I); 352 bool optimizeExtUses(Instruction *I); 353 bool optimizeLoadExt(LoadInst *Load); 354 bool optimizeSelectInst(SelectInst *SI, bool &ModifiedDT); 355 bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI); 356 bool optimizeSwitchInst(SwitchInst *SI); 357 bool optimizeExtractElementInst(Instruction *Inst); 358 bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT); 359 bool placeDbgValues(Function &F); 360 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts, 361 LoadInst *&LI, Instruction *&Inst, bool HasPromoted); 362 bool tryToPromoteExts(TypePromotionTransaction &TPT, 363 const SmallVectorImpl<Instruction *> &Exts, 364 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 365 unsigned CreatedInstsCost = 0); 366 bool mergeSExts(Function &F, DominatorTree &DT); 367 bool splitLargeGEPOffsets(); 368 bool performAddressTypePromotion( 369 Instruction *&Inst, 370 bool AllowPromotionWithoutCommonHeader, 371 bool HasPromoted, TypePromotionTransaction &TPT, 372 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts); 373 bool splitBranchCondition(Function &F, bool &ModifiedDT); 374 bool simplifyOffsetableRelocate(Instruction &I); 375 376 bool tryToSinkFreeOperands(Instruction *I); 377 }; 378 379 } // end anonymous namespace 380 381 char CodeGenPrepare::ID = 0; 382 383 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE, 384 "Optimize for code generation", false, false) 385 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 386 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, 387 "Optimize for code generation", false, false) 388 389 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); } 390 391 bool CodeGenPrepare::runOnFunction(Function &F) { 392 if (skipFunction(F)) 393 return false; 394 395 DL = &F.getParent()->getDataLayout(); 396 397 bool EverMadeChange = false; 398 // Clear per function information. 399 InsertedInsts.clear(); 400 PromotedInsts.clear(); 401 402 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) { 403 TM = &TPC->getTM<TargetMachine>(); 404 SubtargetInfo = TM->getSubtargetImpl(F); 405 TLI = SubtargetInfo->getTargetLowering(); 406 TRI = SubtargetInfo->getRegisterInfo(); 407 } 408 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 409 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 410 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 411 BPI.reset(new BranchProbabilityInfo(F, *LI)); 412 BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI)); 413 OptSize = F.optForSize(); 414 415 ProfileSummaryInfo *PSI = 416 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 417 if (ProfileGuidedSectionPrefix) { 418 if (PSI->isFunctionHotInCallGraph(&F, *BFI)) 419 F.setSectionPrefix(".hot"); 420 else if (PSI->isFunctionColdInCallGraph(&F, *BFI)) 421 F.setSectionPrefix(".unlikely"); 422 } 423 424 /// This optimization identifies DIV instructions that can be 425 /// profitably bypassed and carried out with a shorter, faster divide. 426 if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI && 427 TLI->isSlowDivBypassed()) { 428 const DenseMap<unsigned int, unsigned int> &BypassWidths = 429 TLI->getBypassSlowDivWidths(); 430 BasicBlock* BB = &*F.begin(); 431 while (BB != nullptr) { 432 // bypassSlowDivision may create new BBs, but we don't want to reapply the 433 // optimization to those blocks. 434 BasicBlock* Next = BB->getNextNode(); 435 EverMadeChange |= bypassSlowDivision(BB, BypassWidths); 436 BB = Next; 437 } 438 } 439 440 // Eliminate blocks that contain only PHI nodes and an 441 // unconditional branch. 442 EverMadeChange |= eliminateMostlyEmptyBlocks(F); 443 444 bool ModifiedDT = false; 445 if (!DisableBranchOpts) 446 EverMadeChange |= splitBranchCondition(F, ModifiedDT); 447 448 // Split some critical edges where one of the sources is an indirect branch, 449 // to help generate sane code for PHIs involving such edges. 450 EverMadeChange |= SplitIndirectBrCriticalEdges(F); 451 452 bool MadeChange = true; 453 while (MadeChange) { 454 MadeChange = false; 455 DominatorTree DT(F); 456 for (Function::iterator I = F.begin(); I != F.end(); ) { 457 BasicBlock *BB = &*I++; 458 bool ModifiedDTOnIteration = false; 459 MadeChange |= optimizeBlock(*BB, DT, ModifiedDTOnIteration); 460 461 // Restart BB iteration if the dominator tree of the Function was changed 462 if (ModifiedDTOnIteration) 463 break; 464 } 465 if (EnableTypePromotionMerge && !ValToSExtendedUses.empty()) 466 MadeChange |= mergeSExts(F, DT); 467 if (!LargeOffsetGEPMap.empty()) 468 MadeChange |= splitLargeGEPOffsets(); 469 470 // Really free removed instructions during promotion. 471 for (Instruction *I : RemovedInsts) 472 I->deleteValue(); 473 474 EverMadeChange |= MadeChange; 475 SeenChainsForSExt.clear(); 476 ValToSExtendedUses.clear(); 477 RemovedInsts.clear(); 478 LargeOffsetGEPMap.clear(); 479 LargeOffsetGEPID.clear(); 480 } 481 482 SunkAddrs.clear(); 483 484 if (!DisableBranchOpts) { 485 MadeChange = false; 486 // Use a set vector to get deterministic iteration order. The order the 487 // blocks are removed may affect whether or not PHI nodes in successors 488 // are removed. 489 SmallSetVector<BasicBlock*, 8> WorkList; 490 for (BasicBlock &BB : F) { 491 SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB)); 492 MadeChange |= ConstantFoldTerminator(&BB, true); 493 if (!MadeChange) continue; 494 495 for (SmallVectorImpl<BasicBlock*>::iterator 496 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 497 if (pred_begin(*II) == pred_end(*II)) 498 WorkList.insert(*II); 499 } 500 501 // Delete the dead blocks and any of their dead successors. 502 MadeChange |= !WorkList.empty(); 503 while (!WorkList.empty()) { 504 BasicBlock *BB = WorkList.pop_back_val(); 505 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); 506 507 DeleteDeadBlock(BB); 508 509 for (SmallVectorImpl<BasicBlock*>::iterator 510 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 511 if (pred_begin(*II) == pred_end(*II)) 512 WorkList.insert(*II); 513 } 514 515 // Merge pairs of basic blocks with unconditional branches, connected by 516 // a single edge. 517 if (EverMadeChange || MadeChange) 518 MadeChange |= eliminateFallThrough(F); 519 520 EverMadeChange |= MadeChange; 521 } 522 523 if (!DisableGCOpts) { 524 SmallVector<Instruction *, 2> Statepoints; 525 for (BasicBlock &BB : F) 526 for (Instruction &I : BB) 527 if (isStatepoint(I)) 528 Statepoints.push_back(&I); 529 for (auto &I : Statepoints) 530 EverMadeChange |= simplifyOffsetableRelocate(*I); 531 } 532 533 // Do this last to clean up use-before-def scenarios introduced by other 534 // preparatory transforms. 535 EverMadeChange |= placeDbgValues(F); 536 537 return EverMadeChange; 538 } 539 540 /// Merge basic blocks which are connected by a single edge, where one of the 541 /// basic blocks has a single successor pointing to the other basic block, 542 /// which has a single predecessor. 543 bool CodeGenPrepare::eliminateFallThrough(Function &F) { 544 bool Changed = false; 545 // Scan all of the blocks in the function, except for the entry block. 546 // Use a temporary array to avoid iterator being invalidated when 547 // deleting blocks. 548 SmallVector<WeakTrackingVH, 16> Blocks; 549 for (auto &Block : llvm::make_range(std::next(F.begin()), F.end())) 550 Blocks.push_back(&Block); 551 552 for (auto &Block : Blocks) { 553 auto *BB = cast_or_null<BasicBlock>(Block); 554 if (!BB) 555 continue; 556 // If the destination block has a single pred, then this is a trivial 557 // edge, just collapse it. 558 BasicBlock *SinglePred = BB->getSinglePredecessor(); 559 560 // Don't merge if BB's address is taken. 561 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; 562 563 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); 564 if (Term && !Term->isConditional()) { 565 Changed = true; 566 LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n"); 567 568 // Merge BB into SinglePred and delete it. 569 MergeBlockIntoPredecessor(BB); 570 } 571 } 572 return Changed; 573 } 574 575 /// Find a destination block from BB if BB is mergeable empty block. 576 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) { 577 // If this block doesn't end with an uncond branch, ignore it. 578 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); 579 if (!BI || !BI->isUnconditional()) 580 return nullptr; 581 582 // If the instruction before the branch (skipping debug info) isn't a phi 583 // node, then other stuff is happening here. 584 BasicBlock::iterator BBI = BI->getIterator(); 585 if (BBI != BB->begin()) { 586 --BBI; 587 while (isa<DbgInfoIntrinsic>(BBI)) { 588 if (BBI == BB->begin()) 589 break; 590 --BBI; 591 } 592 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) 593 return nullptr; 594 } 595 596 // Do not break infinite loops. 597 BasicBlock *DestBB = BI->getSuccessor(0); 598 if (DestBB == BB) 599 return nullptr; 600 601 if (!canMergeBlocks(BB, DestBB)) 602 DestBB = nullptr; 603 604 return DestBB; 605 } 606 607 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an 608 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split 609 /// edges in ways that are non-optimal for isel. Start by eliminating these 610 /// blocks so we can split them the way we want them. 611 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) { 612 SmallPtrSet<BasicBlock *, 16> Preheaders; 613 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end()); 614 while (!LoopList.empty()) { 615 Loop *L = LoopList.pop_back_val(); 616 LoopList.insert(LoopList.end(), L->begin(), L->end()); 617 if (BasicBlock *Preheader = L->getLoopPreheader()) 618 Preheaders.insert(Preheader); 619 } 620 621 bool MadeChange = false; 622 // Copy blocks into a temporary array to avoid iterator invalidation issues 623 // as we remove them. 624 // Note that this intentionally skips the entry block. 625 SmallVector<WeakTrackingVH, 16> Blocks; 626 for (auto &Block : llvm::make_range(std::next(F.begin()), F.end())) 627 Blocks.push_back(&Block); 628 629 for (auto &Block : Blocks) { 630 BasicBlock *BB = cast_or_null<BasicBlock>(Block); 631 if (!BB) 632 continue; 633 BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB); 634 if (!DestBB || 635 !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB))) 636 continue; 637 638 eliminateMostlyEmptyBlock(BB); 639 MadeChange = true; 640 } 641 return MadeChange; 642 } 643 644 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB, 645 BasicBlock *DestBB, 646 bool isPreheader) { 647 // Do not delete loop preheaders if doing so would create a critical edge. 648 // Loop preheaders can be good locations to spill registers. If the 649 // preheader is deleted and we create a critical edge, registers may be 650 // spilled in the loop body instead. 651 if (!DisablePreheaderProtect && isPreheader && 652 !(BB->getSinglePredecessor() && 653 BB->getSinglePredecessor()->getSingleSuccessor())) 654 return false; 655 656 // Skip merging if the block's successor is also a successor to any callbr 657 // that leads to this block. 658 // FIXME: Is this really needed? Is this a correctness issue? 659 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { 660 if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator())) 661 for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i) 662 if (DestBB == CBI->getSuccessor(i)) 663 return false; 664 } 665 666 // Try to skip merging if the unique predecessor of BB is terminated by a 667 // switch or indirect branch instruction, and BB is used as an incoming block 668 // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to 669 // add COPY instructions in the predecessor of BB instead of BB (if it is not 670 // merged). Note that the critical edge created by merging such blocks wont be 671 // split in MachineSink because the jump table is not analyzable. By keeping 672 // such empty block (BB), ISel will place COPY instructions in BB, not in the 673 // predecessor of BB. 674 BasicBlock *Pred = BB->getUniquePredecessor(); 675 if (!Pred || 676 !(isa<SwitchInst>(Pred->getTerminator()) || 677 isa<IndirectBrInst>(Pred->getTerminator()))) 678 return true; 679 680 if (BB->getTerminator() != BB->getFirstNonPHIOrDbg()) 681 return true; 682 683 // We use a simple cost heuristic which determine skipping merging is 684 // profitable if the cost of skipping merging is less than the cost of 685 // merging : Cost(skipping merging) < Cost(merging BB), where the 686 // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and 687 // the Cost(merging BB) is Freq(Pred) * Cost(Copy). 688 // Assuming Cost(Copy) == Cost(Branch), we could simplify it to : 689 // Freq(Pred) / Freq(BB) > 2. 690 // Note that if there are multiple empty blocks sharing the same incoming 691 // value for the PHIs in the DestBB, we consider them together. In such 692 // case, Cost(merging BB) will be the sum of their frequencies. 693 694 if (!isa<PHINode>(DestBB->begin())) 695 return true; 696 697 SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs; 698 699 // Find all other incoming blocks from which incoming values of all PHIs in 700 // DestBB are the same as the ones from BB. 701 for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E; 702 ++PI) { 703 BasicBlock *DestBBPred = *PI; 704 if (DestBBPred == BB) 705 continue; 706 707 if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) { 708 return DestPN.getIncomingValueForBlock(BB) == 709 DestPN.getIncomingValueForBlock(DestBBPred); 710 })) 711 SameIncomingValueBBs.insert(DestBBPred); 712 } 713 714 // See if all BB's incoming values are same as the value from Pred. In this 715 // case, no reason to skip merging because COPYs are expected to be place in 716 // Pred already. 717 if (SameIncomingValueBBs.count(Pred)) 718 return true; 719 720 BlockFrequency PredFreq = BFI->getBlockFreq(Pred); 721 BlockFrequency BBFreq = BFI->getBlockFreq(BB); 722 723 for (auto SameValueBB : SameIncomingValueBBs) 724 if (SameValueBB->getUniquePredecessor() == Pred && 725 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB)) 726 BBFreq += BFI->getBlockFreq(SameValueBB); 727 728 return PredFreq.getFrequency() <= 729 BBFreq.getFrequency() * FreqRatioToSkipMerge; 730 } 731 732 /// Return true if we can merge BB into DestBB if there is a single 733 /// unconditional branch between them, and BB contains no other non-phi 734 /// instructions. 735 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB, 736 const BasicBlock *DestBB) const { 737 // We only want to eliminate blocks whose phi nodes are used by phi nodes in 738 // the successor. If there are more complex condition (e.g. preheaders), 739 // don't mess around with them. 740 for (const PHINode &PN : BB->phis()) { 741 for (const User *U : PN.users()) { 742 const Instruction *UI = cast<Instruction>(U); 743 if (UI->getParent() != DestBB || !isa<PHINode>(UI)) 744 return false; 745 // If User is inside DestBB block and it is a PHINode then check 746 // incoming value. If incoming value is not from BB then this is 747 // a complex condition (e.g. preheaders) we want to avoid here. 748 if (UI->getParent() == DestBB) { 749 if (const PHINode *UPN = dyn_cast<PHINode>(UI)) 750 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) { 751 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I)); 752 if (Insn && Insn->getParent() == BB && 753 Insn->getParent() != UPN->getIncomingBlock(I)) 754 return false; 755 } 756 } 757 } 758 } 759 760 // If BB and DestBB contain any common predecessors, then the phi nodes in BB 761 // and DestBB may have conflicting incoming values for the block. If so, we 762 // can't merge the block. 763 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin()); 764 if (!DestBBPN) return true; // no conflict. 765 766 // Collect the preds of BB. 767 SmallPtrSet<const BasicBlock*, 16> BBPreds; 768 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 769 // It is faster to get preds from a PHI than with pred_iterator. 770 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 771 BBPreds.insert(BBPN->getIncomingBlock(i)); 772 } else { 773 BBPreds.insert(pred_begin(BB), pred_end(BB)); 774 } 775 776 // Walk the preds of DestBB. 777 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { 778 BasicBlock *Pred = DestBBPN->getIncomingBlock(i); 779 if (BBPreds.count(Pred)) { // Common predecessor? 780 for (const PHINode &PN : DestBB->phis()) { 781 const Value *V1 = PN.getIncomingValueForBlock(Pred); 782 const Value *V2 = PN.getIncomingValueForBlock(BB); 783 784 // If V2 is a phi node in BB, look up what the mapped value will be. 785 if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) 786 if (V2PN->getParent() == BB) 787 V2 = V2PN->getIncomingValueForBlock(Pred); 788 789 // If there is a conflict, bail out. 790 if (V1 != V2) return false; 791 } 792 } 793 } 794 795 return true; 796 } 797 798 /// Eliminate a basic block that has only phi's and an unconditional branch in 799 /// it. 800 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) { 801 BranchInst *BI = cast<BranchInst>(BB->getTerminator()); 802 BasicBlock *DestBB = BI->getSuccessor(0); 803 804 LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" 805 << *BB << *DestBB); 806 807 // If the destination block has a single pred, then this is a trivial edge, 808 // just collapse it. 809 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { 810 if (SinglePred != DestBB) { 811 assert(SinglePred == BB && 812 "Single predecessor not the same as predecessor"); 813 // Merge DestBB into SinglePred/BB and delete it. 814 MergeBlockIntoPredecessor(DestBB); 815 // Note: BB(=SinglePred) will not be deleted on this path. 816 // DestBB(=its single successor) is the one that was deleted. 817 LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n"); 818 return; 819 } 820 } 821 822 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB 823 // to handle the new incoming edges it is about to have. 824 for (PHINode &PN : DestBB->phis()) { 825 // Remove the incoming value for BB, and remember it. 826 Value *InVal = PN.removeIncomingValue(BB, false); 827 828 // Two options: either the InVal is a phi node defined in BB or it is some 829 // value that dominates BB. 830 PHINode *InValPhi = dyn_cast<PHINode>(InVal); 831 if (InValPhi && InValPhi->getParent() == BB) { 832 // Add all of the input values of the input PHI as inputs of this phi. 833 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) 834 PN.addIncoming(InValPhi->getIncomingValue(i), 835 InValPhi->getIncomingBlock(i)); 836 } else { 837 // Otherwise, add one instance of the dominating value for each edge that 838 // we will be adding. 839 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 840 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 841 PN.addIncoming(InVal, BBPN->getIncomingBlock(i)); 842 } else { 843 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) 844 PN.addIncoming(InVal, *PI); 845 } 846 } 847 } 848 849 // The PHIs are now updated, change everything that refers to BB to use 850 // DestBB and remove BB. 851 BB->replaceAllUsesWith(DestBB); 852 BB->eraseFromParent(); 853 ++NumBlocksElim; 854 855 LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 856 } 857 858 // Computes a map of base pointer relocation instructions to corresponding 859 // derived pointer relocation instructions given a vector of all relocate calls 860 static void computeBaseDerivedRelocateMap( 861 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, 862 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> 863 &RelocateInstMap) { 864 // Collect information in two maps: one primarily for locating the base object 865 // while filling the second map; the second map is the final structure holding 866 // a mapping between Base and corresponding Derived relocate calls 867 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; 868 for (auto *ThisRelocate : AllRelocateCalls) { 869 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), 870 ThisRelocate->getDerivedPtrIndex()); 871 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); 872 } 873 for (auto &Item : RelocateIdxMap) { 874 std::pair<unsigned, unsigned> Key = Item.first; 875 if (Key.first == Key.second) 876 // Base relocation: nothing to insert 877 continue; 878 879 GCRelocateInst *I = Item.second; 880 auto BaseKey = std::make_pair(Key.first, Key.first); 881 882 // We're iterating over RelocateIdxMap so we cannot modify it. 883 auto MaybeBase = RelocateIdxMap.find(BaseKey); 884 if (MaybeBase == RelocateIdxMap.end()) 885 // TODO: We might want to insert a new base object relocate and gep off 886 // that, if there are enough derived object relocates. 887 continue; 888 889 RelocateInstMap[MaybeBase->second].push_back(I); 890 } 891 } 892 893 // Accepts a GEP and extracts the operands into a vector provided they're all 894 // small integer constants 895 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, 896 SmallVectorImpl<Value *> &OffsetV) { 897 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 898 // Only accept small constant integer operands 899 auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); 900 if (!Op || Op->getZExtValue() > 20) 901 return false; 902 } 903 904 for (unsigned i = 1; i < GEP->getNumOperands(); i++) 905 OffsetV.push_back(GEP->getOperand(i)); 906 return true; 907 } 908 909 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to 910 // replace, computes a replacement, and affects it. 911 static bool 912 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, 913 const SmallVectorImpl<GCRelocateInst *> &Targets) { 914 bool MadeChange = false; 915 // We must ensure the relocation of derived pointer is defined after 916 // relocation of base pointer. If we find a relocation corresponding to base 917 // defined earlier than relocation of base then we move relocation of base 918 // right before found relocation. We consider only relocation in the same 919 // basic block as relocation of base. Relocations from other basic block will 920 // be skipped by optimization and we do not care about them. 921 for (auto R = RelocatedBase->getParent()->getFirstInsertionPt(); 922 &*R != RelocatedBase; ++R) 923 if (auto RI = dyn_cast<GCRelocateInst>(R)) 924 if (RI->getStatepoint() == RelocatedBase->getStatepoint()) 925 if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) { 926 RelocatedBase->moveBefore(RI); 927 break; 928 } 929 930 for (GCRelocateInst *ToReplace : Targets) { 931 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() && 932 "Not relocating a derived object of the original base object"); 933 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) { 934 // A duplicate relocate call. TODO: coalesce duplicates. 935 continue; 936 } 937 938 if (RelocatedBase->getParent() != ToReplace->getParent()) { 939 // Base and derived relocates are in different basic blocks. 940 // In this case transform is only valid when base dominates derived 941 // relocate. However it would be too expensive to check dominance 942 // for each such relocate, so we skip the whole transformation. 943 continue; 944 } 945 946 Value *Base = ToReplace->getBasePtr(); 947 auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr()); 948 if (!Derived || Derived->getPointerOperand() != Base) 949 continue; 950 951 SmallVector<Value *, 2> OffsetV; 952 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV)) 953 continue; 954 955 // Create a Builder and replace the target callsite with a gep 956 assert(RelocatedBase->getNextNode() && 957 "Should always have one since it's not a terminator"); 958 959 // Insert after RelocatedBase 960 IRBuilder<> Builder(RelocatedBase->getNextNode()); 961 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc()); 962 963 // If gc_relocate does not match the actual type, cast it to the right type. 964 // In theory, there must be a bitcast after gc_relocate if the type does not 965 // match, and we should reuse it to get the derived pointer. But it could be 966 // cases like this: 967 // bb1: 968 // ... 969 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) 970 // br label %merge 971 // 972 // bb2: 973 // ... 974 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) 975 // br label %merge 976 // 977 // merge: 978 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ] 979 // %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)* 980 // 981 // In this case, we can not find the bitcast any more. So we insert a new bitcast 982 // no matter there is already one or not. In this way, we can handle all cases, and 983 // the extra bitcast should be optimized away in later passes. 984 Value *ActualRelocatedBase = RelocatedBase; 985 if (RelocatedBase->getType() != Base->getType()) { 986 ActualRelocatedBase = 987 Builder.CreateBitCast(RelocatedBase, Base->getType()); 988 } 989 Value *Replacement = Builder.CreateGEP( 990 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV)); 991 Replacement->takeName(ToReplace); 992 // If the newly generated derived pointer's type does not match the original derived 993 // pointer's type, cast the new derived pointer to match it. Same reasoning as above. 994 Value *ActualReplacement = Replacement; 995 if (Replacement->getType() != ToReplace->getType()) { 996 ActualReplacement = 997 Builder.CreateBitCast(Replacement, ToReplace->getType()); 998 } 999 ToReplace->replaceAllUsesWith(ActualReplacement); 1000 ToReplace->eraseFromParent(); 1001 1002 MadeChange = true; 1003 } 1004 return MadeChange; 1005 } 1006 1007 // Turns this: 1008 // 1009 // %base = ... 1010 // %ptr = gep %base + 15 1011 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1012 // %base' = relocate(%tok, i32 4, i32 4) 1013 // %ptr' = relocate(%tok, i32 4, i32 5) 1014 // %val = load %ptr' 1015 // 1016 // into this: 1017 // 1018 // %base = ... 1019 // %ptr = gep %base + 15 1020 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1021 // %base' = gc.relocate(%tok, i32 4, i32 4) 1022 // %ptr' = gep %base' + 15 1023 // %val = load %ptr' 1024 bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) { 1025 bool MadeChange = false; 1026 SmallVector<GCRelocateInst *, 2> AllRelocateCalls; 1027 1028 for (auto *U : I.users()) 1029 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) 1030 // Collect all the relocate calls associated with a statepoint 1031 AllRelocateCalls.push_back(Relocate); 1032 1033 // We need atleast one base pointer relocation + one derived pointer 1034 // relocation to mangle 1035 if (AllRelocateCalls.size() < 2) 1036 return false; 1037 1038 // RelocateInstMap is a mapping from the base relocate instruction to the 1039 // corresponding derived relocate instructions 1040 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; 1041 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); 1042 if (RelocateInstMap.empty()) 1043 return false; 1044 1045 for (auto &Item : RelocateInstMap) 1046 // Item.first is the RelocatedBase to offset against 1047 // Item.second is the vector of Targets to replace 1048 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second); 1049 return MadeChange; 1050 } 1051 1052 /// Sink the specified cast instruction into its user blocks. 1053 static bool SinkCast(CastInst *CI) { 1054 BasicBlock *DefBB = CI->getParent(); 1055 1056 /// InsertedCasts - Only insert a cast in each block once. 1057 DenseMap<BasicBlock*, CastInst*> InsertedCasts; 1058 1059 bool MadeChange = false; 1060 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 1061 UI != E; ) { 1062 Use &TheUse = UI.getUse(); 1063 Instruction *User = cast<Instruction>(*UI); 1064 1065 // Figure out which BB this cast is used in. For PHI's this is the 1066 // appropriate predecessor block. 1067 BasicBlock *UserBB = User->getParent(); 1068 if (PHINode *PN = dyn_cast<PHINode>(User)) { 1069 UserBB = PN->getIncomingBlock(TheUse); 1070 } 1071 1072 // Preincrement use iterator so we don't invalidate it. 1073 ++UI; 1074 1075 // The first insertion point of a block containing an EH pad is after the 1076 // pad. If the pad is the user, we cannot sink the cast past the pad. 1077 if (User->isEHPad()) 1078 continue; 1079 1080 // If the block selected to receive the cast is an EH pad that does not 1081 // allow non-PHI instructions before the terminator, we can't sink the 1082 // cast. 1083 if (UserBB->getTerminator()->isEHPad()) 1084 continue; 1085 1086 // If this user is in the same block as the cast, don't change the cast. 1087 if (UserBB == DefBB) continue; 1088 1089 // If we have already inserted a cast into this block, use it. 1090 CastInst *&InsertedCast = InsertedCasts[UserBB]; 1091 1092 if (!InsertedCast) { 1093 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1094 assert(InsertPt != UserBB->end()); 1095 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 1096 CI->getType(), "", &*InsertPt); 1097 InsertedCast->setDebugLoc(CI->getDebugLoc()); 1098 } 1099 1100 // Replace a use of the cast with a use of the new cast. 1101 TheUse = InsertedCast; 1102 MadeChange = true; 1103 ++NumCastUses; 1104 } 1105 1106 // If we removed all uses, nuke the cast. 1107 if (CI->use_empty()) { 1108 salvageDebugInfo(*CI); 1109 CI->eraseFromParent(); 1110 MadeChange = true; 1111 } 1112 1113 return MadeChange; 1114 } 1115 1116 /// If the specified cast instruction is a noop copy (e.g. it's casting from 1117 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 1118 /// reduce the number of virtual registers that must be created and coalesced. 1119 /// 1120 /// Return true if any changes are made. 1121 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 1122 const DataLayout &DL) { 1123 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition 1124 // than sinking only nop casts, but is helpful on some platforms. 1125 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { 1126 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), 1127 ASC->getDestAddressSpace())) 1128 return false; 1129 } 1130 1131 // If this is a noop copy, 1132 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 1133 EVT DstVT = TLI.getValueType(DL, CI->getType()); 1134 1135 // This is an fp<->int conversion? 1136 if (SrcVT.isInteger() != DstVT.isInteger()) 1137 return false; 1138 1139 // If this is an extension, it will be a zero or sign extension, which 1140 // isn't a noop. 1141 if (SrcVT.bitsLT(DstVT)) return false; 1142 1143 // If these values will be promoted, find out what they will be promoted 1144 // to. This helps us consider truncates on PPC as noop copies when they 1145 // are. 1146 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 1147 TargetLowering::TypePromoteInteger) 1148 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 1149 if (TLI.getTypeAction(CI->getContext(), DstVT) == 1150 TargetLowering::TypePromoteInteger) 1151 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 1152 1153 // If, after promotion, these are the same types, this is a noop copy. 1154 if (SrcVT != DstVT) 1155 return false; 1156 1157 return SinkCast(CI); 1158 } 1159 1160 static bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, CmpInst *Cmp, 1161 Intrinsic::ID IID, DominatorTree &DT) { 1162 // We allow matching the canonical IR (add X, C) back to (usubo X, -C). 1163 Value *Arg0 = BO->getOperand(0); 1164 Value *Arg1 = BO->getOperand(1); 1165 if (BO->getOpcode() == Instruction::Add && 1166 IID == Intrinsic::usub_with_overflow) { 1167 assert(isa<Constant>(Arg1) && "Unexpected input for usubo"); 1168 Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1)); 1169 } 1170 1171 Instruction *InsertPt; 1172 if (BO->hasOneUse() && BO->user_back() == Cmp) { 1173 // If the math is only used by the compare, insert at the compare to keep 1174 // the condition in the same block as its users. (CGP aggressively sinks 1175 // compares to help out SDAG.) 1176 InsertPt = Cmp; 1177 } else { 1178 // The math and compare may be independent instructions. Check dominance to 1179 // determine the insertion point for the intrinsic. 1180 bool MathDominates = DT.dominates(BO, Cmp); 1181 if (!MathDominates && !DT.dominates(Cmp, BO)) 1182 return false; 1183 1184 // Check that the insertion doesn't create a value that is live across more 1185 // than two blocks, so to minimise the increase in register pressure. 1186 if (BO->getParent() != Cmp->getParent()) { 1187 BasicBlock *Dominator = MathDominates ? BO->getParent() : Cmp->getParent(); 1188 BasicBlock *Dominated = MathDominates ? Cmp->getParent() : BO->getParent(); 1189 auto Successors = successors(Dominator); 1190 if (llvm::find(Successors, Dominated) == Successors.end()) 1191 return false; 1192 } 1193 1194 InsertPt = MathDominates ? cast<Instruction>(BO) : cast<Instruction>(Cmp); 1195 } 1196 1197 IRBuilder<> Builder(InsertPt); 1198 Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1); 1199 Value *Math = Builder.CreateExtractValue(MathOV, 0, "math"); 1200 Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov"); 1201 BO->replaceAllUsesWith(Math); 1202 Cmp->replaceAllUsesWith(OV); 1203 BO->eraseFromParent(); 1204 Cmp->eraseFromParent(); 1205 return true; 1206 } 1207 1208 /// Match special-case patterns that check for unsigned add overflow. 1209 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp, 1210 BinaryOperator *&Add) { 1211 // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val) 1212 // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero) 1213 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1214 1215 // We are not expecting non-canonical/degenerate code. Just bail out. 1216 if (isa<Constant>(A)) 1217 return false; 1218 1219 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1220 if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes())) 1221 B = ConstantInt::get(B->getType(), 1); 1222 else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) 1223 B = ConstantInt::get(B->getType(), -1); 1224 else 1225 return false; 1226 1227 // Check the users of the variable operand of the compare looking for an add 1228 // with the adjusted constant. 1229 for (User *U : A->users()) { 1230 if (match(U, m_Add(m_Specific(A), m_Specific(B)))) { 1231 Add = cast<BinaryOperator>(U); 1232 return true; 1233 } 1234 } 1235 return false; 1236 } 1237 1238 /// Try to combine the compare into a call to the llvm.uadd.with.overflow 1239 /// intrinsic. Return true if any changes were made. 1240 static bool combineToUAddWithOverflow(CmpInst *Cmp, const TargetLowering &TLI, 1241 const DataLayout &DL, DominatorTree &DT, 1242 bool &ModifiedDT) { 1243 Value *A, *B; 1244 BinaryOperator *Add; 1245 if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) 1246 if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add)) 1247 return false; 1248 1249 if (!TLI.shouldFormOverflowOp(ISD::UADDO, 1250 TLI.getValueType(DL, Add->getType()))) 1251 return false; 1252 1253 // We don't want to move around uses of condition values this late, so we 1254 // check if it is legal to create the call to the intrinsic in the basic 1255 // block containing the icmp. 1256 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) 1257 return false; 1258 1259 if (!replaceMathCmpWithIntrinsic(Add, Cmp, Intrinsic::uadd_with_overflow, DT)) 1260 return false; 1261 1262 // Reset callers - do not crash by iterating over a dead instruction. 1263 ModifiedDT = true; 1264 return true; 1265 } 1266 1267 static bool combineToUSubWithOverflow(CmpInst *Cmp, const TargetLowering &TLI, 1268 const DataLayout &DL, DominatorTree &DT, 1269 bool &ModifiedDT) { 1270 // We are not expecting non-canonical/degenerate code. Just bail out. 1271 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1272 if (isa<Constant>(A) && isa<Constant>(B)) 1273 return false; 1274 1275 // Convert (A u> B) to (A u< B) to simplify pattern matching. 1276 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1277 if (Pred == ICmpInst::ICMP_UGT) { 1278 std::swap(A, B); 1279 Pred = ICmpInst::ICMP_ULT; 1280 } 1281 // Convert special-case: (A == 0) is the same as (A u< 1). 1282 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { 1283 B = ConstantInt::get(B->getType(), 1); 1284 Pred = ICmpInst::ICMP_ULT; 1285 } 1286 // Convert special-case: (A != 0) is the same as (0 u< A). 1287 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { 1288 std::swap(A, B); 1289 Pred = ICmpInst::ICMP_ULT; 1290 } 1291 if (Pred != ICmpInst::ICMP_ULT) 1292 return false; 1293 1294 // Walk the users of a variable operand of a compare looking for a subtract or 1295 // add with that same operand. Also match the 2nd operand of the compare to 1296 // the add/sub, but that may be a negated constant operand of an add. 1297 Value *CmpVariableOperand = isa<Constant>(A) ? B : A; 1298 BinaryOperator *Sub = nullptr; 1299 for (User *U : CmpVariableOperand->users()) { 1300 // A - B, A u< B --> usubo(A, B) 1301 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { 1302 Sub = cast<BinaryOperator>(U); 1303 break; 1304 } 1305 1306 // A + (-C), A u< C (canonicalized form of (sub A, C)) 1307 const APInt *CmpC, *AddC; 1308 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && 1309 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { 1310 Sub = cast<BinaryOperator>(U); 1311 break; 1312 } 1313 } 1314 if (!Sub) 1315 return false; 1316 1317 if (!TLI.shouldFormOverflowOp(ISD::USUBO, 1318 TLI.getValueType(DL, Sub->getType()))) 1319 return false; 1320 1321 if (!replaceMathCmpWithIntrinsic(Sub, Cmp, Intrinsic::usub_with_overflow, DT)) 1322 return false; 1323 1324 // Reset callers - do not crash by iterating over a dead instruction. 1325 ModifiedDT = true; 1326 return true; 1327 } 1328 1329 /// Sink the given CmpInst into user blocks to reduce the number of virtual 1330 /// registers that must be created and coalesced. This is a clear win except on 1331 /// targets with multiple condition code registers (PowerPC), where it might 1332 /// lose; some adjustment may be wanted there. 1333 /// 1334 /// Return true if any changes are made. 1335 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) { 1336 if (TLI.hasMultipleConditionRegisters()) 1337 return false; 1338 1339 // Avoid sinking soft-FP comparisons, since this can move them into a loop. 1340 if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp)) 1341 return false; 1342 1343 // Only insert a cmp in each block once. 1344 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 1345 1346 bool MadeChange = false; 1347 for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end(); 1348 UI != E; ) { 1349 Use &TheUse = UI.getUse(); 1350 Instruction *User = cast<Instruction>(*UI); 1351 1352 // Preincrement use iterator so we don't invalidate it. 1353 ++UI; 1354 1355 // Don't bother for PHI nodes. 1356 if (isa<PHINode>(User)) 1357 continue; 1358 1359 // Figure out which BB this cmp is used in. 1360 BasicBlock *UserBB = User->getParent(); 1361 BasicBlock *DefBB = Cmp->getParent(); 1362 1363 // If this user is in the same block as the cmp, don't change the cmp. 1364 if (UserBB == DefBB) continue; 1365 1366 // If we have already inserted a cmp into this block, use it. 1367 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 1368 1369 if (!InsertedCmp) { 1370 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1371 assert(InsertPt != UserBB->end()); 1372 InsertedCmp = 1373 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), 1374 Cmp->getOperand(0), Cmp->getOperand(1), "", 1375 &*InsertPt); 1376 // Propagate the debug info. 1377 InsertedCmp->setDebugLoc(Cmp->getDebugLoc()); 1378 } 1379 1380 // Replace a use of the cmp with a use of the new cmp. 1381 TheUse = InsertedCmp; 1382 MadeChange = true; 1383 ++NumCmpUses; 1384 } 1385 1386 // If we removed all uses, nuke the cmp. 1387 if (Cmp->use_empty()) { 1388 Cmp->eraseFromParent(); 1389 MadeChange = true; 1390 } 1391 1392 return MadeChange; 1393 } 1394 1395 static bool optimizeCmp(CmpInst *Cmp, const TargetLowering &TLI, 1396 const DataLayout &DL, DominatorTree &DT, 1397 bool &ModifiedDT) { 1398 if (sinkCmpExpression(Cmp, TLI)) 1399 return true; 1400 1401 if (combineToUAddWithOverflow(Cmp, TLI, DL, DT, ModifiedDT)) 1402 return true; 1403 1404 if (combineToUSubWithOverflow(Cmp, TLI, DL, DT, ModifiedDT)) 1405 return true; 1406 1407 return false; 1408 } 1409 1410 /// Duplicate and sink the given 'and' instruction into user blocks where it is 1411 /// used in a compare to allow isel to generate better code for targets where 1412 /// this operation can be combined. 1413 /// 1414 /// Return true if any changes are made. 1415 static bool sinkAndCmp0Expression(Instruction *AndI, 1416 const TargetLowering &TLI, 1417 SetOfInstrs &InsertedInsts) { 1418 // Double-check that we're not trying to optimize an instruction that was 1419 // already optimized by some other part of this pass. 1420 assert(!InsertedInsts.count(AndI) && 1421 "Attempting to optimize already optimized and instruction"); 1422 (void) InsertedInsts; 1423 1424 // Nothing to do for single use in same basic block. 1425 if (AndI->hasOneUse() && 1426 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) 1427 return false; 1428 1429 // Try to avoid cases where sinking/duplicating is likely to increase register 1430 // pressure. 1431 if (!isa<ConstantInt>(AndI->getOperand(0)) && 1432 !isa<ConstantInt>(AndI->getOperand(1)) && 1433 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) 1434 return false; 1435 1436 for (auto *U : AndI->users()) { 1437 Instruction *User = cast<Instruction>(U); 1438 1439 // Only sink 'and' feeding icmp with 0. 1440 if (!isa<ICmpInst>(User)) 1441 return false; 1442 1443 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); 1444 if (!CmpC || !CmpC->isZero()) 1445 return false; 1446 } 1447 1448 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) 1449 return false; 1450 1451 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); 1452 LLVM_DEBUG(AndI->getParent()->dump()); 1453 1454 // Push the 'and' into the same block as the icmp 0. There should only be 1455 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any 1456 // others, so we don't need to keep track of which BBs we insert into. 1457 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); 1458 UI != E; ) { 1459 Use &TheUse = UI.getUse(); 1460 Instruction *User = cast<Instruction>(*UI); 1461 1462 // Preincrement use iterator so we don't invalidate it. 1463 ++UI; 1464 1465 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); 1466 1467 // Keep the 'and' in the same place if the use is already in the same block. 1468 Instruction *InsertPt = 1469 User->getParent() == AndI->getParent() ? AndI : User; 1470 Instruction *InsertedAnd = 1471 BinaryOperator::Create(Instruction::And, AndI->getOperand(0), 1472 AndI->getOperand(1), "", InsertPt); 1473 // Propagate the debug info. 1474 InsertedAnd->setDebugLoc(AndI->getDebugLoc()); 1475 1476 // Replace a use of the 'and' with a use of the new 'and'. 1477 TheUse = InsertedAnd; 1478 ++NumAndUses; 1479 LLVM_DEBUG(User->getParent()->dump()); 1480 } 1481 1482 // We removed all uses, nuke the and. 1483 AndI->eraseFromParent(); 1484 return true; 1485 } 1486 1487 /// Check if the candidates could be combined with a shift instruction, which 1488 /// includes: 1489 /// 1. Truncate instruction 1490 /// 2. And instruction and the imm is a mask of the low bits: 1491 /// imm & (imm+1) == 0 1492 static bool isExtractBitsCandidateUse(Instruction *User) { 1493 if (!isa<TruncInst>(User)) { 1494 if (User->getOpcode() != Instruction::And || 1495 !isa<ConstantInt>(User->getOperand(1))) 1496 return false; 1497 1498 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 1499 1500 if ((Cimm & (Cimm + 1)).getBoolValue()) 1501 return false; 1502 } 1503 return true; 1504 } 1505 1506 /// Sink both shift and truncate instruction to the use of truncate's BB. 1507 static bool 1508 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 1509 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 1510 const TargetLowering &TLI, const DataLayout &DL) { 1511 BasicBlock *UserBB = User->getParent(); 1512 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 1513 TruncInst *TruncI = dyn_cast<TruncInst>(User); 1514 bool MadeChange = false; 1515 1516 for (Value::user_iterator TruncUI = TruncI->user_begin(), 1517 TruncE = TruncI->user_end(); 1518 TruncUI != TruncE;) { 1519 1520 Use &TruncTheUse = TruncUI.getUse(); 1521 Instruction *TruncUser = cast<Instruction>(*TruncUI); 1522 // Preincrement use iterator so we don't invalidate it. 1523 1524 ++TruncUI; 1525 1526 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1527 if (!ISDOpcode) 1528 continue; 1529 1530 // If the use is actually a legal node, there will not be an 1531 // implicit truncate. 1532 // FIXME: always querying the result type is just an 1533 // approximation; some nodes' legality is determined by the 1534 // operand or other means. There's no good way to find out though. 1535 if (TLI.isOperationLegalOrCustom( 1536 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1537 continue; 1538 1539 // Don't bother for PHI nodes. 1540 if (isa<PHINode>(TruncUser)) 1541 continue; 1542 1543 BasicBlock *TruncUserBB = TruncUser->getParent(); 1544 1545 if (UserBB == TruncUserBB) 1546 continue; 1547 1548 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1549 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1550 1551 if (!InsertedShift && !InsertedTrunc) { 1552 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1553 assert(InsertPt != TruncUserBB->end()); 1554 // Sink the shift 1555 if (ShiftI->getOpcode() == Instruction::AShr) 1556 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1557 "", &*InsertPt); 1558 else 1559 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1560 "", &*InsertPt); 1561 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1562 1563 // Sink the trunc 1564 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1565 TruncInsertPt++; 1566 assert(TruncInsertPt != TruncUserBB->end()); 1567 1568 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1569 TruncI->getType(), "", &*TruncInsertPt); 1570 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); 1571 1572 MadeChange = true; 1573 1574 TruncTheUse = InsertedTrunc; 1575 } 1576 } 1577 return MadeChange; 1578 } 1579 1580 /// Sink the shift *right* instruction into user blocks if the uses could 1581 /// potentially be combined with this shift instruction and generate BitExtract 1582 /// instruction. It will only be applied if the architecture supports BitExtract 1583 /// instruction. Here is an example: 1584 /// BB1: 1585 /// %x.extract.shift = lshr i64 %arg1, 32 1586 /// BB2: 1587 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1588 /// ==> 1589 /// 1590 /// BB2: 1591 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1592 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1593 /// 1594 /// CodeGen will recognize the pattern in BB2 and generate BitExtract 1595 /// instruction. 1596 /// Return true if any changes are made. 1597 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1598 const TargetLowering &TLI, 1599 const DataLayout &DL) { 1600 BasicBlock *DefBB = ShiftI->getParent(); 1601 1602 /// Only insert instructions in each block once. 1603 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1604 1605 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1606 1607 bool MadeChange = false; 1608 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1609 UI != E;) { 1610 Use &TheUse = UI.getUse(); 1611 Instruction *User = cast<Instruction>(*UI); 1612 // Preincrement use iterator so we don't invalidate it. 1613 ++UI; 1614 1615 // Don't bother for PHI nodes. 1616 if (isa<PHINode>(User)) 1617 continue; 1618 1619 if (!isExtractBitsCandidateUse(User)) 1620 continue; 1621 1622 BasicBlock *UserBB = User->getParent(); 1623 1624 if (UserBB == DefBB) { 1625 // If the shift and truncate instruction are in the same BB. The use of 1626 // the truncate(TruncUse) may still introduce another truncate if not 1627 // legal. In this case, we would like to sink both shift and truncate 1628 // instruction to the BB of TruncUse. 1629 // for example: 1630 // BB1: 1631 // i64 shift.result = lshr i64 opnd, imm 1632 // trunc.result = trunc shift.result to i16 1633 // 1634 // BB2: 1635 // ----> We will have an implicit truncate here if the architecture does 1636 // not have i16 compare. 1637 // cmp i16 trunc.result, opnd2 1638 // 1639 if (isa<TruncInst>(User) && shiftIsLegal 1640 // If the type of the truncate is legal, no truncate will be 1641 // introduced in other basic blocks. 1642 && 1643 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1644 MadeChange = 1645 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1646 1647 continue; 1648 } 1649 // If we have already inserted a shift into this block, use it. 1650 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1651 1652 if (!InsertedShift) { 1653 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1654 assert(InsertPt != UserBB->end()); 1655 1656 if (ShiftI->getOpcode() == Instruction::AShr) 1657 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1658 "", &*InsertPt); 1659 else 1660 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1661 "", &*InsertPt); 1662 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1663 1664 MadeChange = true; 1665 } 1666 1667 // Replace a use of the shift with a use of the new shift. 1668 TheUse = InsertedShift; 1669 } 1670 1671 // If we removed all uses, nuke the shift. 1672 if (ShiftI->use_empty()) { 1673 salvageDebugInfo(*ShiftI); 1674 ShiftI->eraseFromParent(); 1675 } 1676 1677 return MadeChange; 1678 } 1679 1680 /// If counting leading or trailing zeros is an expensive operation and a zero 1681 /// input is defined, add a check for zero to avoid calling the intrinsic. 1682 /// 1683 /// We want to transform: 1684 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1685 /// 1686 /// into: 1687 /// entry: 1688 /// %cmpz = icmp eq i64 %A, 0 1689 /// br i1 %cmpz, label %cond.end, label %cond.false 1690 /// cond.false: 1691 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1692 /// br label %cond.end 1693 /// cond.end: 1694 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1695 /// 1696 /// If the transform is performed, return true and set ModifiedDT to true. 1697 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1698 const TargetLowering *TLI, 1699 const DataLayout *DL, 1700 bool &ModifiedDT) { 1701 if (!TLI || !DL) 1702 return false; 1703 1704 // If a zero input is undefined, it doesn't make sense to despeculate that. 1705 if (match(CountZeros->getOperand(1), m_One())) 1706 return false; 1707 1708 // If it's cheap to speculate, there's nothing to do. 1709 auto IntrinsicID = CountZeros->getIntrinsicID(); 1710 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1711 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1712 return false; 1713 1714 // Only handle legal scalar cases. Anything else requires too much work. 1715 Type *Ty = CountZeros->getType(); 1716 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1717 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 1718 return false; 1719 1720 // The intrinsic will be sunk behind a compare against zero and branch. 1721 BasicBlock *StartBlock = CountZeros->getParent(); 1722 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1723 1724 // Create another block after the count zero intrinsic. A PHI will be added 1725 // in this block to select the result of the intrinsic or the bit-width 1726 // constant if the input to the intrinsic is zero. 1727 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 1728 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 1729 1730 // Set up a builder to create a compare, conditional branch, and PHI. 1731 IRBuilder<> Builder(CountZeros->getContext()); 1732 Builder.SetInsertPoint(StartBlock->getTerminator()); 1733 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 1734 1735 // Replace the unconditional branch that was created by the first split with 1736 // a compare against zero and a conditional branch. 1737 Value *Zero = Constant::getNullValue(Ty); 1738 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 1739 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 1740 StartBlock->getTerminator()->eraseFromParent(); 1741 1742 // Create a PHI in the end block to select either the output of the intrinsic 1743 // or the bit width of the operand. 1744 Builder.SetInsertPoint(&EndBlock->front()); 1745 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 1746 CountZeros->replaceAllUsesWith(PN); 1747 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 1748 PN->addIncoming(BitWidth, StartBlock); 1749 PN->addIncoming(CountZeros, CallBlock); 1750 1751 // We are explicitly handling the zero case, so we can set the intrinsic's 1752 // undefined zero argument to 'true'. This will also prevent reprocessing the 1753 // intrinsic; we only despeculate when a zero input is defined. 1754 CountZeros->setArgOperand(1, Builder.getTrue()); 1755 ModifiedDT = true; 1756 return true; 1757 } 1758 1759 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { 1760 BasicBlock *BB = CI->getParent(); 1761 1762 // Lower inline assembly if we can. 1763 // If we found an inline asm expession, and if the target knows how to 1764 // lower it to normal LLVM code, do so now. 1765 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 1766 if (TLI->ExpandInlineAsm(CI)) { 1767 // Avoid invalidating the iterator. 1768 CurInstIterator = BB->begin(); 1769 // Avoid processing instructions out of order, which could cause 1770 // reuse before a value is defined. 1771 SunkAddrs.clear(); 1772 return true; 1773 } 1774 // Sink address computing for memory operands into the block. 1775 if (optimizeInlineAsmInst(CI)) 1776 return true; 1777 } 1778 1779 // Align the pointer arguments to this call if the target thinks it's a good 1780 // idea 1781 unsigned MinSize, PrefAlign; 1782 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 1783 for (auto &Arg : CI->arg_operands()) { 1784 // We want to align both objects whose address is used directly and 1785 // objects whose address is used in casts and GEPs, though it only makes 1786 // sense for GEPs if the offset is a multiple of the desired alignment and 1787 // if size - offset meets the size threshold. 1788 if (!Arg->getType()->isPointerTy()) 1789 continue; 1790 APInt Offset(DL->getIndexSizeInBits( 1791 cast<PointerType>(Arg->getType())->getAddressSpace()), 1792 0); 1793 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 1794 uint64_t Offset2 = Offset.getLimitedValue(); 1795 if ((Offset2 & (PrefAlign-1)) != 0) 1796 continue; 1797 AllocaInst *AI; 1798 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 1799 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 1800 AI->setAlignment(PrefAlign); 1801 // Global variables can only be aligned if they are defined in this 1802 // object (i.e. they are uniquely initialized in this object), and 1803 // over-aligning global variables that have an explicit section is 1804 // forbidden. 1805 GlobalVariable *GV; 1806 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 1807 GV->getPointerAlignment(*DL) < PrefAlign && 1808 DL->getTypeAllocSize(GV->getValueType()) >= 1809 MinSize + Offset2) 1810 GV->setAlignment(PrefAlign); 1811 } 1812 // If this is a memcpy (or similar) then we may be able to improve the 1813 // alignment 1814 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 1815 unsigned DestAlign = getKnownAlignment(MI->getDest(), *DL); 1816 if (DestAlign > MI->getDestAlignment()) 1817 MI->setDestAlignment(DestAlign); 1818 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { 1819 unsigned SrcAlign = getKnownAlignment(MTI->getSource(), *DL); 1820 if (SrcAlign > MTI->getSourceAlignment()) 1821 MTI->setSourceAlignment(SrcAlign); 1822 } 1823 } 1824 } 1825 1826 // If we have a cold call site, try to sink addressing computation into the 1827 // cold block. This interacts with our handling for loads and stores to 1828 // ensure that we can fold all uses of a potential addressing computation 1829 // into their uses. TODO: generalize this to work over profiling data 1830 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 1831 for (auto &Arg : CI->arg_operands()) { 1832 if (!Arg->getType()->isPointerTy()) 1833 continue; 1834 unsigned AS = Arg->getType()->getPointerAddressSpace(); 1835 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 1836 } 1837 1838 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 1839 if (II) { 1840 switch (II->getIntrinsicID()) { 1841 default: break; 1842 case Intrinsic::experimental_widenable_condition: { 1843 // Give up on future widening oppurtunties so that we can fold away dead 1844 // paths and merge blocks before going into block-local instruction 1845 // selection. 1846 if (II->use_empty()) { 1847 II->eraseFromParent(); 1848 return true; 1849 } 1850 Constant *RetVal = ConstantInt::getTrue(II->getContext()); 1851 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1852 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1853 }); 1854 return true; 1855 } 1856 case Intrinsic::objectsize: { 1857 // Lower all uses of llvm.objectsize.* 1858 Value *RetVal = 1859 lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); 1860 1861 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1862 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1863 }); 1864 return true; 1865 } 1866 case Intrinsic::is_constant: { 1867 // If is_constant hasn't folded away yet, lower it to false now. 1868 Constant *RetVal = ConstantInt::get(II->getType(), 0); 1869 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1870 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1871 }); 1872 return true; 1873 } 1874 case Intrinsic::aarch64_stlxr: 1875 case Intrinsic::aarch64_stxr: { 1876 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 1877 if (!ExtVal || !ExtVal->hasOneUse() || 1878 ExtVal->getParent() == CI->getParent()) 1879 return false; 1880 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 1881 ExtVal->moveBefore(CI); 1882 // Mark this instruction as "inserted by CGP", so that other 1883 // optimizations don't touch it. 1884 InsertedInsts.insert(ExtVal); 1885 return true; 1886 } 1887 1888 case Intrinsic::launder_invariant_group: 1889 case Intrinsic::strip_invariant_group: { 1890 Value *ArgVal = II->getArgOperand(0); 1891 auto it = LargeOffsetGEPMap.find(II); 1892 if (it != LargeOffsetGEPMap.end()) { 1893 // Merge entries in LargeOffsetGEPMap to reflect the RAUW. 1894 // Make sure not to have to deal with iterator invalidation 1895 // after possibly adding ArgVal to LargeOffsetGEPMap. 1896 auto GEPs = std::move(it->second); 1897 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); 1898 LargeOffsetGEPMap.erase(II); 1899 } 1900 1901 II->replaceAllUsesWith(ArgVal); 1902 II->eraseFromParent(); 1903 return true; 1904 } 1905 case Intrinsic::cttz: 1906 case Intrinsic::ctlz: 1907 // If counting zeros is expensive, try to avoid it. 1908 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 1909 } 1910 1911 if (TLI) { 1912 SmallVector<Value*, 2> PtrOps; 1913 Type *AccessTy; 1914 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 1915 while (!PtrOps.empty()) { 1916 Value *PtrVal = PtrOps.pop_back_val(); 1917 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 1918 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 1919 return true; 1920 } 1921 } 1922 } 1923 1924 // From here on out we're working with named functions. 1925 if (!CI->getCalledFunction()) return false; 1926 1927 // Lower all default uses of _chk calls. This is very similar 1928 // to what InstCombineCalls does, but here we are only lowering calls 1929 // to fortified library functions (e.g. __memcpy_chk) that have the default 1930 // "don't know" as the objectsize. Anything else should be left alone. 1931 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 1932 if (Value *V = Simplifier.optimizeCall(CI)) { 1933 CI->replaceAllUsesWith(V); 1934 CI->eraseFromParent(); 1935 return true; 1936 } 1937 1938 return false; 1939 } 1940 1941 /// Look for opportunities to duplicate return instructions to the predecessor 1942 /// to enable tail call optimizations. The case it is currently looking for is: 1943 /// @code 1944 /// bb0: 1945 /// %tmp0 = tail call i32 @f0() 1946 /// br label %return 1947 /// bb1: 1948 /// %tmp1 = tail call i32 @f1() 1949 /// br label %return 1950 /// bb2: 1951 /// %tmp2 = tail call i32 @f2() 1952 /// br label %return 1953 /// return: 1954 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 1955 /// ret i32 %retval 1956 /// @endcode 1957 /// 1958 /// => 1959 /// 1960 /// @code 1961 /// bb0: 1962 /// %tmp0 = tail call i32 @f0() 1963 /// ret i32 %tmp0 1964 /// bb1: 1965 /// %tmp1 = tail call i32 @f1() 1966 /// ret i32 %tmp1 1967 /// bb2: 1968 /// %tmp2 = tail call i32 @f2() 1969 /// ret i32 %tmp2 1970 /// @endcode 1971 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) { 1972 if (!TLI) 1973 return false; 1974 1975 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 1976 if (!RetI) 1977 return false; 1978 1979 PHINode *PN = nullptr; 1980 BitCastInst *BCI = nullptr; 1981 Value *V = RetI->getReturnValue(); 1982 if (V) { 1983 BCI = dyn_cast<BitCastInst>(V); 1984 if (BCI) 1985 V = BCI->getOperand(0); 1986 1987 PN = dyn_cast<PHINode>(V); 1988 if (!PN) 1989 return false; 1990 } 1991 1992 if (PN && PN->getParent() != BB) 1993 return false; 1994 1995 // Make sure there are no instructions between the PHI and return, or that the 1996 // return is the first instruction in the block. 1997 if (PN) { 1998 BasicBlock::iterator BI = BB->begin(); 1999 // Skip over debug and the bitcast. 2000 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI) || &*BI == BCI); 2001 if (&*BI != RetI) 2002 return false; 2003 } else { 2004 BasicBlock::iterator BI = BB->begin(); 2005 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 2006 if (&*BI != RetI) 2007 return false; 2008 } 2009 2010 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 2011 /// call. 2012 const Function *F = BB->getParent(); 2013 SmallVector<CallInst*, 4> TailCalls; 2014 if (PN) { 2015 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2016 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 2017 // Make sure the phi value is indeed produced by the tail call. 2018 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && 2019 TLI->mayBeEmittedAsTailCall(CI) && 2020 attributesPermitTailCall(F, CI, RetI, *TLI)) 2021 TailCalls.push_back(CI); 2022 } 2023 } else { 2024 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2025 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 2026 if (!VisitedBBs.insert(*PI).second) 2027 continue; 2028 2029 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 2030 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 2031 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 2032 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 2033 if (RI == RE) 2034 continue; 2035 2036 CallInst *CI = dyn_cast<CallInst>(&*RI); 2037 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2038 attributesPermitTailCall(F, CI, RetI, *TLI)) 2039 TailCalls.push_back(CI); 2040 } 2041 } 2042 2043 bool Changed = false; 2044 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { 2045 CallInst *CI = TailCalls[i]; 2046 CallSite CS(CI); 2047 2048 // Make sure the call instruction is followed by an unconditional branch to 2049 // the return block. 2050 BasicBlock *CallBB = CI->getParent(); 2051 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); 2052 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2053 continue; 2054 2055 // Duplicate the return into CallBB. 2056 (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); 2057 ModifiedDT = Changed = true; 2058 ++NumRetsDup; 2059 } 2060 2061 // If we eliminated all predecessors of the block, delete the block now. 2062 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 2063 BB->eraseFromParent(); 2064 2065 return Changed; 2066 } 2067 2068 //===----------------------------------------------------------------------===// 2069 // Memory Optimization 2070 //===----------------------------------------------------------------------===// 2071 2072 namespace { 2073 2074 /// This is an extended version of TargetLowering::AddrMode 2075 /// which holds actual Value*'s for register values. 2076 struct ExtAddrMode : public TargetLowering::AddrMode { 2077 Value *BaseReg = nullptr; 2078 Value *ScaledReg = nullptr; 2079 Value *OriginalValue = nullptr; 2080 bool InBounds = true; 2081 2082 enum FieldName { 2083 NoField = 0x00, 2084 BaseRegField = 0x01, 2085 BaseGVField = 0x02, 2086 BaseOffsField = 0x04, 2087 ScaledRegField = 0x08, 2088 ScaleField = 0x10, 2089 MultipleFields = 0xff 2090 }; 2091 2092 2093 ExtAddrMode() = default; 2094 2095 void print(raw_ostream &OS) const; 2096 void dump() const; 2097 2098 FieldName compare(const ExtAddrMode &other) { 2099 // First check that the types are the same on each field, as differing types 2100 // is something we can't cope with later on. 2101 if (BaseReg && other.BaseReg && 2102 BaseReg->getType() != other.BaseReg->getType()) 2103 return MultipleFields; 2104 if (BaseGV && other.BaseGV && 2105 BaseGV->getType() != other.BaseGV->getType()) 2106 return MultipleFields; 2107 if (ScaledReg && other.ScaledReg && 2108 ScaledReg->getType() != other.ScaledReg->getType()) 2109 return MultipleFields; 2110 2111 // Conservatively reject 'inbounds' mismatches. 2112 if (InBounds != other.InBounds) 2113 return MultipleFields; 2114 2115 // Check each field to see if it differs. 2116 unsigned Result = NoField; 2117 if (BaseReg != other.BaseReg) 2118 Result |= BaseRegField; 2119 if (BaseGV != other.BaseGV) 2120 Result |= BaseGVField; 2121 if (BaseOffs != other.BaseOffs) 2122 Result |= BaseOffsField; 2123 if (ScaledReg != other.ScaledReg) 2124 Result |= ScaledRegField; 2125 // Don't count 0 as being a different scale, because that actually means 2126 // unscaled (which will already be counted by having no ScaledReg). 2127 if (Scale && other.Scale && Scale != other.Scale) 2128 Result |= ScaleField; 2129 2130 if (countPopulation(Result) > 1) 2131 return MultipleFields; 2132 else 2133 return static_cast<FieldName>(Result); 2134 } 2135 2136 // An AddrMode is trivial if it involves no calculation i.e. it is just a base 2137 // with no offset. 2138 bool isTrivial() { 2139 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is 2140 // trivial if at most one of these terms is nonzero, except that BaseGV and 2141 // BaseReg both being zero actually means a null pointer value, which we 2142 // consider to be 'non-zero' here. 2143 return !BaseOffs && !Scale && !(BaseGV && BaseReg); 2144 } 2145 2146 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { 2147 switch (Field) { 2148 default: 2149 return nullptr; 2150 case BaseRegField: 2151 return BaseReg; 2152 case BaseGVField: 2153 return BaseGV; 2154 case ScaledRegField: 2155 return ScaledReg; 2156 case BaseOffsField: 2157 return ConstantInt::get(IntPtrTy, BaseOffs); 2158 } 2159 } 2160 2161 void SetCombinedField(FieldName Field, Value *V, 2162 const SmallVectorImpl<ExtAddrMode> &AddrModes) { 2163 switch (Field) { 2164 default: 2165 llvm_unreachable("Unhandled fields are expected to be rejected earlier"); 2166 break; 2167 case ExtAddrMode::BaseRegField: 2168 BaseReg = V; 2169 break; 2170 case ExtAddrMode::BaseGVField: 2171 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes 2172 // in the BaseReg field. 2173 assert(BaseReg == nullptr); 2174 BaseReg = V; 2175 BaseGV = nullptr; 2176 break; 2177 case ExtAddrMode::ScaledRegField: 2178 ScaledReg = V; 2179 // If we have a mix of scaled and unscaled addrmodes then we want scale 2180 // to be the scale and not zero. 2181 if (!Scale) 2182 for (const ExtAddrMode &AM : AddrModes) 2183 if (AM.Scale) { 2184 Scale = AM.Scale; 2185 break; 2186 } 2187 break; 2188 case ExtAddrMode::BaseOffsField: 2189 // The offset is no longer a constant, so it goes in ScaledReg with a 2190 // scale of 1. 2191 assert(ScaledReg == nullptr); 2192 ScaledReg = V; 2193 Scale = 1; 2194 BaseOffs = 0; 2195 break; 2196 } 2197 } 2198 }; 2199 2200 } // end anonymous namespace 2201 2202 #ifndef NDEBUG 2203 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2204 AM.print(OS); 2205 return OS; 2206 } 2207 #endif 2208 2209 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2210 void ExtAddrMode::print(raw_ostream &OS) const { 2211 bool NeedPlus = false; 2212 OS << "["; 2213 if (InBounds) 2214 OS << "inbounds "; 2215 if (BaseGV) { 2216 OS << (NeedPlus ? " + " : "") 2217 << "GV:"; 2218 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2219 NeedPlus = true; 2220 } 2221 2222 if (BaseOffs) { 2223 OS << (NeedPlus ? " + " : "") 2224 << BaseOffs; 2225 NeedPlus = true; 2226 } 2227 2228 if (BaseReg) { 2229 OS << (NeedPlus ? " + " : "") 2230 << "Base:"; 2231 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2232 NeedPlus = true; 2233 } 2234 if (Scale) { 2235 OS << (NeedPlus ? " + " : "") 2236 << Scale << "*"; 2237 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2238 } 2239 2240 OS << ']'; 2241 } 2242 2243 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2244 print(dbgs()); 2245 dbgs() << '\n'; 2246 } 2247 #endif 2248 2249 namespace { 2250 2251 /// This class provides transaction based operation on the IR. 2252 /// Every change made through this class is recorded in the internal state and 2253 /// can be undone (rollback) until commit is called. 2254 class TypePromotionTransaction { 2255 /// This represents the common interface of the individual transaction. 2256 /// Each class implements the logic for doing one specific modification on 2257 /// the IR via the TypePromotionTransaction. 2258 class TypePromotionAction { 2259 protected: 2260 /// The Instruction modified. 2261 Instruction *Inst; 2262 2263 public: 2264 /// Constructor of the action. 2265 /// The constructor performs the related action on the IR. 2266 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2267 2268 virtual ~TypePromotionAction() = default; 2269 2270 /// Undo the modification done by this action. 2271 /// When this method is called, the IR must be in the same state as it was 2272 /// before this action was applied. 2273 /// \pre Undoing the action works if and only if the IR is in the exact same 2274 /// state as it was directly after this action was applied. 2275 virtual void undo() = 0; 2276 2277 /// Advocate every change made by this action. 2278 /// When the results on the IR of the action are to be kept, it is important 2279 /// to call this function, otherwise hidden information may be kept forever. 2280 virtual void commit() { 2281 // Nothing to be done, this action is not doing anything. 2282 } 2283 }; 2284 2285 /// Utility to remember the position of an instruction. 2286 class InsertionHandler { 2287 /// Position of an instruction. 2288 /// Either an instruction: 2289 /// - Is the first in a basic block: BB is used. 2290 /// - Has a previous instruction: PrevInst is used. 2291 union { 2292 Instruction *PrevInst; 2293 BasicBlock *BB; 2294 } Point; 2295 2296 /// Remember whether or not the instruction had a previous instruction. 2297 bool HasPrevInstruction; 2298 2299 public: 2300 /// Record the position of \p Inst. 2301 InsertionHandler(Instruction *Inst) { 2302 BasicBlock::iterator It = Inst->getIterator(); 2303 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2304 if (HasPrevInstruction) 2305 Point.PrevInst = &*--It; 2306 else 2307 Point.BB = Inst->getParent(); 2308 } 2309 2310 /// Insert \p Inst at the recorded position. 2311 void insert(Instruction *Inst) { 2312 if (HasPrevInstruction) { 2313 if (Inst->getParent()) 2314 Inst->removeFromParent(); 2315 Inst->insertAfter(Point.PrevInst); 2316 } else { 2317 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2318 if (Inst->getParent()) 2319 Inst->moveBefore(Position); 2320 else 2321 Inst->insertBefore(Position); 2322 } 2323 } 2324 }; 2325 2326 /// Move an instruction before another. 2327 class InstructionMoveBefore : public TypePromotionAction { 2328 /// Original position of the instruction. 2329 InsertionHandler Position; 2330 2331 public: 2332 /// Move \p Inst before \p Before. 2333 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2334 : TypePromotionAction(Inst), Position(Inst) { 2335 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before 2336 << "\n"); 2337 Inst->moveBefore(Before); 2338 } 2339 2340 /// Move the instruction back to its original position. 2341 void undo() override { 2342 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2343 Position.insert(Inst); 2344 } 2345 }; 2346 2347 /// Set the operand of an instruction with a new value. 2348 class OperandSetter : public TypePromotionAction { 2349 /// Original operand of the instruction. 2350 Value *Origin; 2351 2352 /// Index of the modified instruction. 2353 unsigned Idx; 2354 2355 public: 2356 /// Set \p Idx operand of \p Inst with \p NewVal. 2357 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2358 : TypePromotionAction(Inst), Idx(Idx) { 2359 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2360 << "for:" << *Inst << "\n" 2361 << "with:" << *NewVal << "\n"); 2362 Origin = Inst->getOperand(Idx); 2363 Inst->setOperand(Idx, NewVal); 2364 } 2365 2366 /// Restore the original value of the instruction. 2367 void undo() override { 2368 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2369 << "for: " << *Inst << "\n" 2370 << "with: " << *Origin << "\n"); 2371 Inst->setOperand(Idx, Origin); 2372 } 2373 }; 2374 2375 /// Hide the operands of an instruction. 2376 /// Do as if this instruction was not using any of its operands. 2377 class OperandsHider : public TypePromotionAction { 2378 /// The list of original operands. 2379 SmallVector<Value *, 4> OriginalValues; 2380 2381 public: 2382 /// Remove \p Inst from the uses of the operands of \p Inst. 2383 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2384 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2385 unsigned NumOpnds = Inst->getNumOperands(); 2386 OriginalValues.reserve(NumOpnds); 2387 for (unsigned It = 0; It < NumOpnds; ++It) { 2388 // Save the current operand. 2389 Value *Val = Inst->getOperand(It); 2390 OriginalValues.push_back(Val); 2391 // Set a dummy one. 2392 // We could use OperandSetter here, but that would imply an overhead 2393 // that we are not willing to pay. 2394 Inst->setOperand(It, UndefValue::get(Val->getType())); 2395 } 2396 } 2397 2398 /// Restore the original list of uses. 2399 void undo() override { 2400 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2401 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2402 Inst->setOperand(It, OriginalValues[It]); 2403 } 2404 }; 2405 2406 /// Build a truncate instruction. 2407 class TruncBuilder : public TypePromotionAction { 2408 Value *Val; 2409 2410 public: 2411 /// Build a truncate instruction of \p Opnd producing a \p Ty 2412 /// result. 2413 /// trunc Opnd to Ty. 2414 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2415 IRBuilder<> Builder(Opnd); 2416 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2417 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2418 } 2419 2420 /// Get the built value. 2421 Value *getBuiltValue() { return Val; } 2422 2423 /// Remove the built instruction. 2424 void undo() override { 2425 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2426 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2427 IVal->eraseFromParent(); 2428 } 2429 }; 2430 2431 /// Build a sign extension instruction. 2432 class SExtBuilder : public TypePromotionAction { 2433 Value *Val; 2434 2435 public: 2436 /// Build a sign extension instruction of \p Opnd producing a \p Ty 2437 /// result. 2438 /// sext Opnd to Ty. 2439 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2440 : TypePromotionAction(InsertPt) { 2441 IRBuilder<> Builder(InsertPt); 2442 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2443 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2444 } 2445 2446 /// Get the built value. 2447 Value *getBuiltValue() { return Val; } 2448 2449 /// Remove the built instruction. 2450 void undo() override { 2451 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2452 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2453 IVal->eraseFromParent(); 2454 } 2455 }; 2456 2457 /// Build a zero extension instruction. 2458 class ZExtBuilder : public TypePromotionAction { 2459 Value *Val; 2460 2461 public: 2462 /// Build a zero extension instruction of \p Opnd producing a \p Ty 2463 /// result. 2464 /// zext Opnd to Ty. 2465 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2466 : TypePromotionAction(InsertPt) { 2467 IRBuilder<> Builder(InsertPt); 2468 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2469 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2470 } 2471 2472 /// Get the built value. 2473 Value *getBuiltValue() { return Val; } 2474 2475 /// Remove the built instruction. 2476 void undo() override { 2477 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2478 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2479 IVal->eraseFromParent(); 2480 } 2481 }; 2482 2483 /// Mutate an instruction to another type. 2484 class TypeMutator : public TypePromotionAction { 2485 /// Record the original type. 2486 Type *OrigTy; 2487 2488 public: 2489 /// Mutate the type of \p Inst into \p NewTy. 2490 TypeMutator(Instruction *Inst, Type *NewTy) 2491 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2492 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2493 << "\n"); 2494 Inst->mutateType(NewTy); 2495 } 2496 2497 /// Mutate the instruction back to its original type. 2498 void undo() override { 2499 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2500 << "\n"); 2501 Inst->mutateType(OrigTy); 2502 } 2503 }; 2504 2505 /// Replace the uses of an instruction by another instruction. 2506 class UsesReplacer : public TypePromotionAction { 2507 /// Helper structure to keep track of the replaced uses. 2508 struct InstructionAndIdx { 2509 /// The instruction using the instruction. 2510 Instruction *Inst; 2511 2512 /// The index where this instruction is used for Inst. 2513 unsigned Idx; 2514 2515 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2516 : Inst(Inst), Idx(Idx) {} 2517 }; 2518 2519 /// Keep track of the original uses (pair Instruction, Index). 2520 SmallVector<InstructionAndIdx, 4> OriginalUses; 2521 /// Keep track of the debug users. 2522 SmallVector<DbgValueInst *, 1> DbgValues; 2523 2524 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; 2525 2526 public: 2527 /// Replace all the use of \p Inst by \p New. 2528 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2529 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2530 << "\n"); 2531 // Record the original uses. 2532 for (Use &U : Inst->uses()) { 2533 Instruction *UserI = cast<Instruction>(U.getUser()); 2534 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2535 } 2536 // Record the debug uses separately. They are not in the instruction's 2537 // use list, but they are replaced by RAUW. 2538 findDbgValues(DbgValues, Inst); 2539 2540 // Now, we can replace the uses. 2541 Inst->replaceAllUsesWith(New); 2542 } 2543 2544 /// Reassign the original uses of Inst to Inst. 2545 void undo() override { 2546 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2547 for (use_iterator UseIt = OriginalUses.begin(), 2548 EndIt = OriginalUses.end(); 2549 UseIt != EndIt; ++UseIt) { 2550 UseIt->Inst->setOperand(UseIt->Idx, Inst); 2551 } 2552 // RAUW has replaced all original uses with references to the new value, 2553 // including the debug uses. Since we are undoing the replacements, 2554 // the original debug uses must also be reinstated to maintain the 2555 // correctness and utility of debug value instructions. 2556 for (auto *DVI: DbgValues) { 2557 LLVMContext &Ctx = Inst->getType()->getContext(); 2558 auto *MV = MetadataAsValue::get(Ctx, ValueAsMetadata::get(Inst)); 2559 DVI->setOperand(0, MV); 2560 } 2561 } 2562 }; 2563 2564 /// Remove an instruction from the IR. 2565 class InstructionRemover : public TypePromotionAction { 2566 /// Original position of the instruction. 2567 InsertionHandler Inserter; 2568 2569 /// Helper structure to hide all the link to the instruction. In other 2570 /// words, this helps to do as if the instruction was removed. 2571 OperandsHider Hider; 2572 2573 /// Keep track of the uses replaced, if any. 2574 UsesReplacer *Replacer = nullptr; 2575 2576 /// Keep track of instructions removed. 2577 SetOfInstrs &RemovedInsts; 2578 2579 public: 2580 /// Remove all reference of \p Inst and optionally replace all its 2581 /// uses with New. 2582 /// \p RemovedInsts Keep track of the instructions removed by this Action. 2583 /// \pre If !Inst->use_empty(), then New != nullptr 2584 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, 2585 Value *New = nullptr) 2586 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 2587 RemovedInsts(RemovedInsts) { 2588 if (New) 2589 Replacer = new UsesReplacer(Inst, New); 2590 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 2591 RemovedInsts.insert(Inst); 2592 /// The instructions removed here will be freed after completing 2593 /// optimizeBlock() for all blocks as we need to keep track of the 2594 /// removed instructions during promotion. 2595 Inst->removeFromParent(); 2596 } 2597 2598 ~InstructionRemover() override { delete Replacer; } 2599 2600 /// Resurrect the instruction and reassign it to the proper uses if 2601 /// new value was provided when build this action. 2602 void undo() override { 2603 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 2604 Inserter.insert(Inst); 2605 if (Replacer) 2606 Replacer->undo(); 2607 Hider.undo(); 2608 RemovedInsts.erase(Inst); 2609 } 2610 }; 2611 2612 public: 2613 /// Restoration point. 2614 /// The restoration point is a pointer to an action instead of an iterator 2615 /// because the iterator may be invalidated but not the pointer. 2616 using ConstRestorationPt = const TypePromotionAction *; 2617 2618 TypePromotionTransaction(SetOfInstrs &RemovedInsts) 2619 : RemovedInsts(RemovedInsts) {} 2620 2621 /// Advocate every changes made in that transaction. 2622 void commit(); 2623 2624 /// Undo all the changes made after the given point. 2625 void rollback(ConstRestorationPt Point); 2626 2627 /// Get the current restoration point. 2628 ConstRestorationPt getRestorationPoint() const; 2629 2630 /// \name API for IR modification with state keeping to support rollback. 2631 /// @{ 2632 /// Same as Instruction::setOperand. 2633 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 2634 2635 /// Same as Instruction::eraseFromParent. 2636 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 2637 2638 /// Same as Value::replaceAllUsesWith. 2639 void replaceAllUsesWith(Instruction *Inst, Value *New); 2640 2641 /// Same as Value::mutateType. 2642 void mutateType(Instruction *Inst, Type *NewTy); 2643 2644 /// Same as IRBuilder::createTrunc. 2645 Value *createTrunc(Instruction *Opnd, Type *Ty); 2646 2647 /// Same as IRBuilder::createSExt. 2648 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 2649 2650 /// Same as IRBuilder::createZExt. 2651 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 2652 2653 /// Same as Instruction::moveBefore. 2654 void moveBefore(Instruction *Inst, Instruction *Before); 2655 /// @} 2656 2657 private: 2658 /// The ordered list of actions made so far. 2659 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2660 2661 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 2662 2663 SetOfInstrs &RemovedInsts; 2664 }; 2665 2666 } // end anonymous namespace 2667 2668 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2669 Value *NewVal) { 2670 Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>( 2671 Inst, Idx, NewVal)); 2672 } 2673 2674 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2675 Value *NewVal) { 2676 Actions.push_back( 2677 llvm::make_unique<TypePromotionTransaction::InstructionRemover>( 2678 Inst, RemovedInsts, NewVal)); 2679 } 2680 2681 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2682 Value *New) { 2683 Actions.push_back( 2684 llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2685 } 2686 2687 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2688 Actions.push_back( 2689 llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2690 } 2691 2692 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2693 Type *Ty) { 2694 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2695 Value *Val = Ptr->getBuiltValue(); 2696 Actions.push_back(std::move(Ptr)); 2697 return Val; 2698 } 2699 2700 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2701 Value *Opnd, Type *Ty) { 2702 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2703 Value *Val = Ptr->getBuiltValue(); 2704 Actions.push_back(std::move(Ptr)); 2705 return Val; 2706 } 2707 2708 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2709 Value *Opnd, Type *Ty) { 2710 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2711 Value *Val = Ptr->getBuiltValue(); 2712 Actions.push_back(std::move(Ptr)); 2713 return Val; 2714 } 2715 2716 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2717 Instruction *Before) { 2718 Actions.push_back( 2719 llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 2720 Inst, Before)); 2721 } 2722 2723 TypePromotionTransaction::ConstRestorationPt 2724 TypePromotionTransaction::getRestorationPoint() const { 2725 return !Actions.empty() ? Actions.back().get() : nullptr; 2726 } 2727 2728 void TypePromotionTransaction::commit() { 2729 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2730 ++It) 2731 (*It)->commit(); 2732 Actions.clear(); 2733 } 2734 2735 void TypePromotionTransaction::rollback( 2736 TypePromotionTransaction::ConstRestorationPt Point) { 2737 while (!Actions.empty() && Point != Actions.back().get()) { 2738 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2739 Curr->undo(); 2740 } 2741 } 2742 2743 namespace { 2744 2745 /// A helper class for matching addressing modes. 2746 /// 2747 /// This encapsulates the logic for matching the target-legal addressing modes. 2748 class AddressingModeMatcher { 2749 SmallVectorImpl<Instruction*> &AddrModeInsts; 2750 const TargetLowering &TLI; 2751 const TargetRegisterInfo &TRI; 2752 const DataLayout &DL; 2753 2754 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2755 /// the memory instruction that we're computing this address for. 2756 Type *AccessTy; 2757 unsigned AddrSpace; 2758 Instruction *MemoryInst; 2759 2760 /// This is the addressing mode that we're building up. This is 2761 /// part of the return value of this addressing mode matching stuff. 2762 ExtAddrMode &AddrMode; 2763 2764 /// The instructions inserted by other CodeGenPrepare optimizations. 2765 const SetOfInstrs &InsertedInsts; 2766 2767 /// A map from the instructions to their type before promotion. 2768 InstrToOrigTy &PromotedInsts; 2769 2770 /// The ongoing transaction where every action should be registered. 2771 TypePromotionTransaction &TPT; 2772 2773 // A GEP which has too large offset to be folded into the addressing mode. 2774 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; 2775 2776 /// This is set to true when we should not do profitability checks. 2777 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 2778 bool IgnoreProfitability; 2779 2780 AddressingModeMatcher( 2781 SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, 2782 const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI, 2783 ExtAddrMode &AM, const SetOfInstrs &InsertedInsts, 2784 InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, 2785 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) 2786 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 2787 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 2788 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 2789 PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP) { 2790 IgnoreProfitability = false; 2791 } 2792 2793 public: 2794 /// Find the maximal addressing mode that a load/store of V can fold, 2795 /// give an access type of AccessTy. This returns a list of involved 2796 /// instructions in AddrModeInsts. 2797 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 2798 /// optimizations. 2799 /// \p PromotedInsts maps the instructions to their type before promotion. 2800 /// \p The ongoing transaction where every action should be registered. 2801 static ExtAddrMode 2802 Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, 2803 SmallVectorImpl<Instruction *> &AddrModeInsts, 2804 const TargetLowering &TLI, const TargetRegisterInfo &TRI, 2805 const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, 2806 TypePromotionTransaction &TPT, 2807 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) { 2808 ExtAddrMode Result; 2809 2810 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS, 2811 MemoryInst, Result, InsertedInsts, 2812 PromotedInsts, TPT, LargeOffsetGEP) 2813 .matchAddr(V, 0); 2814 (void)Success; assert(Success && "Couldn't select *anything*?"); 2815 return Result; 2816 } 2817 2818 private: 2819 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 2820 bool matchAddr(Value *Addr, unsigned Depth); 2821 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, 2822 bool *MovedAway = nullptr); 2823 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 2824 ExtAddrMode &AMBefore, 2825 ExtAddrMode &AMAfter); 2826 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 2827 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 2828 Value *PromotedOperand) const; 2829 }; 2830 2831 class PhiNodeSet; 2832 2833 /// An iterator for PhiNodeSet. 2834 class PhiNodeSetIterator { 2835 PhiNodeSet * const Set; 2836 size_t CurrentIndex = 0; 2837 2838 public: 2839 /// The constructor. Start should point to either a valid element, or be equal 2840 /// to the size of the underlying SmallVector of the PhiNodeSet. 2841 PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start); 2842 PHINode * operator*() const; 2843 PhiNodeSetIterator& operator++(); 2844 bool operator==(const PhiNodeSetIterator &RHS) const; 2845 bool operator!=(const PhiNodeSetIterator &RHS) const; 2846 }; 2847 2848 /// Keeps a set of PHINodes. 2849 /// 2850 /// This is a minimal set implementation for a specific use case: 2851 /// It is very fast when there are very few elements, but also provides good 2852 /// performance when there are many. It is similar to SmallPtrSet, but also 2853 /// provides iteration by insertion order, which is deterministic and stable 2854 /// across runs. It is also similar to SmallSetVector, but provides removing 2855 /// elements in O(1) time. This is achieved by not actually removing the element 2856 /// from the underlying vector, so comes at the cost of using more memory, but 2857 /// that is fine, since PhiNodeSets are used as short lived objects. 2858 class PhiNodeSet { 2859 friend class PhiNodeSetIterator; 2860 2861 using MapType = SmallDenseMap<PHINode *, size_t, 32>; 2862 using iterator = PhiNodeSetIterator; 2863 2864 /// Keeps the elements in the order of their insertion in the underlying 2865 /// vector. To achieve constant time removal, it never deletes any element. 2866 SmallVector<PHINode *, 32> NodeList; 2867 2868 /// Keeps the elements in the underlying set implementation. This (and not the 2869 /// NodeList defined above) is the source of truth on whether an element 2870 /// is actually in the collection. 2871 MapType NodeMap; 2872 2873 /// Points to the first valid (not deleted) element when the set is not empty 2874 /// and the value is not zero. Equals to the size of the underlying vector 2875 /// when the set is empty. When the value is 0, as in the beginning, the 2876 /// first element may or may not be valid. 2877 size_t FirstValidElement = 0; 2878 2879 public: 2880 /// Inserts a new element to the collection. 2881 /// \returns true if the element is actually added, i.e. was not in the 2882 /// collection before the operation. 2883 bool insert(PHINode *Ptr) { 2884 if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { 2885 NodeList.push_back(Ptr); 2886 return true; 2887 } 2888 return false; 2889 } 2890 2891 /// Removes the element from the collection. 2892 /// \returns whether the element is actually removed, i.e. was in the 2893 /// collection before the operation. 2894 bool erase(PHINode *Ptr) { 2895 auto it = NodeMap.find(Ptr); 2896 if (it != NodeMap.end()) { 2897 NodeMap.erase(Ptr); 2898 SkipRemovedElements(FirstValidElement); 2899 return true; 2900 } 2901 return false; 2902 } 2903 2904 /// Removes all elements and clears the collection. 2905 void clear() { 2906 NodeMap.clear(); 2907 NodeList.clear(); 2908 FirstValidElement = 0; 2909 } 2910 2911 /// \returns an iterator that will iterate the elements in the order of 2912 /// insertion. 2913 iterator begin() { 2914 if (FirstValidElement == 0) 2915 SkipRemovedElements(FirstValidElement); 2916 return PhiNodeSetIterator(this, FirstValidElement); 2917 } 2918 2919 /// \returns an iterator that points to the end of the collection. 2920 iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } 2921 2922 /// Returns the number of elements in the collection. 2923 size_t size() const { 2924 return NodeMap.size(); 2925 } 2926 2927 /// \returns 1 if the given element is in the collection, and 0 if otherwise. 2928 size_t count(PHINode *Ptr) const { 2929 return NodeMap.count(Ptr); 2930 } 2931 2932 private: 2933 /// Updates the CurrentIndex so that it will point to a valid element. 2934 /// 2935 /// If the element of NodeList at CurrentIndex is valid, it does not 2936 /// change it. If there are no more valid elements, it updates CurrentIndex 2937 /// to point to the end of the NodeList. 2938 void SkipRemovedElements(size_t &CurrentIndex) { 2939 while (CurrentIndex < NodeList.size()) { 2940 auto it = NodeMap.find(NodeList[CurrentIndex]); 2941 // If the element has been deleted and added again later, NodeMap will 2942 // point to a different index, so CurrentIndex will still be invalid. 2943 if (it != NodeMap.end() && it->second == CurrentIndex) 2944 break; 2945 ++CurrentIndex; 2946 } 2947 } 2948 }; 2949 2950 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) 2951 : Set(Set), CurrentIndex(Start) {} 2952 2953 PHINode * PhiNodeSetIterator::operator*() const { 2954 assert(CurrentIndex < Set->NodeList.size() && 2955 "PhiNodeSet access out of range"); 2956 return Set->NodeList[CurrentIndex]; 2957 } 2958 2959 PhiNodeSetIterator& PhiNodeSetIterator::operator++() { 2960 assert(CurrentIndex < Set->NodeList.size() && 2961 "PhiNodeSet access out of range"); 2962 ++CurrentIndex; 2963 Set->SkipRemovedElements(CurrentIndex); 2964 return *this; 2965 } 2966 2967 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { 2968 return CurrentIndex == RHS.CurrentIndex; 2969 } 2970 2971 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { 2972 return !((*this) == RHS); 2973 } 2974 2975 /// Keep track of simplification of Phi nodes. 2976 /// Accept the set of all phi nodes and erase phi node from this set 2977 /// if it is simplified. 2978 class SimplificationTracker { 2979 DenseMap<Value *, Value *> Storage; 2980 const SimplifyQuery &SQ; 2981 // Tracks newly created Phi nodes. The elements are iterated by insertion 2982 // order. 2983 PhiNodeSet AllPhiNodes; 2984 // Tracks newly created Select nodes. 2985 SmallPtrSet<SelectInst *, 32> AllSelectNodes; 2986 2987 public: 2988 SimplificationTracker(const SimplifyQuery &sq) 2989 : SQ(sq) {} 2990 2991 Value *Get(Value *V) { 2992 do { 2993 auto SV = Storage.find(V); 2994 if (SV == Storage.end()) 2995 return V; 2996 V = SV->second; 2997 } while (true); 2998 } 2999 3000 Value *Simplify(Value *Val) { 3001 SmallVector<Value *, 32> WorkList; 3002 SmallPtrSet<Value *, 32> Visited; 3003 WorkList.push_back(Val); 3004 while (!WorkList.empty()) { 3005 auto P = WorkList.pop_back_val(); 3006 if (!Visited.insert(P).second) 3007 continue; 3008 if (auto *PI = dyn_cast<Instruction>(P)) 3009 if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { 3010 for (auto *U : PI->users()) 3011 WorkList.push_back(cast<Value>(U)); 3012 Put(PI, V); 3013 PI->replaceAllUsesWith(V); 3014 if (auto *PHI = dyn_cast<PHINode>(PI)) 3015 AllPhiNodes.erase(PHI); 3016 if (auto *Select = dyn_cast<SelectInst>(PI)) 3017 AllSelectNodes.erase(Select); 3018 PI->eraseFromParent(); 3019 } 3020 } 3021 return Get(Val); 3022 } 3023 3024 void Put(Value *From, Value *To) { 3025 Storage.insert({ From, To }); 3026 } 3027 3028 void ReplacePhi(PHINode *From, PHINode *To) { 3029 Value* OldReplacement = Get(From); 3030 while (OldReplacement != From) { 3031 From = To; 3032 To = dyn_cast<PHINode>(OldReplacement); 3033 OldReplacement = Get(From); 3034 } 3035 assert(Get(To) == To && "Replacement PHI node is already replaced."); 3036 Put(From, To); 3037 From->replaceAllUsesWith(To); 3038 AllPhiNodes.erase(From); 3039 From->eraseFromParent(); 3040 } 3041 3042 PhiNodeSet& newPhiNodes() { return AllPhiNodes; } 3043 3044 void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } 3045 3046 void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } 3047 3048 unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } 3049 3050 unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } 3051 3052 void destroyNewNodes(Type *CommonType) { 3053 // For safe erasing, replace the uses with dummy value first. 3054 auto Dummy = UndefValue::get(CommonType); 3055 for (auto I : AllPhiNodes) { 3056 I->replaceAllUsesWith(Dummy); 3057 I->eraseFromParent(); 3058 } 3059 AllPhiNodes.clear(); 3060 for (auto I : AllSelectNodes) { 3061 I->replaceAllUsesWith(Dummy); 3062 I->eraseFromParent(); 3063 } 3064 AllSelectNodes.clear(); 3065 } 3066 }; 3067 3068 /// A helper class for combining addressing modes. 3069 class AddressingModeCombiner { 3070 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; 3071 typedef std::pair<PHINode *, PHINode *> PHIPair; 3072 3073 private: 3074 /// The addressing modes we've collected. 3075 SmallVector<ExtAddrMode, 16> AddrModes; 3076 3077 /// The field in which the AddrModes differ, when we have more than one. 3078 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; 3079 3080 /// Are the AddrModes that we have all just equal to their original values? 3081 bool AllAddrModesTrivial = true; 3082 3083 /// Common Type for all different fields in addressing modes. 3084 Type *CommonType; 3085 3086 /// SimplifyQuery for simplifyInstruction utility. 3087 const SimplifyQuery &SQ; 3088 3089 /// Original Address. 3090 Value *Original; 3091 3092 public: 3093 AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) 3094 : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} 3095 3096 /// Get the combined AddrMode 3097 const ExtAddrMode &getAddrMode() const { 3098 return AddrModes[0]; 3099 } 3100 3101 /// Add a new AddrMode if it's compatible with the AddrModes we already 3102 /// have. 3103 /// \return True iff we succeeded in doing so. 3104 bool addNewAddrMode(ExtAddrMode &NewAddrMode) { 3105 // Take note of if we have any non-trivial AddrModes, as we need to detect 3106 // when all AddrModes are trivial as then we would introduce a phi or select 3107 // which just duplicates what's already there. 3108 AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); 3109 3110 // If this is the first addrmode then everything is fine. 3111 if (AddrModes.empty()) { 3112 AddrModes.emplace_back(NewAddrMode); 3113 return true; 3114 } 3115 3116 // Figure out how different this is from the other address modes, which we 3117 // can do just by comparing against the first one given that we only care 3118 // about the cumulative difference. 3119 ExtAddrMode::FieldName ThisDifferentField = 3120 AddrModes[0].compare(NewAddrMode); 3121 if (DifferentField == ExtAddrMode::NoField) 3122 DifferentField = ThisDifferentField; 3123 else if (DifferentField != ThisDifferentField) 3124 DifferentField = ExtAddrMode::MultipleFields; 3125 3126 // If NewAddrMode differs in more than one dimension we cannot handle it. 3127 bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; 3128 3129 // If Scale Field is different then we reject. 3130 CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; 3131 3132 // We also must reject the case when base offset is different and 3133 // scale reg is not null, we cannot handle this case due to merge of 3134 // different offsets will be used as ScaleReg. 3135 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || 3136 !NewAddrMode.ScaledReg); 3137 3138 // We also must reject the case when GV is different and BaseReg installed 3139 // due to we want to use base reg as a merge of GV values. 3140 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || 3141 !NewAddrMode.HasBaseReg); 3142 3143 // Even if NewAddMode is the same we still need to collect it due to 3144 // original value is different. And later we will need all original values 3145 // as anchors during finding the common Phi node. 3146 if (CanHandle) 3147 AddrModes.emplace_back(NewAddrMode); 3148 else 3149 AddrModes.clear(); 3150 3151 return CanHandle; 3152 } 3153 3154 /// Combine the addressing modes we've collected into a single 3155 /// addressing mode. 3156 /// \return True iff we successfully combined them or we only had one so 3157 /// didn't need to combine them anyway. 3158 bool combineAddrModes() { 3159 // If we have no AddrModes then they can't be combined. 3160 if (AddrModes.size() == 0) 3161 return false; 3162 3163 // A single AddrMode can trivially be combined. 3164 if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) 3165 return true; 3166 3167 // If the AddrModes we collected are all just equal to the value they are 3168 // derived from then combining them wouldn't do anything useful. 3169 if (AllAddrModesTrivial) 3170 return false; 3171 3172 if (!addrModeCombiningAllowed()) 3173 return false; 3174 3175 // Build a map between <original value, basic block where we saw it> to 3176 // value of base register. 3177 // Bail out if there is no common type. 3178 FoldAddrToValueMapping Map; 3179 if (!initializeMap(Map)) 3180 return false; 3181 3182 Value *CommonValue = findCommon(Map); 3183 if (CommonValue) 3184 AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); 3185 return CommonValue != nullptr; 3186 } 3187 3188 private: 3189 /// Initialize Map with anchor values. For address seen 3190 /// we set the value of different field saw in this address. 3191 /// At the same time we find a common type for different field we will 3192 /// use to create new Phi/Select nodes. Keep it in CommonType field. 3193 /// Return false if there is no common type found. 3194 bool initializeMap(FoldAddrToValueMapping &Map) { 3195 // Keep track of keys where the value is null. We will need to replace it 3196 // with constant null when we know the common type. 3197 SmallVector<Value *, 2> NullValue; 3198 Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); 3199 for (auto &AM : AddrModes) { 3200 Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); 3201 if (DV) { 3202 auto *Type = DV->getType(); 3203 if (CommonType && CommonType != Type) 3204 return false; 3205 CommonType = Type; 3206 Map[AM.OriginalValue] = DV; 3207 } else { 3208 NullValue.push_back(AM.OriginalValue); 3209 } 3210 } 3211 assert(CommonType && "At least one non-null value must be!"); 3212 for (auto *V : NullValue) 3213 Map[V] = Constant::getNullValue(CommonType); 3214 return true; 3215 } 3216 3217 /// We have mapping between value A and other value B where B was a field in 3218 /// addressing mode represented by A. Also we have an original value C 3219 /// representing an address we start with. Traversing from C through phi and 3220 /// selects we ended up with A's in a map. This utility function tries to find 3221 /// a value V which is a field in addressing mode C and traversing through phi 3222 /// nodes and selects we will end up in corresponded values B in a map. 3223 /// The utility will create a new Phi/Selects if needed. 3224 // The simple example looks as follows: 3225 // BB1: 3226 // p1 = b1 + 40 3227 // br cond BB2, BB3 3228 // BB2: 3229 // p2 = b2 + 40 3230 // br BB3 3231 // BB3: 3232 // p = phi [p1, BB1], [p2, BB2] 3233 // v = load p 3234 // Map is 3235 // p1 -> b1 3236 // p2 -> b2 3237 // Request is 3238 // p -> ? 3239 // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. 3240 Value *findCommon(FoldAddrToValueMapping &Map) { 3241 // Tracks the simplification of newly created phi nodes. The reason we use 3242 // this mapping is because we will add new created Phi nodes in AddrToBase. 3243 // Simplification of Phi nodes is recursive, so some Phi node may 3244 // be simplified after we added it to AddrToBase. In reality this 3245 // simplification is possible only if original phi/selects were not 3246 // simplified yet. 3247 // Using this mapping we can find the current value in AddrToBase. 3248 SimplificationTracker ST(SQ); 3249 3250 // First step, DFS to create PHI nodes for all intermediate blocks. 3251 // Also fill traverse order for the second step. 3252 SmallVector<Value *, 32> TraverseOrder; 3253 InsertPlaceholders(Map, TraverseOrder, ST); 3254 3255 // Second Step, fill new nodes by merged values and simplify if possible. 3256 FillPlaceholders(Map, TraverseOrder, ST); 3257 3258 if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { 3259 ST.destroyNewNodes(CommonType); 3260 return nullptr; 3261 } 3262 3263 // Now we'd like to match New Phi nodes to existed ones. 3264 unsigned PhiNotMatchedCount = 0; 3265 if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) { 3266 ST.destroyNewNodes(CommonType); 3267 return nullptr; 3268 } 3269 3270 auto *Result = ST.Get(Map.find(Original)->second); 3271 if (Result) { 3272 NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount; 3273 NumMemoryInstsSelectCreated += ST.countNewSelectNodes(); 3274 } 3275 return Result; 3276 } 3277 3278 /// Try to match PHI node to Candidate. 3279 /// Matcher tracks the matched Phi nodes. 3280 bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, 3281 SmallSetVector<PHIPair, 8> &Matcher, 3282 PhiNodeSet &PhiNodesToMatch) { 3283 SmallVector<PHIPair, 8> WorkList; 3284 Matcher.insert({ PHI, Candidate }); 3285 SmallSet<PHINode *, 8> MatchedPHIs; 3286 MatchedPHIs.insert(PHI); 3287 WorkList.push_back({ PHI, Candidate }); 3288 SmallSet<PHIPair, 8> Visited; 3289 while (!WorkList.empty()) { 3290 auto Item = WorkList.pop_back_val(); 3291 if (!Visited.insert(Item).second) 3292 continue; 3293 // We iterate over all incoming values to Phi to compare them. 3294 // If values are different and both of them Phi and the first one is a 3295 // Phi we added (subject to match) and both of them is in the same basic 3296 // block then we can match our pair if values match. So we state that 3297 // these values match and add it to work list to verify that. 3298 for (auto B : Item.first->blocks()) { 3299 Value *FirstValue = Item.first->getIncomingValueForBlock(B); 3300 Value *SecondValue = Item.second->getIncomingValueForBlock(B); 3301 if (FirstValue == SecondValue) 3302 continue; 3303 3304 PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue); 3305 PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue); 3306 3307 // One of them is not Phi or 3308 // The first one is not Phi node from the set we'd like to match or 3309 // Phi nodes from different basic blocks then 3310 // we will not be able to match. 3311 if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) || 3312 FirstPhi->getParent() != SecondPhi->getParent()) 3313 return false; 3314 3315 // If we already matched them then continue. 3316 if (Matcher.count({ FirstPhi, SecondPhi })) 3317 continue; 3318 // So the values are different and does not match. So we need them to 3319 // match. (But we register no more than one match per PHI node, so that 3320 // we won't later try to replace them twice.) 3321 if (!MatchedPHIs.insert(FirstPhi).second) 3322 Matcher.insert({ FirstPhi, SecondPhi }); 3323 // But me must check it. 3324 WorkList.push_back({ FirstPhi, SecondPhi }); 3325 } 3326 } 3327 return true; 3328 } 3329 3330 /// For the given set of PHI nodes (in the SimplificationTracker) try 3331 /// to find their equivalents. 3332 /// Returns false if this matching fails and creation of new Phi is disabled. 3333 bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, 3334 unsigned &PhiNotMatchedCount) { 3335 // Matched and PhiNodesToMatch iterate their elements in a deterministic 3336 // order, so the replacements (ReplacePhi) are also done in a deterministic 3337 // order. 3338 SmallSetVector<PHIPair, 8> Matched; 3339 SmallPtrSet<PHINode *, 8> WillNotMatch; 3340 PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes(); 3341 while (PhiNodesToMatch.size()) { 3342 PHINode *PHI = *PhiNodesToMatch.begin(); 3343 3344 // Add us, if no Phi nodes in the basic block we do not match. 3345 WillNotMatch.clear(); 3346 WillNotMatch.insert(PHI); 3347 3348 // Traverse all Phis until we found equivalent or fail to do that. 3349 bool IsMatched = false; 3350 for (auto &P : PHI->getParent()->phis()) { 3351 if (&P == PHI) 3352 continue; 3353 if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) 3354 break; 3355 // If it does not match, collect all Phi nodes from matcher. 3356 // if we end up with no match, them all these Phi nodes will not match 3357 // later. 3358 for (auto M : Matched) 3359 WillNotMatch.insert(M.first); 3360 Matched.clear(); 3361 } 3362 if (IsMatched) { 3363 // Replace all matched values and erase them. 3364 for (auto MV : Matched) 3365 ST.ReplacePhi(MV.first, MV.second); 3366 Matched.clear(); 3367 continue; 3368 } 3369 // If we are not allowed to create new nodes then bail out. 3370 if (!AllowNewPhiNodes) 3371 return false; 3372 // Just remove all seen values in matcher. They will not match anything. 3373 PhiNotMatchedCount += WillNotMatch.size(); 3374 for (auto *P : WillNotMatch) 3375 PhiNodesToMatch.erase(P); 3376 } 3377 return true; 3378 } 3379 /// Fill the placeholders with values from predecessors and simplify them. 3380 void FillPlaceholders(FoldAddrToValueMapping &Map, 3381 SmallVectorImpl<Value *> &TraverseOrder, 3382 SimplificationTracker &ST) { 3383 while (!TraverseOrder.empty()) { 3384 Value *Current = TraverseOrder.pop_back_val(); 3385 assert(Map.find(Current) != Map.end() && "No node to fill!!!"); 3386 Value *V = Map[Current]; 3387 3388 if (SelectInst *Select = dyn_cast<SelectInst>(V)) { 3389 // CurrentValue also must be Select. 3390 auto *CurrentSelect = cast<SelectInst>(Current); 3391 auto *TrueValue = CurrentSelect->getTrueValue(); 3392 assert(Map.find(TrueValue) != Map.end() && "No True Value!"); 3393 Select->setTrueValue(ST.Get(Map[TrueValue])); 3394 auto *FalseValue = CurrentSelect->getFalseValue(); 3395 assert(Map.find(FalseValue) != Map.end() && "No False Value!"); 3396 Select->setFalseValue(ST.Get(Map[FalseValue])); 3397 } else { 3398 // Must be a Phi node then. 3399 PHINode *PHI = cast<PHINode>(V); 3400 auto *CurrentPhi = dyn_cast<PHINode>(Current); 3401 // Fill the Phi node with values from predecessors. 3402 for (auto B : predecessors(PHI->getParent())) { 3403 Value *PV = CurrentPhi->getIncomingValueForBlock(B); 3404 assert(Map.find(PV) != Map.end() && "No predecessor Value!"); 3405 PHI->addIncoming(ST.Get(Map[PV]), B); 3406 } 3407 } 3408 Map[Current] = ST.Simplify(V); 3409 } 3410 } 3411 3412 /// Starting from original value recursively iterates over def-use chain up to 3413 /// known ending values represented in a map. For each traversed phi/select 3414 /// inserts a placeholder Phi or Select. 3415 /// Reports all new created Phi/Select nodes by adding them to set. 3416 /// Also reports and order in what values have been traversed. 3417 void InsertPlaceholders(FoldAddrToValueMapping &Map, 3418 SmallVectorImpl<Value *> &TraverseOrder, 3419 SimplificationTracker &ST) { 3420 SmallVector<Value *, 32> Worklist; 3421 assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) && 3422 "Address must be a Phi or Select node"); 3423 auto *Dummy = UndefValue::get(CommonType); 3424 Worklist.push_back(Original); 3425 while (!Worklist.empty()) { 3426 Value *Current = Worklist.pop_back_val(); 3427 // if it is already visited or it is an ending value then skip it. 3428 if (Map.find(Current) != Map.end()) 3429 continue; 3430 TraverseOrder.push_back(Current); 3431 3432 // CurrentValue must be a Phi node or select. All others must be covered 3433 // by anchors. 3434 if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) { 3435 // Is it OK to get metadata from OrigSelect?! 3436 // Create a Select placeholder with dummy value. 3437 SelectInst *Select = SelectInst::Create( 3438 CurrentSelect->getCondition(), Dummy, Dummy, 3439 CurrentSelect->getName(), CurrentSelect, CurrentSelect); 3440 Map[Current] = Select; 3441 ST.insertNewSelect(Select); 3442 // We are interested in True and False values. 3443 Worklist.push_back(CurrentSelect->getTrueValue()); 3444 Worklist.push_back(CurrentSelect->getFalseValue()); 3445 } else { 3446 // It must be a Phi node then. 3447 PHINode *CurrentPhi = cast<PHINode>(Current); 3448 unsigned PredCount = CurrentPhi->getNumIncomingValues(); 3449 PHINode *PHI = 3450 PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); 3451 Map[Current] = PHI; 3452 ST.insertNewPhi(PHI); 3453 for (Value *P : CurrentPhi->incoming_values()) 3454 Worklist.push_back(P); 3455 } 3456 } 3457 } 3458 3459 bool addrModeCombiningAllowed() { 3460 if (DisableComplexAddrModes) 3461 return false; 3462 switch (DifferentField) { 3463 default: 3464 return false; 3465 case ExtAddrMode::BaseRegField: 3466 return AddrSinkCombineBaseReg; 3467 case ExtAddrMode::BaseGVField: 3468 return AddrSinkCombineBaseGV; 3469 case ExtAddrMode::BaseOffsField: 3470 return AddrSinkCombineBaseOffs; 3471 case ExtAddrMode::ScaledRegField: 3472 return AddrSinkCombineScaledReg; 3473 } 3474 } 3475 }; 3476 } // end anonymous namespace 3477 3478 /// Try adding ScaleReg*Scale to the current addressing mode. 3479 /// Return true and update AddrMode if this addr mode is legal for the target, 3480 /// false if not. 3481 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 3482 unsigned Depth) { 3483 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 3484 // mode. Just process that directly. 3485 if (Scale == 1) 3486 return matchAddr(ScaleReg, Depth); 3487 3488 // If the scale is 0, it takes nothing to add this. 3489 if (Scale == 0) 3490 return true; 3491 3492 // If we already have a scale of this value, we can add to it, otherwise, we 3493 // need an available scale field. 3494 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 3495 return false; 3496 3497 ExtAddrMode TestAddrMode = AddrMode; 3498 3499 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 3500 // [A+B + A*7] -> [B+A*8]. 3501 TestAddrMode.Scale += Scale; 3502 TestAddrMode.ScaledReg = ScaleReg; 3503 3504 // If the new address isn't legal, bail out. 3505 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) 3506 return false; 3507 3508 // It was legal, so commit it. 3509 AddrMode = TestAddrMode; 3510 3511 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 3512 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 3513 // X*Scale + C*Scale to addr mode. 3514 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 3515 if (isa<Instruction>(ScaleReg) && // not a constant expr. 3516 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { 3517 TestAddrMode.InBounds = false; 3518 TestAddrMode.ScaledReg = AddLHS; 3519 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; 3520 3521 // If this addressing mode is legal, commit it and remember that we folded 3522 // this instruction. 3523 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { 3524 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 3525 AddrMode = TestAddrMode; 3526 return true; 3527 } 3528 } 3529 3530 // Otherwise, not (x+c)*scale, just return what we have. 3531 return true; 3532 } 3533 3534 /// This is a little filter, which returns true if an addressing computation 3535 /// involving I might be folded into a load/store accessing it. 3536 /// This doesn't need to be perfect, but needs to accept at least 3537 /// the set of instructions that MatchOperationAddr can. 3538 static bool MightBeFoldableInst(Instruction *I) { 3539 switch (I->getOpcode()) { 3540 case Instruction::BitCast: 3541 case Instruction::AddrSpaceCast: 3542 // Don't touch identity bitcasts. 3543 if (I->getType() == I->getOperand(0)->getType()) 3544 return false; 3545 return I->getType()->isIntOrPtrTy(); 3546 case Instruction::PtrToInt: 3547 // PtrToInt is always a noop, as we know that the int type is pointer sized. 3548 return true; 3549 case Instruction::IntToPtr: 3550 // We know the input is intptr_t, so this is foldable. 3551 return true; 3552 case Instruction::Add: 3553 return true; 3554 case Instruction::Mul: 3555 case Instruction::Shl: 3556 // Can only handle X*C and X << C. 3557 return isa<ConstantInt>(I->getOperand(1)); 3558 case Instruction::GetElementPtr: 3559 return true; 3560 default: 3561 return false; 3562 } 3563 } 3564 3565 /// Check whether or not \p Val is a legal instruction for \p TLI. 3566 /// \note \p Val is assumed to be the product of some type promotion. 3567 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed 3568 /// to be legal, as the non-promoted value would have had the same state. 3569 static bool isPromotedInstructionLegal(const TargetLowering &TLI, 3570 const DataLayout &DL, Value *Val) { 3571 Instruction *PromotedInst = dyn_cast<Instruction>(Val); 3572 if (!PromotedInst) 3573 return false; 3574 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); 3575 // If the ISDOpcode is undefined, it was undefined before the promotion. 3576 if (!ISDOpcode) 3577 return true; 3578 // Otherwise, check if the promoted instruction is legal or not. 3579 return TLI.isOperationLegalOrCustom( 3580 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); 3581 } 3582 3583 namespace { 3584 3585 /// Hepler class to perform type promotion. 3586 class TypePromotionHelper { 3587 /// Utility function to add a promoted instruction \p ExtOpnd to 3588 /// \p PromotedInsts and record the type of extension we have seen. 3589 static void addPromotedInst(InstrToOrigTy &PromotedInsts, 3590 Instruction *ExtOpnd, 3591 bool IsSExt) { 3592 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; 3593 InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); 3594 if (It != PromotedInsts.end()) { 3595 // If the new extension is same as original, the information in 3596 // PromotedInsts[ExtOpnd] is still correct. 3597 if (It->second.getInt() == ExtTy) 3598 return; 3599 3600 // Now the new extension is different from old extension, we make 3601 // the type information invalid by setting extension type to 3602 // BothExtension. 3603 ExtTy = BothExtension; 3604 } 3605 PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); 3606 } 3607 3608 /// Utility function to query the original type of instruction \p Opnd 3609 /// with a matched extension type. If the extension doesn't match, we 3610 /// cannot use the information we had on the original type. 3611 /// BothExtension doesn't match any extension type. 3612 static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, 3613 Instruction *Opnd, 3614 bool IsSExt) { 3615 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; 3616 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 3617 if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) 3618 return It->second.getPointer(); 3619 return nullptr; 3620 } 3621 3622 /// Utility function to check whether or not a sign or zero extension 3623 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 3624 /// either using the operands of \p Inst or promoting \p Inst. 3625 /// The type of the extension is defined by \p IsSExt. 3626 /// In other words, check if: 3627 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 3628 /// #1 Promotion applies: 3629 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 3630 /// #2 Operand reuses: 3631 /// ext opnd1 to ConsideredExtType. 3632 /// \p PromotedInsts maps the instructions to their type before promotion. 3633 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 3634 const InstrToOrigTy &PromotedInsts, bool IsSExt); 3635 3636 /// Utility function to determine if \p OpIdx should be promoted when 3637 /// promoting \p Inst. 3638 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 3639 return !(isa<SelectInst>(Inst) && OpIdx == 0); 3640 } 3641 3642 /// Utility function to promote the operand of \p Ext when this 3643 /// operand is a promotable trunc or sext or zext. 3644 /// \p PromotedInsts maps the instructions to their type before promotion. 3645 /// \p CreatedInstsCost[out] contains the cost of all instructions 3646 /// created to promote the operand of Ext. 3647 /// Newly added extensions are inserted in \p Exts. 3648 /// Newly added truncates are inserted in \p Truncs. 3649 /// Should never be called directly. 3650 /// \return The promoted value which is used instead of Ext. 3651 static Value *promoteOperandForTruncAndAnyExt( 3652 Instruction *Ext, TypePromotionTransaction &TPT, 3653 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3654 SmallVectorImpl<Instruction *> *Exts, 3655 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 3656 3657 /// Utility function to promote the operand of \p Ext when this 3658 /// operand is promotable and is not a supported trunc or sext. 3659 /// \p PromotedInsts maps the instructions to their type before promotion. 3660 /// \p CreatedInstsCost[out] contains the cost of all the instructions 3661 /// created to promote the operand of Ext. 3662 /// Newly added extensions are inserted in \p Exts. 3663 /// Newly added truncates are inserted in \p Truncs. 3664 /// Should never be called directly. 3665 /// \return The promoted value which is used instead of Ext. 3666 static Value *promoteOperandForOther(Instruction *Ext, 3667 TypePromotionTransaction &TPT, 3668 InstrToOrigTy &PromotedInsts, 3669 unsigned &CreatedInstsCost, 3670 SmallVectorImpl<Instruction *> *Exts, 3671 SmallVectorImpl<Instruction *> *Truncs, 3672 const TargetLowering &TLI, bool IsSExt); 3673 3674 /// \see promoteOperandForOther. 3675 static Value *signExtendOperandForOther( 3676 Instruction *Ext, TypePromotionTransaction &TPT, 3677 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3678 SmallVectorImpl<Instruction *> *Exts, 3679 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3680 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3681 Exts, Truncs, TLI, true); 3682 } 3683 3684 /// \see promoteOperandForOther. 3685 static Value *zeroExtendOperandForOther( 3686 Instruction *Ext, TypePromotionTransaction &TPT, 3687 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3688 SmallVectorImpl<Instruction *> *Exts, 3689 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3690 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3691 Exts, Truncs, TLI, false); 3692 } 3693 3694 public: 3695 /// Type for the utility function that promotes the operand of Ext. 3696 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, 3697 InstrToOrigTy &PromotedInsts, 3698 unsigned &CreatedInstsCost, 3699 SmallVectorImpl<Instruction *> *Exts, 3700 SmallVectorImpl<Instruction *> *Truncs, 3701 const TargetLowering &TLI); 3702 3703 /// Given a sign/zero extend instruction \p Ext, return the appropriate 3704 /// action to promote the operand of \p Ext instead of using Ext. 3705 /// \return NULL if no promotable action is possible with the current 3706 /// sign extension. 3707 /// \p InsertedInsts keeps track of all the instructions inserted by the 3708 /// other CodeGenPrepare optimizations. This information is important 3709 /// because we do not want to promote these instructions as CodeGenPrepare 3710 /// will reinsert them later. Thus creating an infinite loop: create/remove. 3711 /// \p PromotedInsts maps the instructions to their type before promotion. 3712 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, 3713 const TargetLowering &TLI, 3714 const InstrToOrigTy &PromotedInsts); 3715 }; 3716 3717 } // end anonymous namespace 3718 3719 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 3720 Type *ConsideredExtType, 3721 const InstrToOrigTy &PromotedInsts, 3722 bool IsSExt) { 3723 // The promotion helper does not know how to deal with vector types yet. 3724 // To be able to fix that, we would need to fix the places where we 3725 // statically extend, e.g., constants and such. 3726 if (Inst->getType()->isVectorTy()) 3727 return false; 3728 3729 // We can always get through zext. 3730 if (isa<ZExtInst>(Inst)) 3731 return true; 3732 3733 // sext(sext) is ok too. 3734 if (IsSExt && isa<SExtInst>(Inst)) 3735 return true; 3736 3737 // We can get through binary operator, if it is legal. In other words, the 3738 // binary operator must have a nuw or nsw flag. 3739 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 3740 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && 3741 ((!IsSExt && BinOp->hasNoUnsignedWrap()) || 3742 (IsSExt && BinOp->hasNoSignedWrap()))) 3743 return true; 3744 3745 // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) 3746 if ((Inst->getOpcode() == Instruction::And || 3747 Inst->getOpcode() == Instruction::Or)) 3748 return true; 3749 3750 // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) 3751 if (Inst->getOpcode() == Instruction::Xor) { 3752 const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)); 3753 // Make sure it is not a NOT. 3754 if (Cst && !Cst->getValue().isAllOnesValue()) 3755 return true; 3756 } 3757 3758 // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst)) 3759 // It may change a poisoned value into a regular value, like 3760 // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12 3761 // poisoned value regular value 3762 // It should be OK since undef covers valid value. 3763 if (Inst->getOpcode() == Instruction::LShr && !IsSExt) 3764 return true; 3765 3766 // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) 3767 // It may change a poisoned value into a regular value, like 3768 // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 3769 // poisoned value regular value 3770 // It should be OK since undef covers valid value. 3771 if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { 3772 const Instruction *ExtInst = 3773 dyn_cast<const Instruction>(*Inst->user_begin()); 3774 if (ExtInst->hasOneUse()) { 3775 const Instruction *AndInst = 3776 dyn_cast<const Instruction>(*ExtInst->user_begin()); 3777 if (AndInst && AndInst->getOpcode() == Instruction::And) { 3778 const ConstantInt *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); 3779 if (Cst && 3780 Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) 3781 return true; 3782 } 3783 } 3784 } 3785 3786 // Check if we can do the following simplification. 3787 // ext(trunc(opnd)) --> ext(opnd) 3788 if (!isa<TruncInst>(Inst)) 3789 return false; 3790 3791 Value *OpndVal = Inst->getOperand(0); 3792 // Check if we can use this operand in the extension. 3793 // If the type is larger than the result type of the extension, we cannot. 3794 if (!OpndVal->getType()->isIntegerTy() || 3795 OpndVal->getType()->getIntegerBitWidth() > 3796 ConsideredExtType->getIntegerBitWidth()) 3797 return false; 3798 3799 // If the operand of the truncate is not an instruction, we will not have 3800 // any information on the dropped bits. 3801 // (Actually we could for constant but it is not worth the extra logic). 3802 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 3803 if (!Opnd) 3804 return false; 3805 3806 // Check if the source of the type is narrow enough. 3807 // I.e., check that trunc just drops extended bits of the same kind of 3808 // the extension. 3809 // #1 get the type of the operand and check the kind of the extended bits. 3810 const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); 3811 if (OpndType) 3812 ; 3813 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) 3814 OpndType = Opnd->getOperand(0)->getType(); 3815 else 3816 return false; 3817 3818 // #2 check that the truncate just drops extended bits. 3819 return Inst->getType()->getIntegerBitWidth() >= 3820 OpndType->getIntegerBitWidth(); 3821 } 3822 3823 TypePromotionHelper::Action TypePromotionHelper::getAction( 3824 Instruction *Ext, const SetOfInstrs &InsertedInsts, 3825 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 3826 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 3827 "Unexpected instruction type"); 3828 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); 3829 Type *ExtTy = Ext->getType(); 3830 bool IsSExt = isa<SExtInst>(Ext); 3831 // If the operand of the extension is not an instruction, we cannot 3832 // get through. 3833 // If it, check we can get through. 3834 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) 3835 return nullptr; 3836 3837 // Do not promote if the operand has been added by codegenprepare. 3838 // Otherwise, it means we are undoing an optimization that is likely to be 3839 // redone, thus causing potential infinite loop. 3840 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) 3841 return nullptr; 3842 3843 // SExt or Trunc instructions. 3844 // Return the related handler. 3845 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || 3846 isa<ZExtInst>(ExtOpnd)) 3847 return promoteOperandForTruncAndAnyExt; 3848 3849 // Regular instruction. 3850 // Abort early if we will have to insert non-free instructions. 3851 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) 3852 return nullptr; 3853 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; 3854 } 3855 3856 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 3857 Instruction *SExt, TypePromotionTransaction &TPT, 3858 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3859 SmallVectorImpl<Instruction *> *Exts, 3860 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3861 // By construction, the operand of SExt is an instruction. Otherwise we cannot 3862 // get through it and this method should not be called. 3863 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 3864 Value *ExtVal = SExt; 3865 bool HasMergedNonFreeExt = false; 3866 if (isa<ZExtInst>(SExtOpnd)) { 3867 // Replace s|zext(zext(opnd)) 3868 // => zext(opnd). 3869 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); 3870 Value *ZExt = 3871 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 3872 TPT.replaceAllUsesWith(SExt, ZExt); 3873 TPT.eraseInstruction(SExt); 3874 ExtVal = ZExt; 3875 } else { 3876 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) 3877 // => z|sext(opnd). 3878 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 3879 } 3880 CreatedInstsCost = 0; 3881 3882 // Remove dead code. 3883 if (SExtOpnd->use_empty()) 3884 TPT.eraseInstruction(SExtOpnd); 3885 3886 // Check if the extension is still needed. 3887 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 3888 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 3889 if (ExtInst) { 3890 if (Exts) 3891 Exts->push_back(ExtInst); 3892 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 3893 } 3894 return ExtVal; 3895 } 3896 3897 // At this point we have: ext ty opnd to ty. 3898 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 3899 Value *NextVal = ExtInst->getOperand(0); 3900 TPT.eraseInstruction(ExtInst, NextVal); 3901 return NextVal; 3902 } 3903 3904 Value *TypePromotionHelper::promoteOperandForOther( 3905 Instruction *Ext, TypePromotionTransaction &TPT, 3906 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3907 SmallVectorImpl<Instruction *> *Exts, 3908 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 3909 bool IsSExt) { 3910 // By construction, the operand of Ext is an instruction. Otherwise we cannot 3911 // get through it and this method should not be called. 3912 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 3913 CreatedInstsCost = 0; 3914 if (!ExtOpnd->hasOneUse()) { 3915 // ExtOpnd will be promoted. 3916 // All its uses, but Ext, will need to use a truncated value of the 3917 // promoted version. 3918 // Create the truncate now. 3919 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 3920 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 3921 // Insert it just after the definition. 3922 ITrunc->moveAfter(ExtOpnd); 3923 if (Truncs) 3924 Truncs->push_back(ITrunc); 3925 } 3926 3927 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 3928 // Restore the operand of Ext (which has been replaced by the previous call 3929 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 3930 TPT.setOperand(Ext, 0, ExtOpnd); 3931 } 3932 3933 // Get through the Instruction: 3934 // 1. Update its type. 3935 // 2. Replace the uses of Ext by Inst. 3936 // 3. Extend each operand that needs to be extended. 3937 3938 // Remember the original type of the instruction before promotion. 3939 // This is useful to know that the high bits are sign extended bits. 3940 addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); 3941 // Step #1. 3942 TPT.mutateType(ExtOpnd, Ext->getType()); 3943 // Step #2. 3944 TPT.replaceAllUsesWith(Ext, ExtOpnd); 3945 // Step #3. 3946 Instruction *ExtForOpnd = Ext; 3947 3948 LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); 3949 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 3950 ++OpIdx) { 3951 LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 3952 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 3953 !shouldExtOperand(ExtOpnd, OpIdx)) { 3954 LLVM_DEBUG(dbgs() << "No need to propagate\n"); 3955 continue; 3956 } 3957 // Check if we can statically extend the operand. 3958 Value *Opnd = ExtOpnd->getOperand(OpIdx); 3959 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 3960 LLVM_DEBUG(dbgs() << "Statically extend\n"); 3961 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 3962 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 3963 : Cst->getValue().zext(BitWidth); 3964 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 3965 continue; 3966 } 3967 // UndefValue are typed, so we have to statically sign extend them. 3968 if (isa<UndefValue>(Opnd)) { 3969 LLVM_DEBUG(dbgs() << "Statically extend\n"); 3970 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); 3971 continue; 3972 } 3973 3974 // Otherwise we have to explicitly sign extend the operand. 3975 // Check if Ext was reused to extend an operand. 3976 if (!ExtForOpnd) { 3977 // If yes, create a new one. 3978 LLVM_DEBUG(dbgs() << "More operands to ext\n"); 3979 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) 3980 : TPT.createZExt(Ext, Opnd, Ext->getType()); 3981 if (!isa<Instruction>(ValForExtOpnd)) { 3982 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); 3983 continue; 3984 } 3985 ExtForOpnd = cast<Instruction>(ValForExtOpnd); 3986 } 3987 if (Exts) 3988 Exts->push_back(ExtForOpnd); 3989 TPT.setOperand(ExtForOpnd, 0, Opnd); 3990 3991 // Move the sign extension before the insertion point. 3992 TPT.moveBefore(ExtForOpnd, ExtOpnd); 3993 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); 3994 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); 3995 // If more sext are required, new instructions will have to be created. 3996 ExtForOpnd = nullptr; 3997 } 3998 if (ExtForOpnd == Ext) { 3999 LLVM_DEBUG(dbgs() << "Extension is useless now\n"); 4000 TPT.eraseInstruction(Ext); 4001 } 4002 return ExtOpnd; 4003 } 4004 4005 /// Check whether or not promoting an instruction to a wider type is profitable. 4006 /// \p NewCost gives the cost of extension instructions created by the 4007 /// promotion. 4008 /// \p OldCost gives the cost of extension instructions before the promotion 4009 /// plus the number of instructions that have been 4010 /// matched in the addressing mode the promotion. 4011 /// \p PromotedOperand is the value that has been promoted. 4012 /// \return True if the promotion is profitable, false otherwise. 4013 bool AddressingModeMatcher::isPromotionProfitable( 4014 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { 4015 LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost 4016 << '\n'); 4017 // The cost of the new extensions is greater than the cost of the 4018 // old extension plus what we folded. 4019 // This is not profitable. 4020 if (NewCost > OldCost) 4021 return false; 4022 if (NewCost < OldCost) 4023 return true; 4024 // The promotion is neutral but it may help folding the sign extension in 4025 // loads for instance. 4026 // Check that we did not create an illegal instruction. 4027 return isPromotedInstructionLegal(TLI, DL, PromotedOperand); 4028 } 4029 4030 /// Given an instruction or constant expr, see if we can fold the operation 4031 /// into the addressing mode. If so, update the addressing mode and return 4032 /// true, otherwise return false without modifying AddrMode. 4033 /// If \p MovedAway is not NULL, it contains the information of whether or 4034 /// not AddrInst has to be folded into the addressing mode on success. 4035 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing 4036 /// because it has been moved away. 4037 /// Thus AddrInst must not be added in the matched instructions. 4038 /// This state can happen when AddrInst is a sext, since it may be moved away. 4039 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 4040 /// not be referenced anymore. 4041 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, 4042 unsigned Depth, 4043 bool *MovedAway) { 4044 // Avoid exponential behavior on extremely deep expression trees. 4045 if (Depth >= 5) return false; 4046 4047 // By default, all matched instructions stay in place. 4048 if (MovedAway) 4049 *MovedAway = false; 4050 4051 switch (Opcode) { 4052 case Instruction::PtrToInt: 4053 // PtrToInt is always a noop, as we know that the int type is pointer sized. 4054 return matchAddr(AddrInst->getOperand(0), Depth); 4055 case Instruction::IntToPtr: { 4056 auto AS = AddrInst->getType()->getPointerAddressSpace(); 4057 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 4058 // This inttoptr is a no-op if the integer type is pointer sized. 4059 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 4060 return matchAddr(AddrInst->getOperand(0), Depth); 4061 return false; 4062 } 4063 case Instruction::BitCast: 4064 // BitCast is always a noop, and we can handle it as long as it is 4065 // int->int or pointer->pointer (we don't want int<->fp or something). 4066 if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && 4067 // Don't touch identity bitcasts. These were probably put here by LSR, 4068 // and we don't want to mess around with them. Assume it knows what it 4069 // is doing. 4070 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 4071 return matchAddr(AddrInst->getOperand(0), Depth); 4072 return false; 4073 case Instruction::AddrSpaceCast: { 4074 unsigned SrcAS 4075 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 4076 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 4077 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 4078 return matchAddr(AddrInst->getOperand(0), Depth); 4079 return false; 4080 } 4081 case Instruction::Add: { 4082 // Check to see if we can merge in the RHS then the LHS. If so, we win. 4083 ExtAddrMode BackupAddrMode = AddrMode; 4084 unsigned OldSize = AddrModeInsts.size(); 4085 // Start a transaction at this point. 4086 // The LHS may match but not the RHS. 4087 // Therefore, we need a higher level restoration point to undo partially 4088 // matched operation. 4089 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4090 TPT.getRestorationPoint(); 4091 4092 AddrMode.InBounds = false; 4093 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 4094 matchAddr(AddrInst->getOperand(0), Depth+1)) 4095 return true; 4096 4097 // Restore the old addr mode info. 4098 AddrMode = BackupAddrMode; 4099 AddrModeInsts.resize(OldSize); 4100 TPT.rollback(LastKnownGood); 4101 4102 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 4103 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 4104 matchAddr(AddrInst->getOperand(1), Depth+1)) 4105 return true; 4106 4107 // Otherwise we definitely can't merge the ADD in. 4108 AddrMode = BackupAddrMode; 4109 AddrModeInsts.resize(OldSize); 4110 TPT.rollback(LastKnownGood); 4111 break; 4112 } 4113 //case Instruction::Or: 4114 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 4115 //break; 4116 case Instruction::Mul: 4117 case Instruction::Shl: { 4118 // Can only handle X*C and X << C. 4119 AddrMode.InBounds = false; 4120 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 4121 if (!RHS || RHS->getBitWidth() > 64) 4122 return false; 4123 int64_t Scale = RHS->getSExtValue(); 4124 if (Opcode == Instruction::Shl) 4125 Scale = 1LL << Scale; 4126 4127 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 4128 } 4129 case Instruction::GetElementPtr: { 4130 // Scan the GEP. We check it if it contains constant offsets and at most 4131 // one variable offset. 4132 int VariableOperand = -1; 4133 unsigned VariableScale = 0; 4134 4135 int64_t ConstantOffset = 0; 4136 gep_type_iterator GTI = gep_type_begin(AddrInst); 4137 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 4138 if (StructType *STy = GTI.getStructTypeOrNull()) { 4139 const StructLayout *SL = DL.getStructLayout(STy); 4140 unsigned Idx = 4141 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 4142 ConstantOffset += SL->getElementOffset(Idx); 4143 } else { 4144 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); 4145 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 4146 const APInt &CVal = CI->getValue(); 4147 if (CVal.getMinSignedBits() <= 64) { 4148 ConstantOffset += CVal.getSExtValue() * TypeSize; 4149 continue; 4150 } 4151 } 4152 if (TypeSize) { // Scales of zero don't do anything. 4153 // We only allow one variable index at the moment. 4154 if (VariableOperand != -1) 4155 return false; 4156 4157 // Remember the variable index. 4158 VariableOperand = i; 4159 VariableScale = TypeSize; 4160 } 4161 } 4162 } 4163 4164 // A common case is for the GEP to only do a constant offset. In this case, 4165 // just add it to the disp field and check validity. 4166 if (VariableOperand == -1) { 4167 AddrMode.BaseOffs += ConstantOffset; 4168 if (ConstantOffset == 0 || 4169 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 4170 // Check to see if we can fold the base pointer in too. 4171 if (matchAddr(AddrInst->getOperand(0), Depth+1)) { 4172 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4173 AddrMode.InBounds = false; 4174 return true; 4175 } 4176 } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && 4177 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && 4178 ConstantOffset > 0) { 4179 // Record GEPs with non-zero offsets as candidates for splitting in the 4180 // event that the offset cannot fit into the r+i addressing mode. 4181 // Simple and common case that only one GEP is used in calculating the 4182 // address for the memory access. 4183 Value *Base = AddrInst->getOperand(0); 4184 auto *BaseI = dyn_cast<Instruction>(Base); 4185 auto *GEP = cast<GetElementPtrInst>(AddrInst); 4186 if (isa<Argument>(Base) || isa<GlobalValue>(Base) || 4187 (BaseI && !isa<CastInst>(BaseI) && 4188 !isa<GetElementPtrInst>(BaseI))) { 4189 // If the base is an instruction, make sure the GEP is not in the same 4190 // basic block as the base. If the base is an argument or global 4191 // value, make sure the GEP is not in the entry block. Otherwise, 4192 // instruction selection can undo the split. Also make sure the 4193 // parent block allows inserting non-PHI instructions before the 4194 // terminator. 4195 BasicBlock *Parent = 4196 BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); 4197 if (GEP->getParent() != Parent && !Parent->getTerminator()->isEHPad()) 4198 LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); 4199 } 4200 } 4201 AddrMode.BaseOffs -= ConstantOffset; 4202 return false; 4203 } 4204 4205 // Save the valid addressing mode in case we can't match. 4206 ExtAddrMode BackupAddrMode = AddrMode; 4207 unsigned OldSize = AddrModeInsts.size(); 4208 4209 // See if the scale and offset amount is valid for this target. 4210 AddrMode.BaseOffs += ConstantOffset; 4211 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4212 AddrMode.InBounds = false; 4213 4214 // Match the base operand of the GEP. 4215 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 4216 // If it couldn't be matched, just stuff the value in a register. 4217 if (AddrMode.HasBaseReg) { 4218 AddrMode = BackupAddrMode; 4219 AddrModeInsts.resize(OldSize); 4220 return false; 4221 } 4222 AddrMode.HasBaseReg = true; 4223 AddrMode.BaseReg = AddrInst->getOperand(0); 4224 } 4225 4226 // Match the remaining variable portion of the GEP. 4227 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 4228 Depth)) { 4229 // If it couldn't be matched, try stuffing the base into a register 4230 // instead of matching it, and retrying the match of the scale. 4231 AddrMode = BackupAddrMode; 4232 AddrModeInsts.resize(OldSize); 4233 if (AddrMode.HasBaseReg) 4234 return false; 4235 AddrMode.HasBaseReg = true; 4236 AddrMode.BaseReg = AddrInst->getOperand(0); 4237 AddrMode.BaseOffs += ConstantOffset; 4238 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 4239 VariableScale, Depth)) { 4240 // If even that didn't work, bail. 4241 AddrMode = BackupAddrMode; 4242 AddrModeInsts.resize(OldSize); 4243 return false; 4244 } 4245 } 4246 4247 return true; 4248 } 4249 case Instruction::SExt: 4250 case Instruction::ZExt: { 4251 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 4252 if (!Ext) 4253 return false; 4254 4255 // Try to move this ext out of the way of the addressing mode. 4256 // Ask for a method for doing so. 4257 TypePromotionHelper::Action TPH = 4258 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 4259 if (!TPH) 4260 return false; 4261 4262 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4263 TPT.getRestorationPoint(); 4264 unsigned CreatedInstsCost = 0; 4265 unsigned ExtCost = !TLI.isExtFree(Ext); 4266 Value *PromotedOperand = 4267 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 4268 // SExt has been moved away. 4269 // Thus either it will be rematched later in the recursive calls or it is 4270 // gone. Anyway, we must not fold it into the addressing mode at this point. 4271 // E.g., 4272 // op = add opnd, 1 4273 // idx = ext op 4274 // addr = gep base, idx 4275 // is now: 4276 // promotedOpnd = ext opnd <- no match here 4277 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 4278 // addr = gep base, op <- match 4279 if (MovedAway) 4280 *MovedAway = true; 4281 4282 assert(PromotedOperand && 4283 "TypePromotionHelper should have filtered out those cases"); 4284 4285 ExtAddrMode BackupAddrMode = AddrMode; 4286 unsigned OldSize = AddrModeInsts.size(); 4287 4288 if (!matchAddr(PromotedOperand, Depth) || 4289 // The total of the new cost is equal to the cost of the created 4290 // instructions. 4291 // The total of the old cost is equal to the cost of the extension plus 4292 // what we have saved in the addressing mode. 4293 !isPromotionProfitable(CreatedInstsCost, 4294 ExtCost + (AddrModeInsts.size() - OldSize), 4295 PromotedOperand)) { 4296 AddrMode = BackupAddrMode; 4297 AddrModeInsts.resize(OldSize); 4298 LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 4299 TPT.rollback(LastKnownGood); 4300 return false; 4301 } 4302 return true; 4303 } 4304 } 4305 return false; 4306 } 4307 4308 /// If we can, try to add the value of 'Addr' into the current addressing mode. 4309 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 4310 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 4311 /// for the target. 4312 /// 4313 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 4314 // Start a transaction at this point that we will rollback if the matching 4315 // fails. 4316 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4317 TPT.getRestorationPoint(); 4318 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 4319 // Fold in immediates if legal for the target. 4320 AddrMode.BaseOffs += CI->getSExtValue(); 4321 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4322 return true; 4323 AddrMode.BaseOffs -= CI->getSExtValue(); 4324 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 4325 // If this is a global variable, try to fold it into the addressing mode. 4326 if (!AddrMode.BaseGV) { 4327 AddrMode.BaseGV = GV; 4328 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4329 return true; 4330 AddrMode.BaseGV = nullptr; 4331 } 4332 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 4333 ExtAddrMode BackupAddrMode = AddrMode; 4334 unsigned OldSize = AddrModeInsts.size(); 4335 4336 // Check to see if it is possible to fold this operation. 4337 bool MovedAway = false; 4338 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 4339 // This instruction may have been moved away. If so, there is nothing 4340 // to check here. 4341 if (MovedAway) 4342 return true; 4343 // Okay, it's possible to fold this. Check to see if it is actually 4344 // *profitable* to do so. We use a simple cost model to avoid increasing 4345 // register pressure too much. 4346 if (I->hasOneUse() || 4347 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 4348 AddrModeInsts.push_back(I); 4349 return true; 4350 } 4351 4352 // It isn't profitable to do this, roll back. 4353 //cerr << "NOT FOLDING: " << *I; 4354 AddrMode = BackupAddrMode; 4355 AddrModeInsts.resize(OldSize); 4356 TPT.rollback(LastKnownGood); 4357 } 4358 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 4359 if (matchOperationAddr(CE, CE->getOpcode(), Depth)) 4360 return true; 4361 TPT.rollback(LastKnownGood); 4362 } else if (isa<ConstantPointerNull>(Addr)) { 4363 // Null pointer gets folded without affecting the addressing mode. 4364 return true; 4365 } 4366 4367 // Worse case, the target should support [reg] addressing modes. :) 4368 if (!AddrMode.HasBaseReg) { 4369 AddrMode.HasBaseReg = true; 4370 AddrMode.BaseReg = Addr; 4371 // Still check for legality in case the target supports [imm] but not [i+r]. 4372 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4373 return true; 4374 AddrMode.HasBaseReg = false; 4375 AddrMode.BaseReg = nullptr; 4376 } 4377 4378 // If the base register is already taken, see if we can do [r+r]. 4379 if (AddrMode.Scale == 0) { 4380 AddrMode.Scale = 1; 4381 AddrMode.ScaledReg = Addr; 4382 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4383 return true; 4384 AddrMode.Scale = 0; 4385 AddrMode.ScaledReg = nullptr; 4386 } 4387 // Couldn't match. 4388 TPT.rollback(LastKnownGood); 4389 return false; 4390 } 4391 4392 /// Check to see if all uses of OpVal by the specified inline asm call are due 4393 /// to memory operands. If so, return true, otherwise return false. 4394 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 4395 const TargetLowering &TLI, 4396 const TargetRegisterInfo &TRI) { 4397 const Function *F = CI->getFunction(); 4398 TargetLowering::AsmOperandInfoVector TargetConstraints = 4399 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, 4400 ImmutableCallSite(CI)); 4401 4402 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4403 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4404 4405 // Compute the constraint code and ConstraintType to use. 4406 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 4407 4408 // If this asm operand is our Value*, and if it isn't an indirect memory 4409 // operand, we can't fold it! 4410 if (OpInfo.CallOperandVal == OpVal && 4411 (OpInfo.ConstraintType != TargetLowering::C_Memory || 4412 !OpInfo.isIndirect)) 4413 return false; 4414 } 4415 4416 return true; 4417 } 4418 4419 // Max number of memory uses to look at before aborting the search to conserve 4420 // compile time. 4421 static constexpr int MaxMemoryUsesToScan = 20; 4422 4423 /// Recursively walk all the uses of I until we find a memory use. 4424 /// If we find an obviously non-foldable instruction, return true. 4425 /// Add the ultimately found memory instructions to MemoryUses. 4426 static bool FindAllMemoryUses( 4427 Instruction *I, 4428 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 4429 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, 4430 const TargetRegisterInfo &TRI, int SeenInsts = 0) { 4431 // If we already considered this instruction, we're done. 4432 if (!ConsideredInsts.insert(I).second) 4433 return false; 4434 4435 // If this is an obviously unfoldable instruction, bail out. 4436 if (!MightBeFoldableInst(I)) 4437 return true; 4438 4439 const bool OptSize = I->getFunction()->optForSize(); 4440 4441 // Loop over all the uses, recursively processing them. 4442 for (Use &U : I->uses()) { 4443 // Conservatively return true if we're seeing a large number or a deep chain 4444 // of users. This avoids excessive compilation times in pathological cases. 4445 if (SeenInsts++ >= MaxMemoryUsesToScan) 4446 return true; 4447 4448 Instruction *UserI = cast<Instruction>(U.getUser()); 4449 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 4450 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 4451 continue; 4452 } 4453 4454 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 4455 unsigned opNo = U.getOperandNo(); 4456 if (opNo != StoreInst::getPointerOperandIndex()) 4457 return true; // Storing addr, not into addr. 4458 MemoryUses.push_back(std::make_pair(SI, opNo)); 4459 continue; 4460 } 4461 4462 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 4463 unsigned opNo = U.getOperandNo(); 4464 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 4465 return true; // Storing addr, not into addr. 4466 MemoryUses.push_back(std::make_pair(RMW, opNo)); 4467 continue; 4468 } 4469 4470 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 4471 unsigned opNo = U.getOperandNo(); 4472 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 4473 return true; // Storing addr, not into addr. 4474 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 4475 continue; 4476 } 4477 4478 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 4479 // If this is a cold call, we can sink the addressing calculation into 4480 // the cold path. See optimizeCallInst 4481 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 4482 continue; 4483 4484 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 4485 if (!IA) return true; 4486 4487 // If this is a memory operand, we're cool, otherwise bail out. 4488 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 4489 return true; 4490 continue; 4491 } 4492 4493 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, 4494 SeenInsts)) 4495 return true; 4496 } 4497 4498 return false; 4499 } 4500 4501 /// Return true if Val is already known to be live at the use site that we're 4502 /// folding it into. If so, there is no cost to include it in the addressing 4503 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 4504 /// instruction already. 4505 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 4506 Value *KnownLive2) { 4507 // If Val is either of the known-live values, we know it is live! 4508 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 4509 return true; 4510 4511 // All values other than instructions and arguments (e.g. constants) are live. 4512 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 4513 4514 // If Val is a constant sized alloca in the entry block, it is live, this is 4515 // true because it is just a reference to the stack/frame pointer, which is 4516 // live for the whole function. 4517 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 4518 if (AI->isStaticAlloca()) 4519 return true; 4520 4521 // Check to see if this value is already used in the memory instruction's 4522 // block. If so, it's already live into the block at the very least, so we 4523 // can reasonably fold it. 4524 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 4525 } 4526 4527 /// It is possible for the addressing mode of the machine to fold the specified 4528 /// instruction into a load or store that ultimately uses it. 4529 /// However, the specified instruction has multiple uses. 4530 /// Given this, it may actually increase register pressure to fold it 4531 /// into the load. For example, consider this code: 4532 /// 4533 /// X = ... 4534 /// Y = X+1 4535 /// use(Y) -> nonload/store 4536 /// Z = Y+1 4537 /// load Z 4538 /// 4539 /// In this case, Y has multiple uses, and can be folded into the load of Z 4540 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 4541 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 4542 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 4543 /// number of computations either. 4544 /// 4545 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 4546 /// X was live across 'load Z' for other reasons, we actually *would* want to 4547 /// fold the addressing mode in the Z case. This would make Y die earlier. 4548 bool AddressingModeMatcher:: 4549 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 4550 ExtAddrMode &AMAfter) { 4551 if (IgnoreProfitability) return true; 4552 4553 // AMBefore is the addressing mode before this instruction was folded into it, 4554 // and AMAfter is the addressing mode after the instruction was folded. Get 4555 // the set of registers referenced by AMAfter and subtract out those 4556 // referenced by AMBefore: this is the set of values which folding in this 4557 // address extends the lifetime of. 4558 // 4559 // Note that there are only two potential values being referenced here, 4560 // BaseReg and ScaleReg (global addresses are always available, as are any 4561 // folded immediates). 4562 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 4563 4564 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 4565 // lifetime wasn't extended by adding this instruction. 4566 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4567 BaseReg = nullptr; 4568 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4569 ScaledReg = nullptr; 4570 4571 // If folding this instruction (and it's subexprs) didn't extend any live 4572 // ranges, we're ok with it. 4573 if (!BaseReg && !ScaledReg) 4574 return true; 4575 4576 // If all uses of this instruction can have the address mode sunk into them, 4577 // we can remove the addressing mode and effectively trade one live register 4578 // for another (at worst.) In this context, folding an addressing mode into 4579 // the use is just a particularly nice way of sinking it. 4580 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 4581 SmallPtrSet<Instruction*, 16> ConsideredInsts; 4582 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI)) 4583 return false; // Has a non-memory, non-foldable use! 4584 4585 // Now that we know that all uses of this instruction are part of a chain of 4586 // computation involving only operations that could theoretically be folded 4587 // into a memory use, loop over each of these memory operation uses and see 4588 // if they could *actually* fold the instruction. The assumption is that 4589 // addressing modes are cheap and that duplicating the computation involved 4590 // many times is worthwhile, even on a fastpath. For sinking candidates 4591 // (i.e. cold call sites), this serves as a way to prevent excessive code 4592 // growth since most architectures have some reasonable small and fast way to 4593 // compute an effective address. (i.e LEA on x86) 4594 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 4595 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 4596 Instruction *User = MemoryUses[i].first; 4597 unsigned OpNo = MemoryUses[i].second; 4598 4599 // Get the access type of this use. If the use isn't a pointer, we don't 4600 // know what it accesses. 4601 Value *Address = User->getOperand(OpNo); 4602 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); 4603 if (!AddrTy) 4604 return false; 4605 Type *AddressAccessTy = AddrTy->getElementType(); 4606 unsigned AS = AddrTy->getAddressSpace(); 4607 4608 // Do a match against the root of this address, ignoring profitability. This 4609 // will tell us if the addressing mode for the memory operation will 4610 // *actually* cover the shared instruction. 4611 ExtAddrMode Result; 4612 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, 4613 0); 4614 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4615 TPT.getRestorationPoint(); 4616 AddressingModeMatcher Matcher( 4617 MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result, 4618 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP); 4619 Matcher.IgnoreProfitability = true; 4620 bool Success = Matcher.matchAddr(Address, 0); 4621 (void)Success; assert(Success && "Couldn't select *anything*?"); 4622 4623 // The match was to check the profitability, the changes made are not 4624 // part of the original matcher. Therefore, they should be dropped 4625 // otherwise the original matcher will not present the right state. 4626 TPT.rollback(LastKnownGood); 4627 4628 // If the match didn't cover I, then it won't be shared by it. 4629 if (!is_contained(MatchedAddrModeInsts, I)) 4630 return false; 4631 4632 MatchedAddrModeInsts.clear(); 4633 } 4634 4635 return true; 4636 } 4637 4638 /// Return true if the specified values are defined in a 4639 /// different basic block than BB. 4640 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 4641 if (Instruction *I = dyn_cast<Instruction>(V)) 4642 return I->getParent() != BB; 4643 return false; 4644 } 4645 4646 /// Sink addressing mode computation immediate before MemoryInst if doing so 4647 /// can be done without increasing register pressure. The need for the 4648 /// register pressure constraint means this can end up being an all or nothing 4649 /// decision for all uses of the same addressing computation. 4650 /// 4651 /// Load and Store Instructions often have addressing modes that can do 4652 /// significant amounts of computation. As such, instruction selection will try 4653 /// to get the load or store to do as much computation as possible for the 4654 /// program. The problem is that isel can only see within a single block. As 4655 /// such, we sink as much legal addressing mode work into the block as possible. 4656 /// 4657 /// This method is used to optimize both load/store and inline asms with memory 4658 /// operands. It's also used to sink addressing computations feeding into cold 4659 /// call sites into their (cold) basic block. 4660 /// 4661 /// The motivation for handling sinking into cold blocks is that doing so can 4662 /// both enable other address mode sinking (by satisfying the register pressure 4663 /// constraint above), and reduce register pressure globally (by removing the 4664 /// addressing mode computation from the fast path entirely.). 4665 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 4666 Type *AccessTy, unsigned AddrSpace) { 4667 Value *Repl = Addr; 4668 4669 // Try to collapse single-value PHI nodes. This is necessary to undo 4670 // unprofitable PRE transformations. 4671 SmallVector<Value*, 8> worklist; 4672 SmallPtrSet<Value*, 16> Visited; 4673 worklist.push_back(Addr); 4674 4675 // Use a worklist to iteratively look through PHI and select nodes, and 4676 // ensure that the addressing mode obtained from the non-PHI/select roots of 4677 // the graph are compatible. 4678 bool PhiOrSelectSeen = false; 4679 SmallVector<Instruction*, 16> AddrModeInsts; 4680 const SimplifyQuery SQ(*DL, TLInfo); 4681 AddressingModeCombiner AddrModes(SQ, Addr); 4682 TypePromotionTransaction TPT(RemovedInsts); 4683 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4684 TPT.getRestorationPoint(); 4685 while (!worklist.empty()) { 4686 Value *V = worklist.back(); 4687 worklist.pop_back(); 4688 4689 // We allow traversing cyclic Phi nodes. 4690 // In case of success after this loop we ensure that traversing through 4691 // Phi nodes ends up with all cases to compute address of the form 4692 // BaseGV + Base + Scale * Index + Offset 4693 // where Scale and Offset are constans and BaseGV, Base and Index 4694 // are exactly the same Values in all cases. 4695 // It means that BaseGV, Scale and Offset dominate our memory instruction 4696 // and have the same value as they had in address computation represented 4697 // as Phi. So we can safely sink address computation to memory instruction. 4698 if (!Visited.insert(V).second) 4699 continue; 4700 4701 // For a PHI node, push all of its incoming values. 4702 if (PHINode *P = dyn_cast<PHINode>(V)) { 4703 for (Value *IncValue : P->incoming_values()) 4704 worklist.push_back(IncValue); 4705 PhiOrSelectSeen = true; 4706 continue; 4707 } 4708 // Similar for select. 4709 if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 4710 worklist.push_back(SI->getFalseValue()); 4711 worklist.push_back(SI->getTrueValue()); 4712 PhiOrSelectSeen = true; 4713 continue; 4714 } 4715 4716 // For non-PHIs, determine the addressing mode being computed. Note that 4717 // the result may differ depending on what other uses our candidate 4718 // addressing instructions might have. 4719 AddrModeInsts.clear(); 4720 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, 4721 0); 4722 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 4723 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI, 4724 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP); 4725 4726 GetElementPtrInst *GEP = LargeOffsetGEP.first; 4727 if (GEP && GEP->getParent() != MemoryInst->getParent() && 4728 !NewGEPBases.count(GEP)) { 4729 // If splitting the underlying data structure can reduce the offset of a 4730 // GEP, collect the GEP. Skip the GEPs that are the new bases of 4731 // previously split data structures. 4732 LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP); 4733 if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end()) 4734 LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size(); 4735 } 4736 4737 NewAddrMode.OriginalValue = V; 4738 if (!AddrModes.addNewAddrMode(NewAddrMode)) 4739 break; 4740 } 4741 4742 // Try to combine the AddrModes we've collected. If we couldn't collect any, 4743 // or we have multiple but either couldn't combine them or combining them 4744 // wouldn't do anything useful, bail out now. 4745 if (!AddrModes.combineAddrModes()) { 4746 TPT.rollback(LastKnownGood); 4747 return false; 4748 } 4749 TPT.commit(); 4750 4751 // Get the combined AddrMode (or the only AddrMode, if we only had one). 4752 ExtAddrMode AddrMode = AddrModes.getAddrMode(); 4753 4754 // If all the instructions matched are already in this BB, don't do anything. 4755 // If we saw a Phi node then it is not local definitely, and if we saw a select 4756 // then we want to push the address calculation past it even if it's already 4757 // in this BB. 4758 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { 4759 return IsNonLocalValue(V, MemoryInst->getParent()); 4760 })) { 4761 LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode 4762 << "\n"); 4763 return false; 4764 } 4765 4766 // Insert this computation right after this user. Since our caller is 4767 // scanning from the top of the BB to the bottom, reuse of the expr are 4768 // guaranteed to happen later. 4769 IRBuilder<> Builder(MemoryInst); 4770 4771 // Now that we determined the addressing expression we want to use and know 4772 // that we have to sink it into this block. Check to see if we have already 4773 // done this for some other load/store instr in this block. If so, reuse 4774 // the computation. Before attempting reuse, check if the address is valid 4775 // as it may have been erased. 4776 4777 WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; 4778 4779 Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; 4780 if (SunkAddr) { 4781 LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode 4782 << " for " << *MemoryInst << "\n"); 4783 if (SunkAddr->getType() != Addr->getType()) 4784 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4785 } else if (AddrSinkUsingGEPs || 4786 (!AddrSinkUsingGEPs.getNumOccurrences() && TM && TTI->useAA())) { 4787 // By default, we use the GEP-based method when AA is used later. This 4788 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 4789 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 4790 << " for " << *MemoryInst << "\n"); 4791 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4792 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 4793 4794 // First, find the pointer. 4795 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 4796 ResultPtr = AddrMode.BaseReg; 4797 AddrMode.BaseReg = nullptr; 4798 } 4799 4800 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 4801 // We can't add more than one pointer together, nor can we scale a 4802 // pointer (both of which seem meaningless). 4803 if (ResultPtr || AddrMode.Scale != 1) 4804 return false; 4805 4806 ResultPtr = AddrMode.ScaledReg; 4807 AddrMode.Scale = 0; 4808 } 4809 4810 // It is only safe to sign extend the BaseReg if we know that the math 4811 // required to create it did not overflow before we extend it. Since 4812 // the original IR value was tossed in favor of a constant back when 4813 // the AddrMode was created we need to bail out gracefully if widths 4814 // do not match instead of extending it. 4815 // 4816 // (See below for code to add the scale.) 4817 if (AddrMode.Scale) { 4818 Type *ScaledRegTy = AddrMode.ScaledReg->getType(); 4819 if (cast<IntegerType>(IntPtrTy)->getBitWidth() > 4820 cast<IntegerType>(ScaledRegTy)->getBitWidth()) 4821 return false; 4822 } 4823 4824 if (AddrMode.BaseGV) { 4825 if (ResultPtr) 4826 return false; 4827 4828 ResultPtr = AddrMode.BaseGV; 4829 } 4830 4831 // If the real base value actually came from an inttoptr, then the matcher 4832 // will look through it and provide only the integer value. In that case, 4833 // use it here. 4834 if (!DL->isNonIntegralPointerType(Addr->getType())) { 4835 if (!ResultPtr && AddrMode.BaseReg) { 4836 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), 4837 "sunkaddr"); 4838 AddrMode.BaseReg = nullptr; 4839 } else if (!ResultPtr && AddrMode.Scale == 1) { 4840 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), 4841 "sunkaddr"); 4842 AddrMode.Scale = 0; 4843 } 4844 } 4845 4846 if (!ResultPtr && 4847 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 4848 SunkAddr = Constant::getNullValue(Addr->getType()); 4849 } else if (!ResultPtr) { 4850 return false; 4851 } else { 4852 Type *I8PtrTy = 4853 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 4854 Type *I8Ty = Builder.getInt8Ty(); 4855 4856 // Start with the base register. Do this first so that subsequent address 4857 // matching finds it last, which will prevent it from trying to match it 4858 // as the scaled value in case it happens to be a mul. That would be 4859 // problematic if we've sunk a different mul for the scale, because then 4860 // we'd end up sinking both muls. 4861 if (AddrMode.BaseReg) { 4862 Value *V = AddrMode.BaseReg; 4863 if (V->getType() != IntPtrTy) 4864 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4865 4866 ResultIndex = V; 4867 } 4868 4869 // Add the scale value. 4870 if (AddrMode.Scale) { 4871 Value *V = AddrMode.ScaledReg; 4872 if (V->getType() == IntPtrTy) { 4873 // done. 4874 } else { 4875 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < 4876 cast<IntegerType>(V->getType())->getBitWidth() && 4877 "We can't transform if ScaledReg is too narrow"); 4878 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4879 } 4880 4881 if (AddrMode.Scale != 1) 4882 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4883 "sunkaddr"); 4884 if (ResultIndex) 4885 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 4886 else 4887 ResultIndex = V; 4888 } 4889 4890 // Add in the Base Offset if present. 4891 if (AddrMode.BaseOffs) { 4892 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4893 if (ResultIndex) { 4894 // We need to add this separately from the scale above to help with 4895 // SDAG consecutive load/store merging. 4896 if (ResultPtr->getType() != I8PtrTy) 4897 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4898 ResultPtr = 4899 AddrMode.InBounds 4900 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 4901 "sunkaddr") 4902 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4903 } 4904 4905 ResultIndex = V; 4906 } 4907 4908 if (!ResultIndex) { 4909 SunkAddr = ResultPtr; 4910 } else { 4911 if (ResultPtr->getType() != I8PtrTy) 4912 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4913 SunkAddr = 4914 AddrMode.InBounds 4915 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 4916 "sunkaddr") 4917 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4918 } 4919 4920 if (SunkAddr->getType() != Addr->getType()) 4921 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4922 } 4923 } else { 4924 // We'd require a ptrtoint/inttoptr down the line, which we can't do for 4925 // non-integral pointers, so in that case bail out now. 4926 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; 4927 Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; 4928 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); 4929 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); 4930 if (DL->isNonIntegralPointerType(Addr->getType()) || 4931 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || 4932 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || 4933 (AddrMode.BaseGV && 4934 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) 4935 return false; 4936 4937 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 4938 << " for " << *MemoryInst << "\n"); 4939 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4940 Value *Result = nullptr; 4941 4942 // Start with the base register. Do this first so that subsequent address 4943 // matching finds it last, which will prevent it from trying to match it 4944 // as the scaled value in case it happens to be a mul. That would be 4945 // problematic if we've sunk a different mul for the scale, because then 4946 // we'd end up sinking both muls. 4947 if (AddrMode.BaseReg) { 4948 Value *V = AddrMode.BaseReg; 4949 if (V->getType()->isPointerTy()) 4950 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4951 if (V->getType() != IntPtrTy) 4952 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4953 Result = V; 4954 } 4955 4956 // Add the scale value. 4957 if (AddrMode.Scale) { 4958 Value *V = AddrMode.ScaledReg; 4959 if (V->getType() == IntPtrTy) { 4960 // done. 4961 } else if (V->getType()->isPointerTy()) { 4962 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4963 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 4964 cast<IntegerType>(V->getType())->getBitWidth()) { 4965 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4966 } else { 4967 // It is only safe to sign extend the BaseReg if we know that the math 4968 // required to create it did not overflow before we extend it. Since 4969 // the original IR value was tossed in favor of a constant back when 4970 // the AddrMode was created we need to bail out gracefully if widths 4971 // do not match instead of extending it. 4972 Instruction *I = dyn_cast_or_null<Instruction>(Result); 4973 if (I && (Result != AddrMode.BaseReg)) 4974 I->eraseFromParent(); 4975 return false; 4976 } 4977 if (AddrMode.Scale != 1) 4978 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4979 "sunkaddr"); 4980 if (Result) 4981 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4982 else 4983 Result = V; 4984 } 4985 4986 // Add in the BaseGV if present. 4987 if (AddrMode.BaseGV) { 4988 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 4989 if (Result) 4990 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4991 else 4992 Result = V; 4993 } 4994 4995 // Add in the Base Offset if present. 4996 if (AddrMode.BaseOffs) { 4997 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4998 if (Result) 4999 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5000 else 5001 Result = V; 5002 } 5003 5004 if (!Result) 5005 SunkAddr = Constant::getNullValue(Addr->getType()); 5006 else 5007 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 5008 } 5009 5010 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 5011 // Store the newly computed address into the cache. In the case we reused a 5012 // value, this should be idempotent. 5013 SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); 5014 5015 // If we have no uses, recursively delete the value and all dead instructions 5016 // using it. 5017 if (Repl->use_empty()) { 5018 // This can cause recursive deletion, which can invalidate our iterator. 5019 // Use a WeakTrackingVH to hold onto it in case this happens. 5020 Value *CurValue = &*CurInstIterator; 5021 WeakTrackingVH IterHandle(CurValue); 5022 BasicBlock *BB = CurInstIterator->getParent(); 5023 5024 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 5025 5026 if (IterHandle != CurValue) { 5027 // If the iterator instruction was recursively deleted, start over at the 5028 // start of the block. 5029 CurInstIterator = BB->begin(); 5030 SunkAddrs.clear(); 5031 } 5032 } 5033 ++NumMemoryInsts; 5034 return true; 5035 } 5036 5037 /// If there are any memory operands, use OptimizeMemoryInst to sink their 5038 /// address computing into the block when possible / profitable. 5039 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 5040 bool MadeChange = false; 5041 5042 const TargetRegisterInfo *TRI = 5043 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); 5044 TargetLowering::AsmOperandInfoVector TargetConstraints = 5045 TLI->ParseConstraints(*DL, TRI, CS); 5046 unsigned ArgNo = 0; 5047 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 5048 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 5049 5050 // Compute the constraint code and ConstraintType to use. 5051 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 5052 5053 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 5054 OpInfo.isIndirect) { 5055 Value *OpVal = CS->getArgOperand(ArgNo++); 5056 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 5057 } else if (OpInfo.Type == InlineAsm::isInput) 5058 ArgNo++; 5059 } 5060 5061 return MadeChange; 5062 } 5063 5064 /// Check if all the uses of \p Val are equivalent (or free) zero or 5065 /// sign extensions. 5066 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 5067 assert(!Val->use_empty() && "Input must have at least one use"); 5068 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 5069 bool IsSExt = isa<SExtInst>(FirstUser); 5070 Type *ExtTy = FirstUser->getType(); 5071 for (const User *U : Val->users()) { 5072 const Instruction *UI = cast<Instruction>(U); 5073 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 5074 return false; 5075 Type *CurTy = UI->getType(); 5076 // Same input and output types: Same instruction after CSE. 5077 if (CurTy == ExtTy) 5078 continue; 5079 5080 // If IsSExt is true, we are in this situation: 5081 // a = Val 5082 // b = sext ty1 a to ty2 5083 // c = sext ty1 a to ty3 5084 // Assuming ty2 is shorter than ty3, this could be turned into: 5085 // a = Val 5086 // b = sext ty1 a to ty2 5087 // c = sext ty2 b to ty3 5088 // However, the last sext is not free. 5089 if (IsSExt) 5090 return false; 5091 5092 // This is a ZExt, maybe this is free to extend from one type to another. 5093 // In that case, we would not account for a different use. 5094 Type *NarrowTy; 5095 Type *LargeTy; 5096 if (ExtTy->getScalarType()->getIntegerBitWidth() > 5097 CurTy->getScalarType()->getIntegerBitWidth()) { 5098 NarrowTy = CurTy; 5099 LargeTy = ExtTy; 5100 } else { 5101 NarrowTy = ExtTy; 5102 LargeTy = CurTy; 5103 } 5104 5105 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 5106 return false; 5107 } 5108 // All uses are the same or can be derived from one another for free. 5109 return true; 5110 } 5111 5112 /// Try to speculatively promote extensions in \p Exts and continue 5113 /// promoting through newly promoted operands recursively as far as doing so is 5114 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 5115 /// When some promotion happened, \p TPT contains the proper state to revert 5116 /// them. 5117 /// 5118 /// \return true if some promotion happened, false otherwise. 5119 bool CodeGenPrepare::tryToPromoteExts( 5120 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 5121 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 5122 unsigned CreatedInstsCost) { 5123 bool Promoted = false; 5124 5125 // Iterate over all the extensions to try to promote them. 5126 for (auto I : Exts) { 5127 // Early check if we directly have ext(load). 5128 if (isa<LoadInst>(I->getOperand(0))) { 5129 ProfitablyMovedExts.push_back(I); 5130 continue; 5131 } 5132 5133 // Check whether or not we want to do any promotion. The reason we have 5134 // this check inside the for loop is to catch the case where an extension 5135 // is directly fed by a load because in such case the extension can be moved 5136 // up without any promotion on its operands. 5137 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) 5138 return false; 5139 5140 // Get the action to perform the promotion. 5141 TypePromotionHelper::Action TPH = 5142 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 5143 // Check if we can promote. 5144 if (!TPH) { 5145 // Save the current extension as we cannot move up through its operand. 5146 ProfitablyMovedExts.push_back(I); 5147 continue; 5148 } 5149 5150 // Save the current state. 5151 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5152 TPT.getRestorationPoint(); 5153 SmallVector<Instruction *, 4> NewExts; 5154 unsigned NewCreatedInstsCost = 0; 5155 unsigned ExtCost = !TLI->isExtFree(I); 5156 // Promote. 5157 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, 5158 &NewExts, nullptr, *TLI); 5159 assert(PromotedVal && 5160 "TypePromotionHelper should have filtered out those cases"); 5161 5162 // We would be able to merge only one extension in a load. 5163 // Therefore, if we have more than 1 new extension we heuristically 5164 // cut this search path, because it means we degrade the code quality. 5165 // With exactly 2, the transformation is neutral, because we will merge 5166 // one extension but leave one. However, we optimistically keep going, 5167 // because the new extension may be removed too. 5168 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; 5169 // FIXME: It would be possible to propagate a negative value instead of 5170 // conservatively ceiling it to 0. 5171 TotalCreatedInstsCost = 5172 std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); 5173 if (!StressExtLdPromotion && 5174 (TotalCreatedInstsCost > 1 || 5175 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { 5176 // This promotion is not profitable, rollback to the previous state, and 5177 // save the current extension in ProfitablyMovedExts as the latest 5178 // speculative promotion turned out to be unprofitable. 5179 TPT.rollback(LastKnownGood); 5180 ProfitablyMovedExts.push_back(I); 5181 continue; 5182 } 5183 // Continue promoting NewExts as far as doing so is profitable. 5184 SmallVector<Instruction *, 2> NewlyMovedExts; 5185 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); 5186 bool NewPromoted = false; 5187 for (auto ExtInst : NewlyMovedExts) { 5188 Instruction *MovedExt = cast<Instruction>(ExtInst); 5189 Value *ExtOperand = MovedExt->getOperand(0); 5190 // If we have reached to a load, we need this extra profitability check 5191 // as it could potentially be merged into an ext(load). 5192 if (isa<LoadInst>(ExtOperand) && 5193 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || 5194 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) 5195 continue; 5196 5197 ProfitablyMovedExts.push_back(MovedExt); 5198 NewPromoted = true; 5199 } 5200 5201 // If none of speculative promotions for NewExts is profitable, rollback 5202 // and save the current extension (I) as the last profitable extension. 5203 if (!NewPromoted) { 5204 TPT.rollback(LastKnownGood); 5205 ProfitablyMovedExts.push_back(I); 5206 continue; 5207 } 5208 // The promotion is profitable. 5209 Promoted = true; 5210 } 5211 return Promoted; 5212 } 5213 5214 /// Merging redundant sexts when one is dominating the other. 5215 bool CodeGenPrepare::mergeSExts(Function &F, DominatorTree &DT) { 5216 bool Changed = false; 5217 for (auto &Entry : ValToSExtendedUses) { 5218 SExts &Insts = Entry.second; 5219 SExts CurPts; 5220 for (Instruction *Inst : Insts) { 5221 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || 5222 Inst->getOperand(0) != Entry.first) 5223 continue; 5224 bool inserted = false; 5225 for (auto &Pt : CurPts) { 5226 if (DT.dominates(Inst, Pt)) { 5227 Pt->replaceAllUsesWith(Inst); 5228 RemovedInsts.insert(Pt); 5229 Pt->removeFromParent(); 5230 Pt = Inst; 5231 inserted = true; 5232 Changed = true; 5233 break; 5234 } 5235 if (!DT.dominates(Pt, Inst)) 5236 // Give up if we need to merge in a common dominator as the 5237 // experiments show it is not profitable. 5238 continue; 5239 Inst->replaceAllUsesWith(Pt); 5240 RemovedInsts.insert(Inst); 5241 Inst->removeFromParent(); 5242 inserted = true; 5243 Changed = true; 5244 break; 5245 } 5246 if (!inserted) 5247 CurPts.push_back(Inst); 5248 } 5249 } 5250 return Changed; 5251 } 5252 5253 // Spliting large data structures so that the GEPs accessing them can have 5254 // smaller offsets so that they can be sunk to the same blocks as their users. 5255 // For example, a large struct starting from %base is splitted into two parts 5256 // where the second part starts from %new_base. 5257 // 5258 // Before: 5259 // BB0: 5260 // %base = 5261 // 5262 // BB1: 5263 // %gep0 = gep %base, off0 5264 // %gep1 = gep %base, off1 5265 // %gep2 = gep %base, off2 5266 // 5267 // BB2: 5268 // %load1 = load %gep0 5269 // %load2 = load %gep1 5270 // %load3 = load %gep2 5271 // 5272 // After: 5273 // BB0: 5274 // %base = 5275 // %new_base = gep %base, off0 5276 // 5277 // BB1: 5278 // %new_gep0 = %new_base 5279 // %new_gep1 = gep %new_base, off1 - off0 5280 // %new_gep2 = gep %new_base, off2 - off0 5281 // 5282 // BB2: 5283 // %load1 = load i32, i32* %new_gep0 5284 // %load2 = load i32, i32* %new_gep1 5285 // %load3 = load i32, i32* %new_gep2 5286 // 5287 // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because 5288 // their offsets are smaller enough to fit into the addressing mode. 5289 bool CodeGenPrepare::splitLargeGEPOffsets() { 5290 bool Changed = false; 5291 for (auto &Entry : LargeOffsetGEPMap) { 5292 Value *OldBase = Entry.first; 5293 SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> 5294 &LargeOffsetGEPs = Entry.second; 5295 auto compareGEPOffset = 5296 [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, 5297 const std::pair<GetElementPtrInst *, int64_t> &RHS) { 5298 if (LHS.first == RHS.first) 5299 return false; 5300 if (LHS.second != RHS.second) 5301 return LHS.second < RHS.second; 5302 return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; 5303 }; 5304 // Sorting all the GEPs of the same data structures based on the offsets. 5305 llvm::sort(LargeOffsetGEPs, compareGEPOffset); 5306 LargeOffsetGEPs.erase( 5307 std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()), 5308 LargeOffsetGEPs.end()); 5309 // Skip if all the GEPs have the same offsets. 5310 if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second) 5311 continue; 5312 GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first; 5313 int64_t BaseOffset = LargeOffsetGEPs.begin()->second; 5314 Value *NewBaseGEP = nullptr; 5315 5316 auto LargeOffsetGEP = LargeOffsetGEPs.begin(); 5317 while (LargeOffsetGEP != LargeOffsetGEPs.end()) { 5318 GetElementPtrInst *GEP = LargeOffsetGEP->first; 5319 int64_t Offset = LargeOffsetGEP->second; 5320 if (Offset != BaseOffset) { 5321 TargetLowering::AddrMode AddrMode; 5322 AddrMode.BaseOffs = Offset - BaseOffset; 5323 // The result type of the GEP might not be the type of the memory 5324 // access. 5325 if (!TLI->isLegalAddressingMode(*DL, AddrMode, 5326 GEP->getResultElementType(), 5327 GEP->getAddressSpace())) { 5328 // We need to create a new base if the offset to the current base is 5329 // too large to fit into the addressing mode. So, a very large struct 5330 // may be splitted into several parts. 5331 BaseGEP = GEP; 5332 BaseOffset = Offset; 5333 NewBaseGEP = nullptr; 5334 } 5335 } 5336 5337 // Generate a new GEP to replace the current one. 5338 LLVMContext &Ctx = GEP->getContext(); 5339 Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); 5340 Type *I8PtrTy = 5341 Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace()); 5342 Type *I8Ty = Type::getInt8Ty(Ctx); 5343 5344 if (!NewBaseGEP) { 5345 // Create a new base if we don't have one yet. Find the insertion 5346 // pointer for the new base first. 5347 BasicBlock::iterator NewBaseInsertPt; 5348 BasicBlock *NewBaseInsertBB; 5349 if (auto *BaseI = dyn_cast<Instruction>(OldBase)) { 5350 // If the base of the struct is an instruction, the new base will be 5351 // inserted close to it. 5352 NewBaseInsertBB = BaseI->getParent(); 5353 if (isa<PHINode>(BaseI)) 5354 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5355 else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) { 5356 NewBaseInsertBB = 5357 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest()); 5358 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5359 } else 5360 NewBaseInsertPt = std::next(BaseI->getIterator()); 5361 } else { 5362 // If the current base is an argument or global value, the new base 5363 // will be inserted to the entry block. 5364 NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock(); 5365 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5366 } 5367 IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt); 5368 // Create a new base. 5369 Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset); 5370 NewBaseGEP = OldBase; 5371 if (NewBaseGEP->getType() != I8PtrTy) 5372 NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy); 5373 NewBaseGEP = 5374 NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep"); 5375 NewGEPBases.insert(NewBaseGEP); 5376 } 5377 5378 IRBuilder<> Builder(GEP); 5379 Value *NewGEP = NewBaseGEP; 5380 if (Offset == BaseOffset) { 5381 if (GEP->getType() != I8PtrTy) 5382 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5383 } else { 5384 // Calculate the new offset for the new GEP. 5385 Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset); 5386 NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index); 5387 5388 if (GEP->getType() != I8PtrTy) 5389 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5390 } 5391 GEP->replaceAllUsesWith(NewGEP); 5392 LargeOffsetGEPID.erase(GEP); 5393 LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP); 5394 GEP->eraseFromParent(); 5395 Changed = true; 5396 } 5397 } 5398 return Changed; 5399 } 5400 5401 /// Return true, if an ext(load) can be formed from an extension in 5402 /// \p MovedExts. 5403 bool CodeGenPrepare::canFormExtLd( 5404 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, 5405 Instruction *&Inst, bool HasPromoted) { 5406 for (auto *MovedExtInst : MovedExts) { 5407 if (isa<LoadInst>(MovedExtInst->getOperand(0))) { 5408 LI = cast<LoadInst>(MovedExtInst->getOperand(0)); 5409 Inst = MovedExtInst; 5410 break; 5411 } 5412 } 5413 if (!LI) 5414 return false; 5415 5416 // If they're already in the same block, there's nothing to do. 5417 // Make the cheap checks first if we did not promote. 5418 // If we promoted, we need to check if it is indeed profitable. 5419 if (!HasPromoted && LI->getParent() == Inst->getParent()) 5420 return false; 5421 5422 return TLI->isExtLoad(LI, Inst, *DL); 5423 } 5424 5425 /// Move a zext or sext fed by a load into the same basic block as the load, 5426 /// unless conditions are unfavorable. This allows SelectionDAG to fold the 5427 /// extend into the load. 5428 /// 5429 /// E.g., 5430 /// \code 5431 /// %ld = load i32* %addr 5432 /// %add = add nuw i32 %ld, 4 5433 /// %zext = zext i32 %add to i64 5434 // \endcode 5435 /// => 5436 /// \code 5437 /// %ld = load i32* %addr 5438 /// %zext = zext i32 %ld to i64 5439 /// %add = add nuw i64 %zext, 4 5440 /// \encode 5441 /// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which 5442 /// allow us to match zext(load i32*) to i64. 5443 /// 5444 /// Also, try to promote the computations used to obtain a sign extended 5445 /// value used into memory accesses. 5446 /// E.g., 5447 /// \code 5448 /// a = add nsw i32 b, 3 5449 /// d = sext i32 a to i64 5450 /// e = getelementptr ..., i64 d 5451 /// \endcode 5452 /// => 5453 /// \code 5454 /// f = sext i32 b to i64 5455 /// a = add nsw i64 f, 3 5456 /// e = getelementptr ..., i64 a 5457 /// \endcode 5458 /// 5459 /// \p Inst[in/out] the extension may be modified during the process if some 5460 /// promotions apply. 5461 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { 5462 // ExtLoad formation and address type promotion infrastructure requires TLI to 5463 // be effective. 5464 if (!TLI) 5465 return false; 5466 5467 bool AllowPromotionWithoutCommonHeader = false; 5468 /// See if it is an interesting sext operations for the address type 5469 /// promotion before trying to promote it, e.g., the ones with the right 5470 /// type and used in memory accesses. 5471 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( 5472 *Inst, AllowPromotionWithoutCommonHeader); 5473 TypePromotionTransaction TPT(RemovedInsts); 5474 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5475 TPT.getRestorationPoint(); 5476 SmallVector<Instruction *, 1> Exts; 5477 SmallVector<Instruction *, 2> SpeculativelyMovedExts; 5478 Exts.push_back(Inst); 5479 5480 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); 5481 5482 // Look for a load being extended. 5483 LoadInst *LI = nullptr; 5484 Instruction *ExtFedByLoad; 5485 5486 // Try to promote a chain of computation if it allows to form an extended 5487 // load. 5488 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { 5489 assert(LI && ExtFedByLoad && "Expect a valid load and extension"); 5490 TPT.commit(); 5491 // Move the extend into the same block as the load 5492 ExtFedByLoad->moveAfter(LI); 5493 // CGP does not check if the zext would be speculatively executed when moved 5494 // to the same basic block as the load. Preserving its original location 5495 // would pessimize the debugging experience, as well as negatively impact 5496 // the quality of sample pgo. We don't want to use "line 0" as that has a 5497 // size cost in the line-table section and logically the zext can be seen as 5498 // part of the load. Therefore we conservatively reuse the same debug 5499 // location for the load and the zext. 5500 ExtFedByLoad->setDebugLoc(LI->getDebugLoc()); 5501 ++NumExtsMoved; 5502 Inst = ExtFedByLoad; 5503 return true; 5504 } 5505 5506 // Continue promoting SExts if known as considerable depending on targets. 5507 if (ATPConsiderable && 5508 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, 5509 HasPromoted, TPT, SpeculativelyMovedExts)) 5510 return true; 5511 5512 TPT.rollback(LastKnownGood); 5513 return false; 5514 } 5515 5516 // Perform address type promotion if doing so is profitable. 5517 // If AllowPromotionWithoutCommonHeader == false, we should find other sext 5518 // instructions that sign extended the same initial value. However, if 5519 // AllowPromotionWithoutCommonHeader == true, we expect promoting the 5520 // extension is just profitable. 5521 bool CodeGenPrepare::performAddressTypePromotion( 5522 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 5523 bool HasPromoted, TypePromotionTransaction &TPT, 5524 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 5525 bool Promoted = false; 5526 SmallPtrSet<Instruction *, 1> UnhandledExts; 5527 bool AllSeenFirst = true; 5528 for (auto I : SpeculativelyMovedExts) { 5529 Value *HeadOfChain = I->getOperand(0); 5530 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 5531 SeenChainsForSExt.find(HeadOfChain); 5532 // If there is an unhandled SExt which has the same header, try to promote 5533 // it as well. 5534 if (AlreadySeen != SeenChainsForSExt.end()) { 5535 if (AlreadySeen->second != nullptr) 5536 UnhandledExts.insert(AlreadySeen->second); 5537 AllSeenFirst = false; 5538 } 5539 } 5540 5541 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 5542 SpeculativelyMovedExts.size() == 1)) { 5543 TPT.commit(); 5544 if (HasPromoted) 5545 Promoted = true; 5546 for (auto I : SpeculativelyMovedExts) { 5547 Value *HeadOfChain = I->getOperand(0); 5548 SeenChainsForSExt[HeadOfChain] = nullptr; 5549 ValToSExtendedUses[HeadOfChain].push_back(I); 5550 } 5551 // Update Inst as promotion happen. 5552 Inst = SpeculativelyMovedExts.pop_back_val(); 5553 } else { 5554 // This is the first chain visited from the header, keep the current chain 5555 // as unhandled. Defer to promote this until we encounter another SExt 5556 // chain derived from the same header. 5557 for (auto I : SpeculativelyMovedExts) { 5558 Value *HeadOfChain = I->getOperand(0); 5559 SeenChainsForSExt[HeadOfChain] = Inst; 5560 } 5561 return false; 5562 } 5563 5564 if (!AllSeenFirst && !UnhandledExts.empty()) 5565 for (auto VisitedSExt : UnhandledExts) { 5566 if (RemovedInsts.count(VisitedSExt)) 5567 continue; 5568 TypePromotionTransaction TPT(RemovedInsts); 5569 SmallVector<Instruction *, 1> Exts; 5570 SmallVector<Instruction *, 2> Chains; 5571 Exts.push_back(VisitedSExt); 5572 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 5573 TPT.commit(); 5574 if (HasPromoted) 5575 Promoted = true; 5576 for (auto I : Chains) { 5577 Value *HeadOfChain = I->getOperand(0); 5578 // Mark this as handled. 5579 SeenChainsForSExt[HeadOfChain] = nullptr; 5580 ValToSExtendedUses[HeadOfChain].push_back(I); 5581 } 5582 } 5583 return Promoted; 5584 } 5585 5586 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 5587 BasicBlock *DefBB = I->getParent(); 5588 5589 // If the result of a {s|z}ext and its source are both live out, rewrite all 5590 // other uses of the source with result of extension. 5591 Value *Src = I->getOperand(0); 5592 if (Src->hasOneUse()) 5593 return false; 5594 5595 // Only do this xform if truncating is free. 5596 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 5597 return false; 5598 5599 // Only safe to perform the optimization if the source is also defined in 5600 // this block. 5601 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 5602 return false; 5603 5604 bool DefIsLiveOut = false; 5605 for (User *U : I->users()) { 5606 Instruction *UI = cast<Instruction>(U); 5607 5608 // Figure out which BB this ext is used in. 5609 BasicBlock *UserBB = UI->getParent(); 5610 if (UserBB == DefBB) continue; 5611 DefIsLiveOut = true; 5612 break; 5613 } 5614 if (!DefIsLiveOut) 5615 return false; 5616 5617 // Make sure none of the uses are PHI nodes. 5618 for (User *U : Src->users()) { 5619 Instruction *UI = cast<Instruction>(U); 5620 BasicBlock *UserBB = UI->getParent(); 5621 if (UserBB == DefBB) continue; 5622 // Be conservative. We don't want this xform to end up introducing 5623 // reloads just before load / store instructions. 5624 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 5625 return false; 5626 } 5627 5628 // InsertedTruncs - Only insert one trunc in each block once. 5629 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 5630 5631 bool MadeChange = false; 5632 for (Use &U : Src->uses()) { 5633 Instruction *User = cast<Instruction>(U.getUser()); 5634 5635 // Figure out which BB this ext is used in. 5636 BasicBlock *UserBB = User->getParent(); 5637 if (UserBB == DefBB) continue; 5638 5639 // Both src and def are live in this block. Rewrite the use. 5640 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 5641 5642 if (!InsertedTrunc) { 5643 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 5644 assert(InsertPt != UserBB->end()); 5645 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 5646 InsertedInsts.insert(InsertedTrunc); 5647 } 5648 5649 // Replace a use of the {s|z}ext source with a use of the result. 5650 U = InsertedTrunc; 5651 ++NumExtUses; 5652 MadeChange = true; 5653 } 5654 5655 return MadeChange; 5656 } 5657 5658 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 5659 // just after the load if the target can fold this into one extload instruction, 5660 // with the hope of eliminating some of the other later "and" instructions using 5661 // the loaded value. "and"s that are made trivially redundant by the insertion 5662 // of the new "and" are removed by this function, while others (e.g. those whose 5663 // path from the load goes through a phi) are left for isel to potentially 5664 // remove. 5665 // 5666 // For example: 5667 // 5668 // b0: 5669 // x = load i32 5670 // ... 5671 // b1: 5672 // y = and x, 0xff 5673 // z = use y 5674 // 5675 // becomes: 5676 // 5677 // b0: 5678 // x = load i32 5679 // x' = and x, 0xff 5680 // ... 5681 // b1: 5682 // z = use x' 5683 // 5684 // whereas: 5685 // 5686 // b0: 5687 // x1 = load i32 5688 // ... 5689 // b1: 5690 // x2 = load i32 5691 // ... 5692 // b2: 5693 // x = phi x1, x2 5694 // y = and x, 0xff 5695 // 5696 // becomes (after a call to optimizeLoadExt for each load): 5697 // 5698 // b0: 5699 // x1 = load i32 5700 // x1' = and x1, 0xff 5701 // ... 5702 // b1: 5703 // x2 = load i32 5704 // x2' = and x2, 0xff 5705 // ... 5706 // b2: 5707 // x = phi x1', x2' 5708 // y = and x, 0xff 5709 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 5710 if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) 5711 return false; 5712 5713 // Skip loads we've already transformed. 5714 if (Load->hasOneUse() && 5715 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 5716 return false; 5717 5718 // Look at all uses of Load, looking through phis, to determine how many bits 5719 // of the loaded value are needed. 5720 SmallVector<Instruction *, 8> WorkList; 5721 SmallPtrSet<Instruction *, 16> Visited; 5722 SmallVector<Instruction *, 8> AndsToMaybeRemove; 5723 for (auto *U : Load->users()) 5724 WorkList.push_back(cast<Instruction>(U)); 5725 5726 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 5727 unsigned BitWidth = LoadResultVT.getSizeInBits(); 5728 APInt DemandBits(BitWidth, 0); 5729 APInt WidestAndBits(BitWidth, 0); 5730 5731 while (!WorkList.empty()) { 5732 Instruction *I = WorkList.back(); 5733 WorkList.pop_back(); 5734 5735 // Break use-def graph loops. 5736 if (!Visited.insert(I).second) 5737 continue; 5738 5739 // For a PHI node, push all of its users. 5740 if (auto *Phi = dyn_cast<PHINode>(I)) { 5741 for (auto *U : Phi->users()) 5742 WorkList.push_back(cast<Instruction>(U)); 5743 continue; 5744 } 5745 5746 switch (I->getOpcode()) { 5747 case Instruction::And: { 5748 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 5749 if (!AndC) 5750 return false; 5751 APInt AndBits = AndC->getValue(); 5752 DemandBits |= AndBits; 5753 // Keep track of the widest and mask we see. 5754 if (AndBits.ugt(WidestAndBits)) 5755 WidestAndBits = AndBits; 5756 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 5757 AndsToMaybeRemove.push_back(I); 5758 break; 5759 } 5760 5761 case Instruction::Shl: { 5762 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 5763 if (!ShlC) 5764 return false; 5765 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 5766 DemandBits.setLowBits(BitWidth - ShiftAmt); 5767 break; 5768 } 5769 5770 case Instruction::Trunc: { 5771 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 5772 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 5773 DemandBits.setLowBits(TruncBitWidth); 5774 break; 5775 } 5776 5777 default: 5778 return false; 5779 } 5780 } 5781 5782 uint32_t ActiveBits = DemandBits.getActiveBits(); 5783 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 5784 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 5785 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 5786 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 5787 // followed by an AND. 5788 // TODO: Look into removing this restriction by fixing backends to either 5789 // return false for isLoadExtLegal for i1 or have them select this pattern to 5790 // a single instruction. 5791 // 5792 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 5793 // mask, since these are the only ands that will be removed by isel. 5794 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 5795 WidestAndBits != DemandBits) 5796 return false; 5797 5798 LLVMContext &Ctx = Load->getType()->getContext(); 5799 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 5800 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 5801 5802 // Reject cases that won't be matched as extloads. 5803 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 5804 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 5805 return false; 5806 5807 IRBuilder<> Builder(Load->getNextNode()); 5808 auto *NewAnd = dyn_cast<Instruction>( 5809 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 5810 // Mark this instruction as "inserted by CGP", so that other 5811 // optimizations don't touch it. 5812 InsertedInsts.insert(NewAnd); 5813 5814 // Replace all uses of load with new and (except for the use of load in the 5815 // new and itself). 5816 Load->replaceAllUsesWith(NewAnd); 5817 NewAnd->setOperand(0, Load); 5818 5819 // Remove any and instructions that are now redundant. 5820 for (auto *And : AndsToMaybeRemove) 5821 // Check that the and mask is the same as the one we decided to put on the 5822 // new and. 5823 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 5824 And->replaceAllUsesWith(NewAnd); 5825 if (&*CurInstIterator == And) 5826 CurInstIterator = std::next(And->getIterator()); 5827 And->eraseFromParent(); 5828 ++NumAndUses; 5829 } 5830 5831 ++NumAndsAdded; 5832 return true; 5833 } 5834 5835 /// Check if V (an operand of a select instruction) is an expensive instruction 5836 /// that is only used once. 5837 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 5838 auto *I = dyn_cast<Instruction>(V); 5839 // If it's safe to speculatively execute, then it should not have side 5840 // effects; therefore, it's safe to sink and possibly *not* execute. 5841 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 5842 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; 5843 } 5844 5845 /// Returns true if a SelectInst should be turned into an explicit branch. 5846 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 5847 const TargetLowering *TLI, 5848 SelectInst *SI) { 5849 // If even a predictable select is cheap, then a branch can't be cheaper. 5850 if (!TLI->isPredictableSelectExpensive()) 5851 return false; 5852 5853 // FIXME: This should use the same heuristics as IfConversion to determine 5854 // whether a select is better represented as a branch. 5855 5856 // If metadata tells us that the select condition is obviously predictable, 5857 // then we want to replace the select with a branch. 5858 uint64_t TrueWeight, FalseWeight; 5859 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 5860 uint64_t Max = std::max(TrueWeight, FalseWeight); 5861 uint64_t Sum = TrueWeight + FalseWeight; 5862 if (Sum != 0) { 5863 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 5864 if (Probability > TLI->getPredictableBranchThreshold()) 5865 return true; 5866 } 5867 } 5868 5869 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 5870 5871 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 5872 // comparison condition. If the compare has more than one use, there's 5873 // probably another cmov or setcc around, so it's not worth emitting a branch. 5874 if (!Cmp || !Cmp->hasOneUse()) 5875 return false; 5876 5877 // If either operand of the select is expensive and only needed on one side 5878 // of the select, we should form a branch. 5879 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 5880 sinkSelectOperand(TTI, SI->getFalseValue())) 5881 return true; 5882 5883 return false; 5884 } 5885 5886 /// If \p isTrue is true, return the true value of \p SI, otherwise return 5887 /// false value of \p SI. If the true/false value of \p SI is defined by any 5888 /// select instructions in \p Selects, look through the defining select 5889 /// instruction until the true/false value is not defined in \p Selects. 5890 static Value *getTrueOrFalseValue( 5891 SelectInst *SI, bool isTrue, 5892 const SmallPtrSet<const Instruction *, 2> &Selects) { 5893 Value *V; 5894 5895 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 5896 DefSI = dyn_cast<SelectInst>(V)) { 5897 assert(DefSI->getCondition() == SI->getCondition() && 5898 "The condition of DefSI does not match with SI"); 5899 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); 5900 } 5901 return V; 5902 } 5903 5904 /// If we have a SelectInst that will likely profit from branch prediction, 5905 /// turn it into a branch. 5906 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI, bool &ModifiedDT) { 5907 // If branch conversion isn't desirable, exit early. 5908 if (DisableSelectToBranch || OptSize || !TLI) 5909 return false; 5910 5911 // Find all consecutive select instructions that share the same condition. 5912 SmallVector<SelectInst *, 2> ASI; 5913 ASI.push_back(SI); 5914 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 5915 It != SI->getParent()->end(); ++It) { 5916 SelectInst *I = dyn_cast<SelectInst>(&*It); 5917 if (I && SI->getCondition() == I->getCondition()) { 5918 ASI.push_back(I); 5919 } else { 5920 break; 5921 } 5922 } 5923 5924 SelectInst *LastSI = ASI.back(); 5925 // Increment the current iterator to skip all the rest of select instructions 5926 // because they will be either "not lowered" or "all lowered" to branch. 5927 CurInstIterator = std::next(LastSI->getIterator()); 5928 5929 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 5930 5931 // Can we convert the 'select' to CF ? 5932 if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) 5933 return false; 5934 5935 TargetLowering::SelectSupportKind SelectKind; 5936 if (VectorCond) 5937 SelectKind = TargetLowering::VectorMaskSelect; 5938 else if (SI->getType()->isVectorTy()) 5939 SelectKind = TargetLowering::ScalarCondVectorVal; 5940 else 5941 SelectKind = TargetLowering::ScalarValSelect; 5942 5943 if (TLI->isSelectSupported(SelectKind) && 5944 !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) 5945 return false; 5946 5947 ModifiedDT = true; 5948 5949 // Transform a sequence like this: 5950 // start: 5951 // %cmp = cmp uge i32 %a, %b 5952 // %sel = select i1 %cmp, i32 %c, i32 %d 5953 // 5954 // Into: 5955 // start: 5956 // %cmp = cmp uge i32 %a, %b 5957 // br i1 %cmp, label %select.true, label %select.false 5958 // select.true: 5959 // br label %select.end 5960 // select.false: 5961 // br label %select.end 5962 // select.end: 5963 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 5964 // 5965 // In addition, we may sink instructions that produce %c or %d from 5966 // the entry block into the destination(s) of the new branch. 5967 // If the true or false blocks do not contain a sunken instruction, that 5968 // block and its branch may be optimized away. In that case, one side of the 5969 // first branch will point directly to select.end, and the corresponding PHI 5970 // predecessor block will be the start block. 5971 5972 // First, we split the block containing the select into 2 blocks. 5973 BasicBlock *StartBlock = SI->getParent(); 5974 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 5975 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 5976 5977 // Delete the unconditional branch that was just created by the split. 5978 StartBlock->getTerminator()->eraseFromParent(); 5979 5980 // These are the new basic blocks for the conditional branch. 5981 // At least one will become an actual new basic block. 5982 BasicBlock *TrueBlock = nullptr; 5983 BasicBlock *FalseBlock = nullptr; 5984 BranchInst *TrueBranch = nullptr; 5985 BranchInst *FalseBranch = nullptr; 5986 5987 // Sink expensive instructions into the conditional blocks to avoid executing 5988 // them speculatively. 5989 for (SelectInst *SI : ASI) { 5990 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 5991 if (TrueBlock == nullptr) { 5992 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 5993 EndBlock->getParent(), EndBlock); 5994 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 5995 TrueBranch->setDebugLoc(SI->getDebugLoc()); 5996 } 5997 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 5998 TrueInst->moveBefore(TrueBranch); 5999 } 6000 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 6001 if (FalseBlock == nullptr) { 6002 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 6003 EndBlock->getParent(), EndBlock); 6004 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6005 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6006 } 6007 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 6008 FalseInst->moveBefore(FalseBranch); 6009 } 6010 } 6011 6012 // If there was nothing to sink, then arbitrarily choose the 'false' side 6013 // for a new input value to the PHI. 6014 if (TrueBlock == FalseBlock) { 6015 assert(TrueBlock == nullptr && 6016 "Unexpected basic block transform while optimizing select"); 6017 6018 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 6019 EndBlock->getParent(), EndBlock); 6020 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6021 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6022 } 6023 6024 // Insert the real conditional branch based on the original condition. 6025 // If we did not create a new block for one of the 'true' or 'false' paths 6026 // of the condition, it means that side of the branch goes to the end block 6027 // directly and the path originates from the start block from the point of 6028 // view of the new PHI. 6029 BasicBlock *TT, *FT; 6030 if (TrueBlock == nullptr) { 6031 TT = EndBlock; 6032 FT = FalseBlock; 6033 TrueBlock = StartBlock; 6034 } else if (FalseBlock == nullptr) { 6035 TT = TrueBlock; 6036 FT = EndBlock; 6037 FalseBlock = StartBlock; 6038 } else { 6039 TT = TrueBlock; 6040 FT = FalseBlock; 6041 } 6042 IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); 6043 6044 SmallPtrSet<const Instruction *, 2> INS; 6045 INS.insert(ASI.begin(), ASI.end()); 6046 // Use reverse iterator because later select may use the value of the 6047 // earlier select, and we need to propagate value through earlier select 6048 // to get the PHI operand. 6049 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 6050 SelectInst *SI = *It; 6051 // The select itself is replaced with a PHI Node. 6052 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 6053 PN->takeName(SI); 6054 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 6055 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 6056 PN->setDebugLoc(SI->getDebugLoc()); 6057 6058 SI->replaceAllUsesWith(PN); 6059 SI->eraseFromParent(); 6060 INS.erase(SI); 6061 ++NumSelectsExpanded; 6062 } 6063 6064 // Instruct OptimizeBlock to skip to the next block. 6065 CurInstIterator = StartBlock->end(); 6066 return true; 6067 } 6068 6069 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 6070 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 6071 int SplatElem = -1; 6072 for (unsigned i = 0; i < Mask.size(); ++i) { 6073 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 6074 return false; 6075 SplatElem = Mask[i]; 6076 } 6077 6078 return true; 6079 } 6080 6081 /// Some targets have expensive vector shifts if the lanes aren't all the same 6082 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 6083 /// it's often worth sinking a shufflevector splat down to its use so that 6084 /// codegen can spot all lanes are identical. 6085 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 6086 BasicBlock *DefBB = SVI->getParent(); 6087 6088 // Only do this xform if variable vector shifts are particularly expensive. 6089 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 6090 return false; 6091 6092 // We only expect better codegen by sinking a shuffle if we can recognise a 6093 // constant splat. 6094 if (!isBroadcastShuffle(SVI)) 6095 return false; 6096 6097 // InsertedShuffles - Only insert a shuffle in each block once. 6098 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 6099 6100 bool MadeChange = false; 6101 for (User *U : SVI->users()) { 6102 Instruction *UI = cast<Instruction>(U); 6103 6104 // Figure out which BB this ext is used in. 6105 BasicBlock *UserBB = UI->getParent(); 6106 if (UserBB == DefBB) continue; 6107 6108 // For now only apply this when the splat is used by a shift instruction. 6109 if (!UI->isShift()) continue; 6110 6111 // Everything checks out, sink the shuffle if the user's block doesn't 6112 // already have a copy. 6113 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 6114 6115 if (!InsertedShuffle) { 6116 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 6117 assert(InsertPt != UserBB->end()); 6118 InsertedShuffle = 6119 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 6120 SVI->getOperand(2), "", &*InsertPt); 6121 } 6122 6123 UI->replaceUsesOfWith(SVI, InsertedShuffle); 6124 MadeChange = true; 6125 } 6126 6127 // If we removed all uses, nuke the shuffle. 6128 if (SVI->use_empty()) { 6129 SVI->eraseFromParent(); 6130 MadeChange = true; 6131 } 6132 6133 return MadeChange; 6134 } 6135 6136 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) { 6137 // If the operands of I can be folded into a target instruction together with 6138 // I, duplicate and sink them. 6139 SmallVector<Use *, 4> OpsToSink; 6140 if (!TLI || !TLI->shouldSinkOperands(I, OpsToSink)) 6141 return false; 6142 6143 // OpsToSink can contain multiple uses in a use chain (e.g. 6144 // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating 6145 // uses must come first, which means they are sunk first, temporarily creating 6146 // invalid IR. This will be fixed once their dominated users are sunk and 6147 // updated. 6148 BasicBlock *TargetBB = I->getParent(); 6149 bool Changed = false; 6150 SmallVector<Use *, 4> ToReplace; 6151 for (Use *U : OpsToSink) { 6152 auto *UI = cast<Instruction>(U->get()); 6153 if (UI->getParent() == TargetBB || isa<PHINode>(UI)) 6154 continue; 6155 ToReplace.push_back(U); 6156 } 6157 6158 SmallPtrSet<Instruction *, 4> MaybeDead; 6159 for (Use *U : ToReplace) { 6160 auto *UI = cast<Instruction>(U->get()); 6161 Instruction *NI = UI->clone(); 6162 MaybeDead.insert(UI); 6163 LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n"); 6164 NI->insertBefore(I); 6165 InsertedInsts.insert(NI); 6166 U->set(NI); 6167 Changed = true; 6168 } 6169 6170 // Remove instructions that are dead after sinking. 6171 for (auto *I : MaybeDead) 6172 if (!I->hasNUsesOrMore(1)) 6173 I->eraseFromParent(); 6174 6175 return Changed; 6176 } 6177 6178 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 6179 if (!TLI || !DL) 6180 return false; 6181 6182 Value *Cond = SI->getCondition(); 6183 Type *OldType = Cond->getType(); 6184 LLVMContext &Context = Cond->getContext(); 6185 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 6186 unsigned RegWidth = RegType.getSizeInBits(); 6187 6188 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 6189 return false; 6190 6191 // If the register width is greater than the type width, expand the condition 6192 // of the switch instruction and each case constant to the width of the 6193 // register. By widening the type of the switch condition, subsequent 6194 // comparisons (for case comparisons) will not need to be extended to the 6195 // preferred register width, so we will potentially eliminate N-1 extends, 6196 // where N is the number of cases in the switch. 6197 auto *NewType = Type::getIntNTy(Context, RegWidth); 6198 6199 // Zero-extend the switch condition and case constants unless the switch 6200 // condition is a function argument that is already being sign-extended. 6201 // In that case, we can avoid an unnecessary mask/extension by sign-extending 6202 // everything instead. 6203 Instruction::CastOps ExtType = Instruction::ZExt; 6204 if (auto *Arg = dyn_cast<Argument>(Cond)) 6205 if (Arg->hasSExtAttr()) 6206 ExtType = Instruction::SExt; 6207 6208 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 6209 ExtInst->insertBefore(SI); 6210 ExtInst->setDebugLoc(SI->getDebugLoc()); 6211 SI->setCondition(ExtInst); 6212 for (auto Case : SI->cases()) { 6213 APInt NarrowConst = Case.getCaseValue()->getValue(); 6214 APInt WideConst = (ExtType == Instruction::ZExt) ? 6215 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); 6216 Case.setValue(ConstantInt::get(Context, WideConst)); 6217 } 6218 6219 return true; 6220 } 6221 6222 6223 namespace { 6224 6225 /// Helper class to promote a scalar operation to a vector one. 6226 /// This class is used to move downward extractelement transition. 6227 /// E.g., 6228 /// a = vector_op <2 x i32> 6229 /// b = extractelement <2 x i32> a, i32 0 6230 /// c = scalar_op b 6231 /// store c 6232 /// 6233 /// => 6234 /// a = vector_op <2 x i32> 6235 /// c = vector_op a (equivalent to scalar_op on the related lane) 6236 /// * d = extractelement <2 x i32> c, i32 0 6237 /// * store d 6238 /// Assuming both extractelement and store can be combine, we get rid of the 6239 /// transition. 6240 class VectorPromoteHelper { 6241 /// DataLayout associated with the current module. 6242 const DataLayout &DL; 6243 6244 /// Used to perform some checks on the legality of vector operations. 6245 const TargetLowering &TLI; 6246 6247 /// Used to estimated the cost of the promoted chain. 6248 const TargetTransformInfo &TTI; 6249 6250 /// The transition being moved downwards. 6251 Instruction *Transition; 6252 6253 /// The sequence of instructions to be promoted. 6254 SmallVector<Instruction *, 4> InstsToBePromoted; 6255 6256 /// Cost of combining a store and an extract. 6257 unsigned StoreExtractCombineCost; 6258 6259 /// Instruction that will be combined with the transition. 6260 Instruction *CombineInst = nullptr; 6261 6262 /// The instruction that represents the current end of the transition. 6263 /// Since we are faking the promotion until we reach the end of the chain 6264 /// of computation, we need a way to get the current end of the transition. 6265 Instruction *getEndOfTransition() const { 6266 if (InstsToBePromoted.empty()) 6267 return Transition; 6268 return InstsToBePromoted.back(); 6269 } 6270 6271 /// Return the index of the original value in the transition. 6272 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 6273 /// c, is at index 0. 6274 unsigned getTransitionOriginalValueIdx() const { 6275 assert(isa<ExtractElementInst>(Transition) && 6276 "Other kind of transitions are not supported yet"); 6277 return 0; 6278 } 6279 6280 /// Return the index of the index in the transition. 6281 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 6282 /// is at index 1. 6283 unsigned getTransitionIdx() const { 6284 assert(isa<ExtractElementInst>(Transition) && 6285 "Other kind of transitions are not supported yet"); 6286 return 1; 6287 } 6288 6289 /// Get the type of the transition. 6290 /// This is the type of the original value. 6291 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 6292 /// transition is <2 x i32>. 6293 Type *getTransitionType() const { 6294 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 6295 } 6296 6297 /// Promote \p ToBePromoted by moving \p Def downward through. 6298 /// I.e., we have the following sequence: 6299 /// Def = Transition <ty1> a to <ty2> 6300 /// b = ToBePromoted <ty2> Def, ... 6301 /// => 6302 /// b = ToBePromoted <ty1> a, ... 6303 /// Def = Transition <ty1> ToBePromoted to <ty2> 6304 void promoteImpl(Instruction *ToBePromoted); 6305 6306 /// Check whether or not it is profitable to promote all the 6307 /// instructions enqueued to be promoted. 6308 bool isProfitableToPromote() { 6309 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 6310 unsigned Index = isa<ConstantInt>(ValIdx) 6311 ? cast<ConstantInt>(ValIdx)->getZExtValue() 6312 : -1; 6313 Type *PromotedType = getTransitionType(); 6314 6315 StoreInst *ST = cast<StoreInst>(CombineInst); 6316 unsigned AS = ST->getPointerAddressSpace(); 6317 unsigned Align = ST->getAlignment(); 6318 // Check if this store is supported. 6319 if (!TLI.allowsMisalignedMemoryAccesses( 6320 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 6321 Align)) { 6322 // If this is not supported, there is no way we can combine 6323 // the extract with the store. 6324 return false; 6325 } 6326 6327 // The scalar chain of computation has to pay for the transition 6328 // scalar to vector. 6329 // The vector chain has to account for the combining cost. 6330 uint64_t ScalarCost = 6331 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 6332 uint64_t VectorCost = StoreExtractCombineCost; 6333 for (const auto &Inst : InstsToBePromoted) { 6334 // Compute the cost. 6335 // By construction, all instructions being promoted are arithmetic ones. 6336 // Moreover, one argument is a constant that can be viewed as a splat 6337 // constant. 6338 Value *Arg0 = Inst->getOperand(0); 6339 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 6340 isa<ConstantFP>(Arg0); 6341 TargetTransformInfo::OperandValueKind Arg0OVK = 6342 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 6343 : TargetTransformInfo::OK_AnyValue; 6344 TargetTransformInfo::OperandValueKind Arg1OVK = 6345 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 6346 : TargetTransformInfo::OK_AnyValue; 6347 ScalarCost += TTI.getArithmeticInstrCost( 6348 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); 6349 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 6350 Arg0OVK, Arg1OVK); 6351 } 6352 LLVM_DEBUG( 6353 dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 6354 << ScalarCost << "\nVector: " << VectorCost << '\n'); 6355 return ScalarCost > VectorCost; 6356 } 6357 6358 /// Generate a constant vector with \p Val with the same 6359 /// number of elements as the transition. 6360 /// \p UseSplat defines whether or not \p Val should be replicated 6361 /// across the whole vector. 6362 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 6363 /// otherwise we generate a vector with as many undef as possible: 6364 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 6365 /// used at the index of the extract. 6366 Value *getConstantVector(Constant *Val, bool UseSplat) const { 6367 unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); 6368 if (!UseSplat) { 6369 // If we cannot determine where the constant must be, we have to 6370 // use a splat constant. 6371 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 6372 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 6373 ExtractIdx = CstVal->getSExtValue(); 6374 else 6375 UseSplat = true; 6376 } 6377 6378 unsigned End = getTransitionType()->getVectorNumElements(); 6379 if (UseSplat) 6380 return ConstantVector::getSplat(End, Val); 6381 6382 SmallVector<Constant *, 4> ConstVec; 6383 UndefValue *UndefVal = UndefValue::get(Val->getType()); 6384 for (unsigned Idx = 0; Idx != End; ++Idx) { 6385 if (Idx == ExtractIdx) 6386 ConstVec.push_back(Val); 6387 else 6388 ConstVec.push_back(UndefVal); 6389 } 6390 return ConstantVector::get(ConstVec); 6391 } 6392 6393 /// Check if promoting to a vector type an operand at \p OperandIdx 6394 /// in \p Use can trigger undefined behavior. 6395 static bool canCauseUndefinedBehavior(const Instruction *Use, 6396 unsigned OperandIdx) { 6397 // This is not safe to introduce undef when the operand is on 6398 // the right hand side of a division-like instruction. 6399 if (OperandIdx != 1) 6400 return false; 6401 switch (Use->getOpcode()) { 6402 default: 6403 return false; 6404 case Instruction::SDiv: 6405 case Instruction::UDiv: 6406 case Instruction::SRem: 6407 case Instruction::URem: 6408 return true; 6409 case Instruction::FDiv: 6410 case Instruction::FRem: 6411 return !Use->hasNoNaNs(); 6412 } 6413 llvm_unreachable(nullptr); 6414 } 6415 6416 public: 6417 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 6418 const TargetTransformInfo &TTI, Instruction *Transition, 6419 unsigned CombineCost) 6420 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 6421 StoreExtractCombineCost(CombineCost) { 6422 assert(Transition && "Do not know how to promote null"); 6423 } 6424 6425 /// Check if we can promote \p ToBePromoted to \p Type. 6426 bool canPromote(const Instruction *ToBePromoted) const { 6427 // We could support CastInst too. 6428 return isa<BinaryOperator>(ToBePromoted); 6429 } 6430 6431 /// Check if it is profitable to promote \p ToBePromoted 6432 /// by moving downward the transition through. 6433 bool shouldPromote(const Instruction *ToBePromoted) const { 6434 // Promote only if all the operands can be statically expanded. 6435 // Indeed, we do not want to introduce any new kind of transitions. 6436 for (const Use &U : ToBePromoted->operands()) { 6437 const Value *Val = U.get(); 6438 if (Val == getEndOfTransition()) { 6439 // If the use is a division and the transition is on the rhs, 6440 // we cannot promote the operation, otherwise we may create a 6441 // division by zero. 6442 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 6443 return false; 6444 continue; 6445 } 6446 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 6447 !isa<ConstantFP>(Val)) 6448 return false; 6449 } 6450 // Check that the resulting operation is legal. 6451 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 6452 if (!ISDOpcode) 6453 return false; 6454 return StressStoreExtract || 6455 TLI.isOperationLegalOrCustom( 6456 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); 6457 } 6458 6459 /// Check whether or not \p Use can be combined 6460 /// with the transition. 6461 /// I.e., is it possible to do Use(Transition) => AnotherUse? 6462 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } 6463 6464 /// Record \p ToBePromoted as part of the chain to be promoted. 6465 void enqueueForPromotion(Instruction *ToBePromoted) { 6466 InstsToBePromoted.push_back(ToBePromoted); 6467 } 6468 6469 /// Set the instruction that will be combined with the transition. 6470 void recordCombineInstruction(Instruction *ToBeCombined) { 6471 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); 6472 CombineInst = ToBeCombined; 6473 } 6474 6475 /// Promote all the instructions enqueued for promotion if it is 6476 /// is profitable. 6477 /// \return True if the promotion happened, false otherwise. 6478 bool promote() { 6479 // Check if there is something to promote. 6480 // Right now, if we do not have anything to combine with, 6481 // we assume the promotion is not profitable. 6482 if (InstsToBePromoted.empty() || !CombineInst) 6483 return false; 6484 6485 // Check cost. 6486 if (!StressStoreExtract && !isProfitableToPromote()) 6487 return false; 6488 6489 // Promote. 6490 for (auto &ToBePromoted : InstsToBePromoted) 6491 promoteImpl(ToBePromoted); 6492 InstsToBePromoted.clear(); 6493 return true; 6494 } 6495 }; 6496 6497 } // end anonymous namespace 6498 6499 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { 6500 // At this point, we know that all the operands of ToBePromoted but Def 6501 // can be statically promoted. 6502 // For Def, we need to use its parameter in ToBePromoted: 6503 // b = ToBePromoted ty1 a 6504 // Def = Transition ty1 b to ty2 6505 // Move the transition down. 6506 // 1. Replace all uses of the promoted operation by the transition. 6507 // = ... b => = ... Def. 6508 assert(ToBePromoted->getType() == Transition->getType() && 6509 "The type of the result of the transition does not match " 6510 "the final type"); 6511 ToBePromoted->replaceAllUsesWith(Transition); 6512 // 2. Update the type of the uses. 6513 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. 6514 Type *TransitionTy = getTransitionType(); 6515 ToBePromoted->mutateType(TransitionTy); 6516 // 3. Update all the operands of the promoted operation with promoted 6517 // operands. 6518 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. 6519 for (Use &U : ToBePromoted->operands()) { 6520 Value *Val = U.get(); 6521 Value *NewVal = nullptr; 6522 if (Val == Transition) 6523 NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); 6524 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || 6525 isa<ConstantFP>(Val)) { 6526 // Use a splat constant if it is not safe to use undef. 6527 NewVal = getConstantVector( 6528 cast<Constant>(Val), 6529 isa<UndefValue>(Val) || 6530 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); 6531 } else 6532 llvm_unreachable("Did you modified shouldPromote and forgot to update " 6533 "this?"); 6534 ToBePromoted->setOperand(U.getOperandNo(), NewVal); 6535 } 6536 Transition->moveAfter(ToBePromoted); 6537 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); 6538 } 6539 6540 /// Some targets can do store(extractelement) with one instruction. 6541 /// Try to push the extractelement towards the stores when the target 6542 /// has this feature and this is profitable. 6543 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { 6544 unsigned CombineCost = std::numeric_limits<unsigned>::max(); 6545 if (DisableStoreExtract || !TLI || 6546 (!StressStoreExtract && 6547 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 6548 Inst->getOperand(1), CombineCost))) 6549 return false; 6550 6551 // At this point we know that Inst is a vector to scalar transition. 6552 // Try to move it down the def-use chain, until: 6553 // - We can combine the transition with its single use 6554 // => we got rid of the transition. 6555 // - We escape the current basic block 6556 // => we would need to check that we are moving it at a cheaper place and 6557 // we do not do that for now. 6558 BasicBlock *Parent = Inst->getParent(); 6559 LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 6560 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); 6561 // If the transition has more than one use, assume this is not going to be 6562 // beneficial. 6563 while (Inst->hasOneUse()) { 6564 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 6565 LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 6566 6567 if (ToBePromoted->getParent() != Parent) { 6568 LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" 6569 << ToBePromoted->getParent()->getName() 6570 << ") than the transition (" << Parent->getName() 6571 << ").\n"); 6572 return false; 6573 } 6574 6575 if (VPH.canCombine(ToBePromoted)) { 6576 LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' 6577 << "will be combined with: " << *ToBePromoted << '\n'); 6578 VPH.recordCombineInstruction(ToBePromoted); 6579 bool Changed = VPH.promote(); 6580 NumStoreExtractExposed += Changed; 6581 return Changed; 6582 } 6583 6584 LLVM_DEBUG(dbgs() << "Try promoting.\n"); 6585 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 6586 return false; 6587 6588 LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 6589 6590 VPH.enqueueForPromotion(ToBePromoted); 6591 Inst = ToBePromoted; 6592 } 6593 return false; 6594 } 6595 6596 /// For the instruction sequence of store below, F and I values 6597 /// are bundled together as an i64 value before being stored into memory. 6598 /// Sometimes it is more efficient to generate separate stores for F and I, 6599 /// which can remove the bitwise instructions or sink them to colder places. 6600 /// 6601 /// (store (or (zext (bitcast F to i32) to i64), 6602 /// (shl (zext I to i64), 32)), addr) --> 6603 /// (store F, addr) and (store I, addr+4) 6604 /// 6605 /// Similarly, splitting for other merged store can also be beneficial, like: 6606 /// For pair of {i32, i32}, i64 store --> two i32 stores. 6607 /// For pair of {i32, i16}, i64 store --> two i32 stores. 6608 /// For pair of {i16, i16}, i32 store --> two i16 stores. 6609 /// For pair of {i16, i8}, i32 store --> two i16 stores. 6610 /// For pair of {i8, i8}, i16 store --> two i8 stores. 6611 /// 6612 /// We allow each target to determine specifically which kind of splitting is 6613 /// supported. 6614 /// 6615 /// The store patterns are commonly seen from the simple code snippet below 6616 /// if only std::make_pair(...) is sroa transformed before inlined into hoo. 6617 /// void goo(const std::pair<int, float> &); 6618 /// hoo() { 6619 /// ... 6620 /// goo(std::make_pair(tmp, ftmp)); 6621 /// ... 6622 /// } 6623 /// 6624 /// Although we already have similar splitting in DAG Combine, we duplicate 6625 /// it in CodeGenPrepare to catch the case in which pattern is across 6626 /// multiple BBs. The logic in DAG Combine is kept to catch case generated 6627 /// during code expansion. 6628 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, 6629 const TargetLowering &TLI) { 6630 // Handle simple but common cases only. 6631 Type *StoreType = SI.getValueOperand()->getType(); 6632 if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) || 6633 DL.getTypeSizeInBits(StoreType) == 0) 6634 return false; 6635 6636 unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2; 6637 Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize); 6638 if (DL.getTypeStoreSizeInBits(SplitStoreType) != 6639 DL.getTypeSizeInBits(SplitStoreType)) 6640 return false; 6641 6642 // Match the following patterns: 6643 // (store (or (zext LValue to i64), 6644 // (shl (zext HValue to i64), 32)), HalfValBitSize) 6645 // or 6646 // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize) 6647 // (zext LValue to i64), 6648 // Expect both operands of OR and the first operand of SHL have only 6649 // one use. 6650 Value *LValue, *HValue; 6651 if (!match(SI.getValueOperand(), 6652 m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), 6653 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), 6654 m_SpecificInt(HalfValBitSize)))))) 6655 return false; 6656 6657 // Check LValue and HValue are int with size less or equal than 32. 6658 if (!LValue->getType()->isIntegerTy() || 6659 DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || 6660 !HValue->getType()->isIntegerTy() || 6661 DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) 6662 return false; 6663 6664 // If LValue/HValue is a bitcast instruction, use the EVT before bitcast 6665 // as the input of target query. 6666 auto *LBC = dyn_cast<BitCastInst>(LValue); 6667 auto *HBC = dyn_cast<BitCastInst>(HValue); 6668 EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) 6669 : EVT::getEVT(LValue->getType()); 6670 EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) 6671 : EVT::getEVT(HValue->getType()); 6672 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) 6673 return false; 6674 6675 // Start to split store. 6676 IRBuilder<> Builder(SI.getContext()); 6677 Builder.SetInsertPoint(&SI); 6678 6679 // If LValue/HValue is a bitcast in another BB, create a new one in current 6680 // BB so it may be merged with the splitted stores by dag combiner. 6681 if (LBC && LBC->getParent() != SI.getParent()) 6682 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); 6683 if (HBC && HBC->getParent() != SI.getParent()) 6684 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); 6685 6686 bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); 6687 auto CreateSplitStore = [&](Value *V, bool Upper) { 6688 V = Builder.CreateZExtOrBitCast(V, SplitStoreType); 6689 Value *Addr = Builder.CreateBitCast( 6690 SI.getOperand(1), 6691 SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); 6692 if ((IsLE && Upper) || (!IsLE && !Upper)) 6693 Addr = Builder.CreateGEP( 6694 SplitStoreType, Addr, 6695 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); 6696 Builder.CreateAlignedStore( 6697 V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment()); 6698 }; 6699 6700 CreateSplitStore(LValue, false); 6701 CreateSplitStore(HValue, true); 6702 6703 // Delete the old store. 6704 SI.eraseFromParent(); 6705 return true; 6706 } 6707 6708 // Return true if the GEP has two operands, the first operand is of a sequential 6709 // type, and the second operand is a constant. 6710 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { 6711 gep_type_iterator I = gep_type_begin(*GEP); 6712 return GEP->getNumOperands() == 2 && 6713 I.isSequential() && 6714 isa<ConstantInt>(GEP->getOperand(1)); 6715 } 6716 6717 // Try unmerging GEPs to reduce liveness interference (register pressure) across 6718 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, 6719 // reducing liveness interference across those edges benefits global register 6720 // allocation. Currently handles only certain cases. 6721 // 6722 // For example, unmerge %GEPI and %UGEPI as below. 6723 // 6724 // ---------- BEFORE ---------- 6725 // SrcBlock: 6726 // ... 6727 // %GEPIOp = ... 6728 // ... 6729 // %GEPI = gep %GEPIOp, Idx 6730 // ... 6731 // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] 6732 // (* %GEPI is alive on the indirectbr edges due to other uses ahead) 6733 // (* %GEPIOp is alive on the indirectbr edges only because of it's used by 6734 // %UGEPI) 6735 // 6736 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) 6737 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) 6738 // ... 6739 // 6740 // DstBi: 6741 // ... 6742 // %UGEPI = gep %GEPIOp, UIdx 6743 // ... 6744 // --------------------------- 6745 // 6746 // ---------- AFTER ---------- 6747 // SrcBlock: 6748 // ... (same as above) 6749 // (* %GEPI is still alive on the indirectbr edges) 6750 // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the 6751 // unmerging) 6752 // ... 6753 // 6754 // DstBi: 6755 // ... 6756 // %UGEPI = gep %GEPI, (UIdx-Idx) 6757 // ... 6758 // --------------------------- 6759 // 6760 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is 6761 // no longer alive on them. 6762 // 6763 // We try to unmerge GEPs here in CodGenPrepare, as opposed to limiting merging 6764 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as 6765 // not to disable further simplications and optimizations as a result of GEP 6766 // merging. 6767 // 6768 // Note this unmerging may increase the length of the data flow critical path 6769 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff 6770 // between the register pressure and the length of data-flow critical 6771 // path. Restricting this to the uncommon IndirectBr case would minimize the 6772 // impact of potentially longer critical path, if any, and the impact on compile 6773 // time. 6774 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, 6775 const TargetTransformInfo *TTI) { 6776 BasicBlock *SrcBlock = GEPI->getParent(); 6777 // Check that SrcBlock ends with an IndirectBr. If not, give up. The common 6778 // (non-IndirectBr) cases exit early here. 6779 if (!isa<IndirectBrInst>(SrcBlock->getTerminator())) 6780 return false; 6781 // Check that GEPI is a simple gep with a single constant index. 6782 if (!GEPSequentialConstIndexed(GEPI)) 6783 return false; 6784 ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1)); 6785 // Check that GEPI is a cheap one. 6786 if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType()) 6787 > TargetTransformInfo::TCC_Basic) 6788 return false; 6789 Value *GEPIOp = GEPI->getOperand(0); 6790 // Check that GEPIOp is an instruction that's also defined in SrcBlock. 6791 if (!isa<Instruction>(GEPIOp)) 6792 return false; 6793 auto *GEPIOpI = cast<Instruction>(GEPIOp); 6794 if (GEPIOpI->getParent() != SrcBlock) 6795 return false; 6796 // Check that GEP is used outside the block, meaning it's alive on the 6797 // IndirectBr edge(s). 6798 if (find_if(GEPI->users(), [&](User *Usr) { 6799 if (auto *I = dyn_cast<Instruction>(Usr)) { 6800 if (I->getParent() != SrcBlock) { 6801 return true; 6802 } 6803 } 6804 return false; 6805 }) == GEPI->users().end()) 6806 return false; 6807 // The second elements of the GEP chains to be unmerged. 6808 std::vector<GetElementPtrInst *> UGEPIs; 6809 // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive 6810 // on IndirectBr edges. 6811 for (User *Usr : GEPIOp->users()) { 6812 if (Usr == GEPI) continue; 6813 // Check if Usr is an Instruction. If not, give up. 6814 if (!isa<Instruction>(Usr)) 6815 return false; 6816 auto *UI = cast<Instruction>(Usr); 6817 // Check if Usr in the same block as GEPIOp, which is fine, skip. 6818 if (UI->getParent() == SrcBlock) 6819 continue; 6820 // Check if Usr is a GEP. If not, give up. 6821 if (!isa<GetElementPtrInst>(Usr)) 6822 return false; 6823 auto *UGEPI = cast<GetElementPtrInst>(Usr); 6824 // Check if UGEPI is a simple gep with a single constant index and GEPIOp is 6825 // the pointer operand to it. If so, record it in the vector. If not, give 6826 // up. 6827 if (!GEPSequentialConstIndexed(UGEPI)) 6828 return false; 6829 if (UGEPI->getOperand(0) != GEPIOp) 6830 return false; 6831 if (GEPIIdx->getType() != 6832 cast<ConstantInt>(UGEPI->getOperand(1))->getType()) 6833 return false; 6834 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 6835 if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType()) 6836 > TargetTransformInfo::TCC_Basic) 6837 return false; 6838 UGEPIs.push_back(UGEPI); 6839 } 6840 if (UGEPIs.size() == 0) 6841 return false; 6842 // Check the materializing cost of (Uidx-Idx). 6843 for (GetElementPtrInst *UGEPI : UGEPIs) { 6844 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 6845 APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); 6846 unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType()); 6847 if (ImmCost > TargetTransformInfo::TCC_Basic) 6848 return false; 6849 } 6850 // Now unmerge between GEPI and UGEPIs. 6851 for (GetElementPtrInst *UGEPI : UGEPIs) { 6852 UGEPI->setOperand(0, GEPI); 6853 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 6854 Constant *NewUGEPIIdx = 6855 ConstantInt::get(GEPIIdx->getType(), 6856 UGEPIIdx->getValue() - GEPIIdx->getValue()); 6857 UGEPI->setOperand(1, NewUGEPIIdx); 6858 // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not 6859 // inbounds to avoid UB. 6860 if (!GEPI->isInBounds()) { 6861 UGEPI->setIsInBounds(false); 6862 } 6863 } 6864 // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not 6865 // alive on IndirectBr edges). 6866 assert(find_if(GEPIOp->users(), [&](User *Usr) { 6867 return cast<Instruction>(Usr)->getParent() != SrcBlock; 6868 }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock"); 6869 return true; 6870 } 6871 6872 bool CodeGenPrepare::optimizeInst(Instruction *I, DominatorTree &DT, 6873 bool &ModifiedDT) { 6874 // Bail out if we inserted the instruction to prevent optimizations from 6875 // stepping on each other's toes. 6876 if (InsertedInsts.count(I)) 6877 return false; 6878 6879 if (PHINode *P = dyn_cast<PHINode>(I)) { 6880 // It is possible for very late stage optimizations (such as SimplifyCFG) 6881 // to introduce PHI nodes too late to be cleaned up. If we detect such a 6882 // trivial PHI, go ahead and zap it here. 6883 if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) { 6884 LargeOffsetGEPMap.erase(P); 6885 P->replaceAllUsesWith(V); 6886 P->eraseFromParent(); 6887 ++NumPHIsElim; 6888 return true; 6889 } 6890 return false; 6891 } 6892 6893 if (CastInst *CI = dyn_cast<CastInst>(I)) { 6894 // If the source of the cast is a constant, then this should have 6895 // already been constant folded. The only reason NOT to constant fold 6896 // it is if something (e.g. LSR) was careful to place the constant 6897 // evaluation in a block other than then one that uses it (e.g. to hoist 6898 // the address of globals out of a loop). If this is the case, we don't 6899 // want to forward-subst the cast. 6900 if (isa<Constant>(CI->getOperand(0))) 6901 return false; 6902 6903 if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL)) 6904 return true; 6905 6906 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { 6907 /// Sink a zext or sext into its user blocks if the target type doesn't 6908 /// fit in one register 6909 if (TLI && 6910 TLI->getTypeAction(CI->getContext(), 6911 TLI->getValueType(*DL, CI->getType())) == 6912 TargetLowering::TypeExpandInteger) { 6913 return SinkCast(CI); 6914 } else { 6915 bool MadeChange = optimizeExt(I); 6916 return MadeChange | optimizeExtUses(I); 6917 } 6918 } 6919 return false; 6920 } 6921 6922 if (auto *Cmp = dyn_cast<CmpInst>(I)) 6923 if (TLI && optimizeCmp(Cmp, *TLI, *DL, DT, ModifiedDT)) 6924 return true; 6925 6926 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 6927 LI->setMetadata(LLVMContext::MD_invariant_group, nullptr); 6928 if (TLI) { 6929 bool Modified = optimizeLoadExt(LI); 6930 unsigned AS = LI->getPointerAddressSpace(); 6931 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); 6932 return Modified; 6933 } 6934 return false; 6935 } 6936 6937 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 6938 if (TLI && splitMergedValStore(*SI, *DL, *TLI)) 6939 return true; 6940 SI->setMetadata(LLVMContext::MD_invariant_group, nullptr); 6941 if (TLI) { 6942 unsigned AS = SI->getPointerAddressSpace(); 6943 return optimizeMemoryInst(I, SI->getOperand(1), 6944 SI->getOperand(0)->getType(), AS); 6945 } 6946 return false; 6947 } 6948 6949 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 6950 unsigned AS = RMW->getPointerAddressSpace(); 6951 return optimizeMemoryInst(I, RMW->getPointerOperand(), 6952 RMW->getType(), AS); 6953 } 6954 6955 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) { 6956 unsigned AS = CmpX->getPointerAddressSpace(); 6957 return optimizeMemoryInst(I, CmpX->getPointerOperand(), 6958 CmpX->getCompareOperand()->getType(), AS); 6959 } 6960 6961 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); 6962 6963 if (BinOp && (BinOp->getOpcode() == Instruction::And) && 6964 EnableAndCmpSinking && TLI) 6965 return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts); 6966 6967 if (BinOp && (BinOp->getOpcode() == Instruction::AShr || 6968 BinOp->getOpcode() == Instruction::LShr)) { 6969 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); 6970 if (TLI && CI && TLI->hasExtractBitsInsn()) 6971 return OptimizeExtractBits(BinOp, CI, *TLI, *DL); 6972 6973 return false; 6974 } 6975 6976 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 6977 if (GEPI->hasAllZeroIndices()) { 6978 /// The GEP operand must be a pointer, so must its result -> BitCast 6979 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), 6980 GEPI->getName(), GEPI); 6981 NC->setDebugLoc(GEPI->getDebugLoc()); 6982 GEPI->replaceAllUsesWith(NC); 6983 GEPI->eraseFromParent(); 6984 ++NumGEPsElim; 6985 optimizeInst(NC, DT, ModifiedDT); 6986 return true; 6987 } 6988 if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) { 6989 return true; 6990 } 6991 return false; 6992 } 6993 6994 if (tryToSinkFreeOperands(I)) 6995 return true; 6996 6997 if (CallInst *CI = dyn_cast<CallInst>(I)) 6998 return optimizeCallInst(CI, ModifiedDT); 6999 7000 if (SelectInst *SI = dyn_cast<SelectInst>(I)) 7001 return optimizeSelectInst(SI, ModifiedDT); 7002 7003 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) 7004 return optimizeShuffleVectorInst(SVI); 7005 7006 if (auto *Switch = dyn_cast<SwitchInst>(I)) 7007 return optimizeSwitchInst(Switch); 7008 7009 if (isa<ExtractElementInst>(I)) 7010 return optimizeExtractElementInst(I); 7011 7012 return false; 7013 } 7014 7015 /// Given an OR instruction, check to see if this is a bitreverse 7016 /// idiom. If so, insert the new intrinsic and return true. 7017 static bool makeBitReverse(Instruction &I, const DataLayout &DL, 7018 const TargetLowering &TLI) { 7019 if (!I.getType()->isIntegerTy() || 7020 !TLI.isOperationLegalOrCustom(ISD::BITREVERSE, 7021 TLI.getValueType(DL, I.getType(), true))) 7022 return false; 7023 7024 SmallVector<Instruction*, 4> Insts; 7025 if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts)) 7026 return false; 7027 Instruction *LastInst = Insts.back(); 7028 I.replaceAllUsesWith(LastInst); 7029 RecursivelyDeleteTriviallyDeadInstructions(&I); 7030 return true; 7031 } 7032 7033 // In this pass we look for GEP and cast instructions that are used 7034 // across basic blocks and rewrite them to improve basic-block-at-a-time 7035 // selection. 7036 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, DominatorTree &DT, 7037 bool &ModifiedDT) { 7038 SunkAddrs.clear(); 7039 bool MadeChange = false; 7040 7041 CurInstIterator = BB.begin(); 7042 while (CurInstIterator != BB.end()) { 7043 MadeChange |= optimizeInst(&*CurInstIterator++, DT, ModifiedDT); 7044 if (ModifiedDT) 7045 return true; 7046 } 7047 7048 bool MadeBitReverse = true; 7049 while (TLI && MadeBitReverse) { 7050 MadeBitReverse = false; 7051 for (auto &I : reverse(BB)) { 7052 if (makeBitReverse(I, *DL, *TLI)) { 7053 MadeBitReverse = MadeChange = true; 7054 ModifiedDT = true; 7055 break; 7056 } 7057 } 7058 } 7059 MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT); 7060 7061 return MadeChange; 7062 } 7063 7064 // llvm.dbg.value is far away from the value then iSel may not be able 7065 // handle it properly. iSel will drop llvm.dbg.value if it can not 7066 // find a node corresponding to the value. 7067 bool CodeGenPrepare::placeDbgValues(Function &F) { 7068 bool MadeChange = false; 7069 for (BasicBlock &BB : F) { 7070 Instruction *PrevNonDbgInst = nullptr; 7071 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { 7072 Instruction *Insn = &*BI++; 7073 DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn); 7074 // Leave dbg.values that refer to an alloca alone. These 7075 // intrinsics describe the address of a variable (= the alloca) 7076 // being taken. They should not be moved next to the alloca 7077 // (and to the beginning of the scope), but rather stay close to 7078 // where said address is used. 7079 if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) { 7080 PrevNonDbgInst = Insn; 7081 continue; 7082 } 7083 7084 Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue()); 7085 if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) { 7086 // If VI is a phi in a block with an EHPad terminator, we can't insert 7087 // after it. 7088 if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad()) 7089 continue; 7090 LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n" 7091 << *DVI << ' ' << *VI); 7092 DVI->removeFromParent(); 7093 if (isa<PHINode>(VI)) 7094 DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt()); 7095 else 7096 DVI->insertAfter(VI); 7097 MadeChange = true; 7098 ++NumDbgValueMoved; 7099 } 7100 } 7101 } 7102 return MadeChange; 7103 } 7104 7105 /// Scale down both weights to fit into uint32_t. 7106 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { 7107 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; 7108 uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; 7109 NewTrue = NewTrue / Scale; 7110 NewFalse = NewFalse / Scale; 7111 } 7112 7113 /// Some targets prefer to split a conditional branch like: 7114 /// \code 7115 /// %0 = icmp ne i32 %a, 0 7116 /// %1 = icmp ne i32 %b, 0 7117 /// %or.cond = or i1 %0, %1 7118 /// br i1 %or.cond, label %TrueBB, label %FalseBB 7119 /// \endcode 7120 /// into multiple branch instructions like: 7121 /// \code 7122 /// bb1: 7123 /// %0 = icmp ne i32 %a, 0 7124 /// br i1 %0, label %TrueBB, label %bb2 7125 /// bb2: 7126 /// %1 = icmp ne i32 %b, 0 7127 /// br i1 %1, label %TrueBB, label %FalseBB 7128 /// \endcode 7129 /// This usually allows instruction selection to do even further optimizations 7130 /// and combine the compare with the branch instruction. Currently this is 7131 /// applied for targets which have "cheap" jump instructions. 7132 /// 7133 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. 7134 /// 7135 bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) { 7136 if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive()) 7137 return false; 7138 7139 bool MadeChange = false; 7140 for (auto &BB : F) { 7141 // Does this BB end with the following? 7142 // %cond1 = icmp|fcmp|binary instruction ... 7143 // %cond2 = icmp|fcmp|binary instruction ... 7144 // %cond.or = or|and i1 %cond1, cond2 7145 // br i1 %cond.or label %dest1, label %dest2" 7146 BinaryOperator *LogicOp; 7147 BasicBlock *TBB, *FBB; 7148 if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB))) 7149 continue; 7150 7151 auto *Br1 = cast<BranchInst>(BB.getTerminator()); 7152 if (Br1->getMetadata(LLVMContext::MD_unpredictable)) 7153 continue; 7154 7155 unsigned Opc; 7156 Value *Cond1, *Cond2; 7157 if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)), 7158 m_OneUse(m_Value(Cond2))))) 7159 Opc = Instruction::And; 7160 else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)), 7161 m_OneUse(m_Value(Cond2))))) 7162 Opc = Instruction::Or; 7163 else 7164 continue; 7165 7166 if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) || 7167 !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) ) 7168 continue; 7169 7170 LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump()); 7171 7172 // Create a new BB. 7173 auto TmpBB = 7174 BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split", 7175 BB.getParent(), BB.getNextNode()); 7176 7177 // Update original basic block by using the first condition directly by the 7178 // branch instruction and removing the no longer needed and/or instruction. 7179 Br1->setCondition(Cond1); 7180 LogicOp->eraseFromParent(); 7181 7182 // Depending on the condition we have to either replace the true or the 7183 // false successor of the original branch instruction. 7184 if (Opc == Instruction::And) 7185 Br1->setSuccessor(0, TmpBB); 7186 else 7187 Br1->setSuccessor(1, TmpBB); 7188 7189 // Fill in the new basic block. 7190 auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB); 7191 if (auto *I = dyn_cast<Instruction>(Cond2)) { 7192 I->removeFromParent(); 7193 I->insertBefore(Br2); 7194 } 7195 7196 // Update PHI nodes in both successors. The original BB needs to be 7197 // replaced in one successor's PHI nodes, because the branch comes now from 7198 // the newly generated BB (NewBB). In the other successor we need to add one 7199 // incoming edge to the PHI nodes, because both branch instructions target 7200 // now the same successor. Depending on the original branch condition 7201 // (and/or) we have to swap the successors (TrueDest, FalseDest), so that 7202 // we perform the correct update for the PHI nodes. 7203 // This doesn't change the successor order of the just created branch 7204 // instruction (or any other instruction). 7205 if (Opc == Instruction::Or) 7206 std::swap(TBB, FBB); 7207 7208 // Replace the old BB with the new BB. 7209 for (PHINode &PN : TBB->phis()) { 7210 int i; 7211 while ((i = PN.getBasicBlockIndex(&BB)) >= 0) 7212 PN.setIncomingBlock(i, TmpBB); 7213 } 7214 7215 // Add another incoming edge form the new BB. 7216 for (PHINode &PN : FBB->phis()) { 7217 auto *Val = PN.getIncomingValueForBlock(&BB); 7218 PN.addIncoming(Val, TmpBB); 7219 } 7220 7221 // Update the branch weights (from SelectionDAGBuilder:: 7222 // FindMergedConditions). 7223 if (Opc == Instruction::Or) { 7224 // Codegen X | Y as: 7225 // BB1: 7226 // jmp_if_X TBB 7227 // jmp TmpBB 7228 // TmpBB: 7229 // jmp_if_Y TBB 7230 // jmp FBB 7231 // 7232 7233 // We have flexibility in setting Prob for BB1 and Prob for NewBB. 7234 // The requirement is that 7235 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) 7236 // = TrueProb for original BB. 7237 // Assuming the original weights are A and B, one choice is to set BB1's 7238 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice 7239 // assumes that 7240 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. 7241 // Another choice is to assume TrueProb for BB1 equals to TrueProb for 7242 // TmpBB, but the math is more complicated. 7243 uint64_t TrueWeight, FalseWeight; 7244 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) { 7245 uint64_t NewTrueWeight = TrueWeight; 7246 uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight; 7247 scaleWeights(NewTrueWeight, NewFalseWeight); 7248 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) 7249 .createBranchWeights(TrueWeight, FalseWeight)); 7250 7251 NewTrueWeight = TrueWeight; 7252 NewFalseWeight = 2 * FalseWeight; 7253 scaleWeights(NewTrueWeight, NewFalseWeight); 7254 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) 7255 .createBranchWeights(TrueWeight, FalseWeight)); 7256 } 7257 } else { 7258 // Codegen X & Y as: 7259 // BB1: 7260 // jmp_if_X TmpBB 7261 // jmp FBB 7262 // TmpBB: 7263 // jmp_if_Y TBB 7264 // jmp FBB 7265 // 7266 // This requires creation of TmpBB after CurBB. 7267 7268 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 7269 // The requirement is that 7270 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 7271 // = FalseProb for original BB. 7272 // Assuming the original weights are A and B, one choice is to set BB1's 7273 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice 7274 // assumes that 7275 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. 7276 uint64_t TrueWeight, FalseWeight; 7277 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) { 7278 uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight; 7279 uint64_t NewFalseWeight = FalseWeight; 7280 scaleWeights(NewTrueWeight, NewFalseWeight); 7281 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) 7282 .createBranchWeights(TrueWeight, FalseWeight)); 7283 7284 NewTrueWeight = 2 * TrueWeight; 7285 NewFalseWeight = FalseWeight; 7286 scaleWeights(NewTrueWeight, NewFalseWeight); 7287 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) 7288 .createBranchWeights(TrueWeight, FalseWeight)); 7289 } 7290 } 7291 7292 ModifiedDT = true; 7293 MadeChange = true; 7294 7295 LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump(); 7296 TmpBB->dump()); 7297 } 7298 return MadeChange; 7299 } 7300