//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
"llvm/Transforms/Utils/BypassSlowDivision.h" 89 #include "llvm/Transforms/Utils/SimplifyLibCalls.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <limits> 95 #include <memory> 96 #include <utility> 97 #include <vector> 98 99 using namespace llvm; 100 using namespace llvm::PatternMatch; 101 102 #define DEBUG_TYPE "codegenprepare" 103 104 STATISTIC(NumBlocksElim, "Number of blocks eliminated"); 105 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); 106 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); 107 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " 108 "sunken Cmps"); 109 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " 110 "of sunken Casts"); 111 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " 112 "computations were sunk"); 113 STATISTIC(NumMemoryInstsPhiCreated, 114 "Number of phis created when address " 115 "computations were sunk to memory instructions"); 116 STATISTIC(NumMemoryInstsSelectCreated, 117 "Number of select created when address " 118 "computations were sunk to memory instructions"); 119 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); 120 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); 121 STATISTIC(NumAndsAdded, 122 "Number of and mask instructions added to form ext loads"); 123 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); 124 STATISTIC(NumRetsDup, "Number of return instructions duplicated"); 125 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); 126 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); 127 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); 128 129 static cl::opt<bool> DisableBranchOpts( 130 "disable-cgp-branch-opts", cl::Hidden, cl::init(false), 131 cl::desc("Disable branch optimizations in CodeGenPrepare")); 132 133 static cl::opt<bool> 134 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), 135 cl::desc("Disable GC optimizations in CodeGenPrepare")); 136 137 static cl::opt<bool> DisableSelectToBranch( 138 "disable-cgp-select2branch", cl::Hidden, cl::init(false), 139 cl::desc("Disable select to branch conversion.")); 140 141 static cl::opt<bool> AddrSinkUsingGEPs( 142 "addr-sink-using-gep", cl::Hidden, cl::init(true), 143 cl::desc("Address sinking in CGP using GEPs.")); 144 145 static cl::opt<bool> EnableAndCmpSinking( 146 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 147 cl::desc("Enable sinkinig and/cmp into branches.")); 148 149 static cl::opt<bool> DisableStoreExtract( 150 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 151 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 152 153 static cl::opt<bool> StressStoreExtract( 154 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 155 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 156 157 static cl::opt<bool> DisableExtLdPromotion( 158 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 159 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 160 "CodeGenPrepare")); 161 162 static cl::opt<bool> StressExtLdPromotion( 163 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 164 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 165 "optimization in CodeGenPrepare")); 166 167 static cl::opt<bool> DisablePreheaderProtect( 168 "disable-preheader-prot", 

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool>
    AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
                       cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value*, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the type of each related instruction before its
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs
  /// or arrays that are candidates to be split later because of their large
  /// size.
  MapVector<
      AssertingVH<Value>,
      SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of the new GEP bases after splitting GEPs with large offsets.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map serial numbers to Large offset GEPs.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                          Type *AccessTy, unsigned AddrSpace);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);

  bool tryToSinkFreeOperands(Instruction *I);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    TM = &TPC->getTM<TargetMachine>();
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  OptSize = F.optForSize();

  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  if (ProfileGuidedSectionPrefix) {
    if (PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI))
      F.setSectionPrefix(".unlikely");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
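  // An illustrative sketch (not tied to a specific target): a target may
  // report via getBypassSlowDivWidths() that a 64-bit divide can be bypassed
  // by a 32-bit one; bypassSlowDivision then emits a runtime check and branch
  // roughly of the form
  //   if ((a | b) >> 32 == 0)    ; both operands fit in 32 bits
  //     q = (u32)a / (u32)b      ; short, fast divide
  //   else
  //     q = a / b                ; original wide divide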
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI &&
      TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= SplitIndirectBrCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was
      // changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid the iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB);
    }
  }
  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
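/// For example (illustrative IR), a block such as
///   bb:
///     %p = phi i32 [ 0, %entry ], [ 1, %other ]
///     br label %dest
/// contains only PHI nodes and an unconditional branch, so %dest is returned
/// as the merge destination, provided canMergeBlocks also agrees.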
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator()))
      for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
        if (DestBB == CBI->getSuccessor(i))
          return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic which determines that skipping merging is
  // profitable if the cost of skipping merging is less than the cost of
  // merging: Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to:
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
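  //
  // An illustrative instance of the heuristic: with FreqRatioToSkipMerge at
  // its default of 2, if Freq(Pred) = 16 and Freq(BB) = 4, then 16 > 4 * 2,
  // so this function returns false and merging is skipped; with
  // Freq(Pred) = 6 instead, 6 <= 4 * 2 and the merge is considered
  // profitable.
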
  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In that case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check the
      // incoming value. If the incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only phis and an unconditional branch in
/// it.
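/// For example (illustrative IR), merging %bb into %dest:
///   bb:                                  ; preds = %p1, %p2
///     %v = phi i32 [ 1, %p1 ], [ 2, %p2 ]
///     br label %dest
///   dest:                                ; preds = %bb, %other
///     %u = phi i32 [ %v, %bb ], [ 0, %other ]
/// leaves %dest with the incoming values folded through:
///   dest:                                ; preds = %p1, %p2, %other
///     %u = phi i32 [ 1, %p1 ], [ 2, %p2 ], [ 0, %other ]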
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN.addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final
  // structure holding a mapping between Base and corresponding Derived
  // relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants.
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of the derived pointer is defined
  // after the relocation of the base pointer.
  // If we find a relocation corresponding to this base that is defined
  // earlier than the relocation of the base, we move the relocation of the
  // base right before that relocation. We only consider relocations in the
  // same basic block as the relocation of the base; relocations from other
  // basic blocks are skipped by this optimization and we do not care about
  // them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However, it would be too expensive to check
      // dominance for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast whether or not there is already one. In this way, we can handle
    // all cases, and the extra bitcast should be optimized away in later
    // passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase,
        makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// SinkCast - Sink the specified cast instruction into its user blocks.
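/// For example (illustrative IR), a cast computed in one block but used in
/// another:
///   bb0:
///     %c = bitcast i8* %p to i32*
///   bb1:
///     store i32 0, i32* %c
/// gets a per-block copy of the cast next to each use:
///   bb1:
///     %c1 = bitcast i8* %p to i32*
///     store i32 0, i32* %c1
/// so isel, which works one basic block at a time, sees the def and the use
/// together.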
1092 CastInst *&InsertedCast = InsertedCasts[UserBB]; 1093 1094 if (!InsertedCast) { 1095 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1096 assert(InsertPt != UserBB->end()); 1097 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 1098 CI->getType(), "", &*InsertPt); 1099 InsertedCast->setDebugLoc(CI->getDebugLoc()); 1100 } 1101 1102 // Replace a use of the cast with a use of the new cast. 1103 TheUse = InsertedCast; 1104 MadeChange = true; 1105 ++NumCastUses; 1106 } 1107 1108 // If we removed all uses, nuke the cast. 1109 if (CI->use_empty()) { 1110 salvageDebugInfo(*CI); 1111 CI->eraseFromParent(); 1112 MadeChange = true; 1113 } 1114 1115 return MadeChange; 1116 } 1117 1118 /// If the specified cast instruction is a noop copy (e.g. it's casting from 1119 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 1120 /// reduce the number of virtual registers that must be created and coalesced. 1121 /// 1122 /// Return true if any changes are made. 1123 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 1124 const DataLayout &DL) { 1125 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition 1126 // than sinking only nop casts, but is helpful on some platforms. 1127 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { 1128 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), 1129 ASC->getDestAddressSpace())) 1130 return false; 1131 } 1132 1133 // If this is a noop copy, 1134 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 1135 EVT DstVT = TLI.getValueType(DL, CI->getType()); 1136 1137 // This is an fp<->int conversion? 1138 if (SrcVT.isInteger() != DstVT.isInteger()) 1139 return false; 1140 1141 // If this is an extension, it will be a zero or sign extension, which 1142 // isn't a noop. 1143 if (SrcVT.bitsLT(DstVT)) return false; 1144 1145 // If these values will be promoted, find out what they will be promoted 1146 // to. This helps us consider truncates on PPC as noop copies when they 1147 // are. 1148 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 1149 TargetLowering::TypePromoteInteger) 1150 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 1151 if (TLI.getTypeAction(CI->getContext(), DstVT) == 1152 TargetLowering::TypePromoteInteger) 1153 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 1154 1155 // If, after promotion, these are the same types, this is a noop copy. 1156 if (SrcVT != DstVT) 1157 return false; 1158 1159 return SinkCast(CI); 1160 } 1161 1162 static void replaceMathCmpWithIntrinsic(BinaryOperator *BO, CmpInst *Cmp, 1163 Instruction *InsertPt, 1164 Intrinsic::ID IID) { 1165 Value *Arg0 = BO->getOperand(0); 1166 Value *Arg1 = BO->getOperand(1); 1167 1168 // We allow matching the canonical IR (add X, C) back to (usubo X, -C). 1169 if (BO->getOpcode() == Instruction::Add && 1170 IID == Intrinsic::usub_with_overflow) { 1171 assert(isa<Constant>(Arg1) && "Unexpected input for usubo"); 1172 Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1)); 1173 } 1174 1175 IRBuilder<> Builder(InsertPt); 1176 Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1); 1177 Value *Math = Builder.CreateExtractValue(MathOV, 0, "math"); 1178 Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov"); 1179 BO->replaceAllUsesWith(Math); 1180 Cmp->replaceAllUsesWith(OV); 1181 BO->eraseFromParent(); 1182 Cmp->eraseFromParent(); 1183 } 1184 1185 /// Try to combine the compare into a call to the llvm.uadd.with.overflow 1186 /// intrinsic. 
static bool combineToUAddWithOverflow(CmpInst *Cmp, const TargetLowering &TLI,
                                      const DataLayout &DL) {
  Value *A, *B;
  BinaryOperator *Add;
  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add))))
    return false;

  if (!TLI.shouldFormOverflowOp(ISD::UADDO,
                                TLI.getValueType(DL, Add->getType())))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp.
  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
    return false;

#ifndef NDEBUG
  // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  // for now:
  if (Add->hasOneUse())
    assert(*Add->user_begin() == Cmp && "expected!");
#endif

  Instruction *InPt = Add->hasOneUse() ? cast<Instruction>(Cmp)
                                       : cast<Instruction>(Add);
  replaceMathCmpWithIntrinsic(Add, Cmp, InPt, Intrinsic::uadd_with_overflow);
  return true;
}
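
/// Try to combine a compare against a subtract (or against the canonical
/// add-of-negated-constant form of one) into a call to the
/// llvm.usub.with.overflow intrinsic. For example (illustrative IR):
///   %sub = sub i32 %a, %b
///   %ov = icmp ult i32 %a, %b
/// becomes:
///   %so = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
///   %sub = extractvalue { i32, i1 } %so, 0
///   %ov = extractvalue { i32, i1 } %so, 1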
static bool combineToUSubWithOverflow(CmpInst *Cmp, const TargetLowering &TLI,
                                      const DataLayout &DL, bool &ModifiedDT) {
  // Convert (A u> B) to (A u< B) to simplify pattern matching.
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_UGT) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A == 0) is the same as (A u< 1).
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
    B = ConstantInt::get(B->getType(), 1);
    Pred = ICmpInst::ICMP_ULT;
  }
  if (Pred != ICmpInst::ICMP_ULT)
    return false;

  // Walk the users of a variable operand of a compare looking for a subtract
  // or add with that same operand. Also match the 2nd operand of the compare
  // to the add/sub, but that may be a negated constant operand of an add.
  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
  BinaryOperator *Sub = nullptr;
  for (User *U : CmpVariableOperand->users()) {
    // A - B, A u< B --> usubo(A, B)
    if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
      Sub = cast<BinaryOperator>(U);
      break;
    }

    // A + (-C), A u< C (canonicalized form of (sub A, C))
    const APInt *CmpC, *AddC;
    if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
        match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
      Sub = cast<BinaryOperator>(U);
      break;
    }
  }
  if (!Sub)
    return false;

  if (!TLI.shouldFormOverflowOp(ISD::USUBO,
                                TLI.getValueType(DL, Sub->getType())))
    return false;

  // Pattern matched and profitability checked. Check dominance to determine
  // the insertion point for an intrinsic that replaces the subtract and
  // compare.
  DominatorTree DT(*Sub->getFunction());
  bool SubDominates = DT.dominates(Sub, Cmp);
  if (!SubDominates && !DT.dominates(Cmp, Sub))
    return false;
  Instruction *InPt = SubDominates ? cast<Instruction>(Sub)
                                   : cast<Instruction>(Cmp);
  replaceMathCmpWithIntrinsic(Sub, Cmp, InPt, Intrinsic::usub_with_overflow);
  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = true;
  return true;
}

/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
  if (TLI.hasMultipleConditionRegisters())
    return false;

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();
    BasicBlock *DefBB = Cmp->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
                          Cmp->getOperand(0), Cmp->getOperand(1), "",
                          &*InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (Cmp->use_empty()) {
    Cmp->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

static bool optimizeCmp(CmpInst *Cmp, const TargetLowering &TLI,
                        const DataLayout &DL, bool &ModifiedDT) {
  if (sinkCmpExpression(Cmp, TLI))
    return true;

  if (combineToUAddWithOverflow(Cmp, TLI, DL))
    return true;

  if (combineToUSubWithOverflow(Cmp, TLI, DL, ModifiedDT))
    return true;

  return false;
}

/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
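/// For example (illustrative IR), an 'and' whose only uses are zero-compares
/// in other blocks:
///   bb0:
///     %and = and i64 %arg, 3
///   bb1:
///     %cmp = icmp eq i64 %and, 0
/// is duplicated next to each compare:
///   bb1:
///     %and1 = and i64 %arg, 3
///     %cmp = icmp eq i64 %and1, 0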
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI,
                                  const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void) InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase
  // register pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink for and mask feeding icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  LLVM_DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same
    // block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd =
        BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
                               AndI->getOperand(1), "", InsertPt);
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    ++NumAndUses;
    LLVM_DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}

/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction where the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// Sink both the shift and truncate instructions to the block of the
/// truncate's user.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {
    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
1468 1469 ++TruncUI; 1470 1471 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1472 if (!ISDOpcode) 1473 continue; 1474 1475 // If the use is actually a legal node, there will not be an 1476 // implicit truncate. 1477 // FIXME: always querying the result type is just an 1478 // approximation; some nodes' legality is determined by the 1479 // operand or other means. There's no good way to find out though. 1480 if (TLI.isOperationLegalOrCustom( 1481 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1482 continue; 1483 1484 // Don't bother for PHI nodes. 1485 if (isa<PHINode>(TruncUser)) 1486 continue; 1487 1488 BasicBlock *TruncUserBB = TruncUser->getParent(); 1489 1490 if (UserBB == TruncUserBB) 1491 continue; 1492 1493 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1494 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1495 1496 if (!InsertedShift && !InsertedTrunc) { 1497 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1498 assert(InsertPt != TruncUserBB->end()); 1499 // Sink the shift. 1500 if (ShiftI->getOpcode() == Instruction::AShr) 1501 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1502 "", &*InsertPt); 1503 else 1504 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1505 "", &*InsertPt); 1506 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1507 1508 // Sink the trunc. 1509 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1510 TruncInsertPt++; 1511 assert(TruncInsertPt != TruncUserBB->end()); 1512 1513 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1514 TruncI->getType(), "", &*TruncInsertPt); 1515 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); 1516 1517 MadeChange = true; 1518 1519 TruncTheUse = InsertedTrunc; 1520 } 1521 } 1522 return MadeChange; 1523 } 1524 1525 /// Sink the shift *right* instruction into user blocks if the uses could 1526 /// potentially be combined with this shift instruction to generate a 1527 /// BitExtract instruction. It is applied only if the architecture supports 1528 /// BitExtract. Here is an example: 1529 /// BB1: 1530 /// %x.extract.shift = lshr i64 %arg1, 32 1531 /// BB2: 1532 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1533 /// ==> 1534 /// 1535 /// BB2: 1536 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1537 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1538 /// 1539 /// CodeGen will recognize the pattern in BB2 and generate a BitExtract 1540 /// instruction. 1541 /// Return true if any changes are made. 1542 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1543 const TargetLowering &TLI, 1544 const DataLayout &DL) { 1545 BasicBlock *DefBB = ShiftI->getParent(); 1546 1547 /// Only insert instructions in each block once. 1548 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1549 1550 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1551 1552 bool MadeChange = false; 1553 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1554 UI != E;) { 1555 Use &TheUse = UI.getUse(); 1556 Instruction *User = cast<Instruction>(*UI); 1557 // Preincrement use iterator so we don't invalidate it. 1558 ++UI; 1559 1560 // Don't bother for PHI nodes.
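// (As with the cmp sinking earlier, a PHI's use really belongs to the
// incoming edge rather than to the PHI's own block, so sinking a duplicated
// shift "next to" a PHI would not place it where the value is needed.)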
1561 if (isa<PHINode>(User)) 1562 continue; 1563 1564 if (!isExtractBitsCandidateUse(User)) 1565 continue; 1566 1567 BasicBlock *UserBB = User->getParent(); 1568 1569 if (UserBB == DefBB) { 1570 // If the shift and truncate instructions are in the same BB, the use of 1571 // the truncate (TruncUse) may still introduce another implicit truncate 1572 // if its type is not legal. In this case, we would like to sink both the 1573 // shift and the truncate to the BB of TruncUse. 1574 // For example: 1575 // BB1: 1576 // i64 shift.result = lshr i64 opnd, imm 1577 // trunc.result = trunc shift.result to i16 1578 // 1579 // BB2: 1580 // ----> We will have an implicit truncate here if the architecture does 1581 // not have an i16 compare. 1582 // cmp i16 trunc.result, opnd2 1583 // 1584 if (isa<TruncInst>(User) && shiftIsLegal 1585 // If the type of the truncate is legal, no truncate will be 1586 // introduced in other basic blocks. 1587 && 1588 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1589 MadeChange = 1590 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1591 1592 continue; 1593 } 1594 // If we have already inserted a shift into this block, use it. 1595 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1596 1597 if (!InsertedShift) { 1598 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1599 assert(InsertPt != UserBB->end()); 1600 1601 if (ShiftI->getOpcode() == Instruction::AShr) 1602 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1603 "", &*InsertPt); 1604 else 1605 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1606 "", &*InsertPt); 1607 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1608 1609 MadeChange = true; 1610 } 1611 1612 // Replace a use of the shift with a use of the new shift. 1613 TheUse = InsertedShift; 1614 } 1615 1616 // If we removed all uses, nuke the shift. 1617 if (ShiftI->use_empty()) { 1618 salvageDebugInfo(*ShiftI); 1619 ShiftI->eraseFromParent(); 1620 } 1621 1622 return MadeChange; 1623 } 1624 1625 /// If counting leading or trailing zeros is an expensive operation and a zero 1626 /// input is defined, add a check for zero to avoid calling the intrinsic. 1627 /// 1628 /// We want to transform: 1629 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1630 /// 1631 /// into: 1632 /// entry: 1633 /// %cmpz = icmp eq i64 %A, 0 1634 /// br i1 %cmpz, label %cond.end, label %cond.false 1635 /// cond.false: 1636 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1637 /// br label %cond.end 1638 /// cond.end: 1639 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1640 /// 1641 /// If the transform is performed, return true and set ModifiedDT to true. 1642 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1643 const TargetLowering *TLI, 1644 const DataLayout *DL, 1645 bool &ModifiedDT) { 1646 if (!TLI || !DL) 1647 return false; 1648 1649 // If a zero input is undefined, it doesn't make sense to despeculate that. 1650 if (match(CountZeros->getOperand(1), m_One())) 1651 return false; 1652 1653 // If it's cheap to speculate, there's nothing to do. 1654 auto IntrinsicID = CountZeros->getIntrinsicID(); 1655 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1656 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1657 return false; 1658 1659 // Only handle legal scalar cases. Anything else requires too much work.
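// For example (types invented), on a target whose largest legal integer is
// i64, a scalar i32 or i64 cttz/ctlz is despeculated below, while an i128 or
// a <4 x i32> version is left alone: those would need expansion beyond the
// single compare-and-branch this transform builds.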
1660 Type *Ty = CountZeros->getType(); 1661 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1662 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 1663 return false; 1664 1665 // The intrinsic will be sunk behind a compare against zero and branch. 1666 BasicBlock *StartBlock = CountZeros->getParent(); 1667 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1668 1669 // Create another block after the count zero intrinsic. A PHI will be added 1670 // in this block to select the result of the intrinsic or the bit-width 1671 // constant if the input to the intrinsic is zero. 1672 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 1673 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 1674 1675 // Set up a builder to create a compare, conditional branch, and PHI. 1676 IRBuilder<> Builder(CountZeros->getContext()); 1677 Builder.SetInsertPoint(StartBlock->getTerminator()); 1678 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 1679 1680 // Replace the unconditional branch that was created by the first split with 1681 // a compare against zero and a conditional branch. 1682 Value *Zero = Constant::getNullValue(Ty); 1683 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 1684 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 1685 StartBlock->getTerminator()->eraseFromParent(); 1686 1687 // Create a PHI in the end block to select either the output of the intrinsic 1688 // or the bit width of the operand. 1689 Builder.SetInsertPoint(&EndBlock->front()); 1690 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 1691 CountZeros->replaceAllUsesWith(PN); 1692 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 1693 PN->addIncoming(BitWidth, StartBlock); 1694 PN->addIncoming(CountZeros, CallBlock); 1695 1696 // We are explicitly handling the zero case, so we can set the intrinsic's 1697 // undefined zero argument to 'true'. This will also prevent reprocessing the 1698 // intrinsic; we only despeculate when a zero input is defined. 1699 CountZeros->setArgOperand(1, Builder.getTrue()); 1700 ModifiedDT = true; 1701 return true; 1702 } 1703 1704 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { 1705 BasicBlock *BB = CI->getParent(); 1706 1707 // Lower inline assembly if we can. 1708 // If we found an inline asm expression, and if the target knows how to 1709 // lower it to normal LLVM code, do so now. 1710 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 1711 if (TLI->ExpandInlineAsm(CI)) { 1712 // Avoid invalidating the iterator. 1713 CurInstIterator = BB->begin(); 1714 // Avoid processing instructions out of order, which could cause 1715 // reuse before a value is defined. 1716 SunkAddrs.clear(); 1717 return true; 1718 } 1719 // Sink address computing for memory operands into the block. 1720 if (optimizeInlineAsmInst(CI)) 1721 return true; 1722 } 1723 1724 // Align the pointer arguments to this call if the target thinks it's a good 1725 // idea. 1726 unsigned MinSize, PrefAlign; 1727 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 1728 for (auto &Arg : CI->arg_operands()) { 1729 // We want to align both objects whose address is used directly and 1730 // objects whose address is used in casts and GEPs, though it only makes 1731 // sense for GEPs if the offset is a multiple of the desired alignment and 1732 // if size - offset meets the size threshold.
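// A worked example with invented numbers: with PrefAlign = 16, a pointer
// argument that is a GEP at constant offset 32 into a 64-byte alloca passes
// both tests below (32 & 15 == 0, and 64 >= MinSize + 32 whenever
// MinSize <= 32), so the alloca may be realigned; an offset of 20 would fail
// the multiple-of-alignment test and be skipped.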
1733 if (!Arg->getType()->isPointerTy()) 1734 continue; 1735 APInt Offset(DL->getIndexSizeInBits( 1736 cast<PointerType>(Arg->getType())->getAddressSpace()), 1737 0); 1738 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 1739 uint64_t Offset2 = Offset.getLimitedValue(); 1740 if ((Offset2 & (PrefAlign-1)) != 0) 1741 continue; 1742 AllocaInst *AI; 1743 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 1744 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 1745 AI->setAlignment(PrefAlign); 1746 // Global variables can only be aligned if they are defined in this 1747 // object (i.e. they are uniquely initialized in this object), and 1748 // over-aligning global variables that have an explicit section is 1749 // forbidden. 1750 GlobalVariable *GV; 1751 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 1752 GV->getPointerAlignment(*DL) < PrefAlign && 1753 DL->getTypeAllocSize(GV->getValueType()) >= 1754 MinSize + Offset2) 1755 GV->setAlignment(PrefAlign); 1756 } 1757 // If this is a memcpy (or similar) then we may be able to improve the 1758 // alignment. 1759 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 1760 unsigned DestAlign = getKnownAlignment(MI->getDest(), *DL); 1761 if (DestAlign > MI->getDestAlignment()) 1762 MI->setDestAlignment(DestAlign); 1763 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { 1764 unsigned SrcAlign = getKnownAlignment(MTI->getSource(), *DL); 1765 if (SrcAlign > MTI->getSourceAlignment()) 1766 MTI->setSourceAlignment(SrcAlign); 1767 } 1768 } 1769 } 1770 1771 // If we have a cold call site, try to sink addressing computation into the 1772 // cold block. This interacts with our handling for loads and stores to 1773 // ensure that we can fold all uses of a potential addressing computation 1774 // into their uses. TODO: generalize this to work over profiling data. 1775 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 1776 for (auto &Arg : CI->arg_operands()) { 1777 if (!Arg->getType()->isPointerTy()) 1778 continue; 1779 unsigned AS = Arg->getType()->getPointerAddressSpace(); 1780 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 1781 } 1782 1783 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 1784 if (II) { 1785 switch (II->getIntrinsicID()) { 1786 default: break; 1787 case Intrinsic::experimental_widenable_condition: { 1788 // Give up on future widening opportunities so that we can fold away dead 1789 // paths and merge blocks before going into block-local instruction 1790 // selection. 1791 if (II->use_empty()) { 1792 II->eraseFromParent(); 1793 return true; 1794 } 1795 Constant *RetVal = ConstantInt::getTrue(II->getContext()); 1796 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1797 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1798 }); 1799 return true; 1800 } 1801 case Intrinsic::objectsize: { 1802 // Lower all uses of llvm.objectsize.* 1803 Value *RetVal = 1804 lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); 1805 1806 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1807 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1808 }); 1809 return true; 1810 } 1811 case Intrinsic::is_constant: { 1812 // If is_constant hasn't folded away yet, lower it to false now.
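// For instance (illustrative IR):
//   %c = call i1 @llvm.is.constant.i32(i32 %x)
// is replaced by 'i1 false' here: by this point, earlier folding should
// already have turned a genuinely constant %x into 'i1 true'.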
1813 Constant *RetVal = ConstantInt::get(II->getType(), 0); 1814 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1815 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1816 }); 1817 return true; 1818 } 1819 case Intrinsic::aarch64_stlxr: 1820 case Intrinsic::aarch64_stxr: { 1821 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 1822 if (!ExtVal || !ExtVal->hasOneUse() || 1823 ExtVal->getParent() == CI->getParent()) 1824 return false; 1825 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 1826 ExtVal->moveBefore(CI); 1827 // Mark this instruction as "inserted by CGP", so that other 1828 // optimizations don't touch it. 1829 InsertedInsts.insert(ExtVal); 1830 return true; 1831 } 1832 1833 case Intrinsic::launder_invariant_group: 1834 case Intrinsic::strip_invariant_group: { 1835 Value *ArgVal = II->getArgOperand(0); 1836 auto it = LargeOffsetGEPMap.find(II); 1837 if (it != LargeOffsetGEPMap.end()) { 1838 // Merge entries in LargeOffsetGEPMap to reflect the RAUW. 1839 // Make sure not to have to deal with iterator invalidation 1840 // after possibly adding ArgVal to LargeOffsetGEPMap. 1841 auto GEPs = std::move(it->second); 1842 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); 1843 LargeOffsetGEPMap.erase(II); 1844 } 1845 1846 II->replaceAllUsesWith(ArgVal); 1847 II->eraseFromParent(); 1848 return true; 1849 } 1850 case Intrinsic::cttz: 1851 case Intrinsic::ctlz: 1852 // If counting zeros is expensive, try to avoid it. 1853 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 1854 } 1855 1856 if (TLI) { 1857 SmallVector<Value*, 2> PtrOps; 1858 Type *AccessTy; 1859 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 1860 while (!PtrOps.empty()) { 1861 Value *PtrVal = PtrOps.pop_back_val(); 1862 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 1863 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 1864 return true; 1865 } 1866 } 1867 } 1868 1869 // From here on out we're working with named functions. 1870 if (!CI->getCalledFunction()) return false; 1871 1872 // Lower all default uses of _chk calls. This is very similar 1873 // to what InstCombineCalls does, but here we are only lowering calls 1874 // to fortified library functions (e.g. __memcpy_chk) that have the default 1875 // "don't know" as the objectsize. Anything else should be left alone. 1876 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 1877 if (Value *V = Simplifier.optimizeCall(CI)) { 1878 CI->replaceAllUsesWith(V); 1879 CI->eraseFromParent(); 1880 return true; 1881 } 1882 1883 return false; 1884 } 1885 1886 /// Look for opportunities to duplicate return instructions to the predecessor 1887 /// to enable tail call optimizations. 
The case it is currently looking for is: 1888 /// @code 1889 /// bb0: 1890 /// %tmp0 = tail call i32 @f0() 1891 /// br label %return 1892 /// bb1: 1893 /// %tmp1 = tail call i32 @f1() 1894 /// br label %return 1895 /// bb2: 1896 /// %tmp2 = tail call i32 @f2() 1897 /// br label %return 1898 /// return: 1899 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 1900 /// ret i32 %retval 1901 /// @endcode 1902 /// 1903 /// => 1904 /// 1905 /// @code 1906 /// bb0: 1907 /// %tmp0 = tail call i32 @f0() 1908 /// ret i32 %tmp0 1909 /// bb1: 1910 /// %tmp1 = tail call i32 @f1() 1911 /// ret i32 %tmp1 1912 /// bb2: 1913 /// %tmp2 = tail call i32 @f2() 1914 /// ret i32 %tmp2 1915 /// @endcode 1916 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { 1917 if (!TLI) 1918 return false; 1919 1920 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 1921 if (!RetI) 1922 return false; 1923 1924 PHINode *PN = nullptr; 1925 BitCastInst *BCI = nullptr; 1926 Value *V = RetI->getReturnValue(); 1927 if (V) { 1928 BCI = dyn_cast<BitCastInst>(V); 1929 if (BCI) 1930 V = BCI->getOperand(0); 1931 1932 PN = dyn_cast<PHINode>(V); 1933 if (!PN) 1934 return false; 1935 } 1936 1937 if (PN && PN->getParent() != BB) 1938 return false; 1939 1940 // Make sure there are no instructions between the PHI and return, or that the 1941 // return is the first instruction in the block. 1942 if (PN) { 1943 BasicBlock::iterator BI = BB->begin(); 1944 // Skip over debug and the bitcast. 1945 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI) || &*BI == BCI); 1946 if (&*BI != RetI) 1947 return false; 1948 } else { 1949 BasicBlock::iterator BI = BB->begin(); 1950 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 1951 if (&*BI != RetI) 1952 return false; 1953 } 1954 1955 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 1956 /// call. 1957 const Function *F = BB->getParent(); 1958 SmallVector<CallInst*, 4> TailCalls; 1959 if (PN) { 1960 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 1961 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 1962 // Make sure the phi value is indeed produced by the tail call. 1963 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && 1964 TLI->mayBeEmittedAsTailCall(CI) && 1965 attributesPermitTailCall(F, CI, RetI, *TLI)) 1966 TailCalls.push_back(CI); 1967 } 1968 } else { 1969 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 1970 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 1971 if (!VisitedBBs.insert(*PI).second) 1972 continue; 1973 1974 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 1975 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 1976 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 1977 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 1978 if (RI == RE) 1979 continue; 1980 1981 CallInst *CI = dyn_cast<CallInst>(&*RI); 1982 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 1983 attributesPermitTailCall(F, CI, RetI, *TLI)) 1984 TailCalls.push_back(CI); 1985 } 1986 } 1987 1988 bool Changed = false; 1989 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { 1990 CallInst *CI = TailCalls[i]; 1991 CallSite CS(CI); 1992 1993 // Make sure the call instruction is followed by an unconditional branch to 1994 // the return block. 
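// That is, each candidate block must end in the shape shown in the @code
// example above ('%tmp = tail call ...' followed by 'br label %return');
// a conditional branch or a different successor disqualifies it.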
1995 BasicBlock *CallBB = CI->getParent(); 1996 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); 1997 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 1998 continue; 1999 2000 // Duplicate the return into CallBB. 2001 (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); 2002 ModifiedDT = Changed = true; 2003 ++NumRetsDup; 2004 } 2005 2006 // If we eliminated all predecessors of the block, delete the block now. 2007 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 2008 BB->eraseFromParent(); 2009 2010 return Changed; 2011 } 2012 2013 //===----------------------------------------------------------------------===// 2014 // Memory Optimization 2015 //===----------------------------------------------------------------------===// 2016 2017 namespace { 2018 2019 /// This is an extended version of TargetLowering::AddrMode 2020 /// which holds actual Value*'s for register values. 2021 struct ExtAddrMode : public TargetLowering::AddrMode { 2022 Value *BaseReg = nullptr; 2023 Value *ScaledReg = nullptr; 2024 Value *OriginalValue = nullptr; 2025 2026 enum FieldName { 2027 NoField = 0x00, 2028 BaseRegField = 0x01, 2029 BaseGVField = 0x02, 2030 BaseOffsField = 0x04, 2031 ScaledRegField = 0x08, 2032 ScaleField = 0x10, 2033 MultipleFields = 0xff 2034 }; 2035 2036 ExtAddrMode() = default; 2037 2038 void print(raw_ostream &OS) const; 2039 void dump() const; 2040 2041 FieldName compare(const ExtAddrMode &other) { 2042 // First check that the types are the same on each field, as differing types 2043 // is something we can't cope with later on. 2044 if (BaseReg && other.BaseReg && 2045 BaseReg->getType() != other.BaseReg->getType()) 2046 return MultipleFields; 2047 if (BaseGV && other.BaseGV && 2048 BaseGV->getType() != other.BaseGV->getType()) 2049 return MultipleFields; 2050 if (ScaledReg && other.ScaledReg && 2051 ScaledReg->getType() != other.ScaledReg->getType()) 2052 return MultipleFields; 2053 2054 // Check each field to see if it differs. 2055 unsigned Result = NoField; 2056 if (BaseReg != other.BaseReg) 2057 Result |= BaseRegField; 2058 if (BaseGV != other.BaseGV) 2059 Result |= BaseGVField; 2060 if (BaseOffs != other.BaseOffs) 2061 Result |= BaseOffsField; 2062 if (ScaledReg != other.ScaledReg) 2063 Result |= ScaledRegField; 2064 // Don't count 0 as being a different scale, because that actually means 2065 // unscaled (which will already be counted by having no ScaledReg). 2066 if (Scale && other.Scale && Scale != other.Scale) 2067 Result |= ScaleField; 2068 2069 if (countPopulation(Result) > 1) 2070 return MultipleFields; 2071 else 2072 return static_cast<FieldName>(Result); 2073 } 2074 2075 // An AddrMode is trivial if it involves no calculation i.e. it is just a base 2076 // with no offset. 2077 bool isTrivial() { 2078 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is 2079 // trivial if at most one of these terms is nonzero, except that BaseGV and 2080 // BaseReg both being zero actually means a null pointer value, which we 2081 // consider to be 'non-zero' here. 
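// For illustration, in the notation of print() below: [Base:%p] alone or
// [GV:g] alone is trivial, while [Base:%p + 8], [2*%idx] and
// [GV:g + Base:%r] are not.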
2082 return !BaseOffs && !Scale && !(BaseGV && BaseReg); 2083 } 2084 2085 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { 2086 switch (Field) { 2087 default: 2088 return nullptr; 2089 case BaseRegField: 2090 return BaseReg; 2091 case BaseGVField: 2092 return BaseGV; 2093 case ScaledRegField: 2094 return ScaledReg; 2095 case BaseOffsField: 2096 return ConstantInt::get(IntPtrTy, BaseOffs); 2097 } 2098 } 2099 2100 void SetCombinedField(FieldName Field, Value *V, 2101 const SmallVectorImpl<ExtAddrMode> &AddrModes) { 2102 switch (Field) { 2103 default: 2104 llvm_unreachable("Unhandled fields are expected to be rejected earlier"); 2105 break; 2106 case ExtAddrMode::BaseRegField: 2107 BaseReg = V; 2108 break; 2109 case ExtAddrMode::BaseGVField: 2110 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes 2111 // in the BaseReg field. 2112 assert(BaseReg == nullptr); 2113 BaseReg = V; 2114 BaseGV = nullptr; 2115 break; 2116 case ExtAddrMode::ScaledRegField: 2117 ScaledReg = V; 2118 // If we have a mix of scaled and unscaled addrmodes then we want scale 2119 // to be the scale and not zero. 2120 if (!Scale) 2121 for (const ExtAddrMode &AM : AddrModes) 2122 if (AM.Scale) { 2123 Scale = AM.Scale; 2124 break; 2125 } 2126 break; 2127 case ExtAddrMode::BaseOffsField: 2128 // The offset is no longer a constant, so it goes in ScaledReg with a 2129 // scale of 1. 2130 assert(ScaledReg == nullptr); 2131 ScaledReg = V; 2132 Scale = 1; 2133 BaseOffs = 0; 2134 break; 2135 } 2136 } 2137 }; 2138 2139 } // end anonymous namespace 2140 2141 #ifndef NDEBUG 2142 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2143 AM.print(OS); 2144 return OS; 2145 } 2146 #endif 2147 2148 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2149 void ExtAddrMode::print(raw_ostream &OS) const { 2150 bool NeedPlus = false; 2151 OS << "["; 2152 if (BaseGV) { 2153 OS << (NeedPlus ? " + " : "") 2154 << "GV:"; 2155 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2156 NeedPlus = true; 2157 } 2158 2159 if (BaseOffs) { 2160 OS << (NeedPlus ? " + " : "") 2161 << BaseOffs; 2162 NeedPlus = true; 2163 } 2164 2165 if (BaseReg) { 2166 OS << (NeedPlus ? " + " : "") 2167 << "Base:"; 2168 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2169 NeedPlus = true; 2170 } 2171 if (Scale) { 2172 OS << (NeedPlus ? " + " : "") 2173 << Scale << "*"; 2174 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2175 } 2176 2177 OS << ']'; 2178 } 2179 2180 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2181 print(dbgs()); 2182 dbgs() << '\n'; 2183 } 2184 #endif 2185 2186 namespace { 2187 2188 /// This class provides transaction based operation on the IR. 2189 /// Every change made through this class is recorded in the internal state and 2190 /// can be undone (rollback) until commit is called. 2191 class TypePromotionTransaction { 2192 /// This represents the common interface of the individual transaction. 2193 /// Each class implements the logic for doing one specific modification on 2194 /// the IR via the TypePromotionTransaction. 2195 class TypePromotionAction { 2196 protected: 2197 /// The Instruction modified. 2198 Instruction *Inst; 2199 2200 public: 2201 /// Constructor of the action. 2202 /// The constructor performs the related action on the IR. 2203 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2204 2205 virtual ~TypePromotionAction() = default; 2206 2207 /// Undo the modification done by this action. 
2208 /// When this method is called, the IR must be in the same state as it was 2209 /// before this action was applied. 2210 /// \pre Undoing the action works if and only if the IR is in the exact same 2211 /// state as it was directly after this action was applied. 2212 virtual void undo() = 0; 2213 2214 /// Advocate every change made by this action. 2215 /// When the results on the IR of the action are to be kept, it is important 2216 /// to call this function, otherwise hidden information may be kept forever. 2217 virtual void commit() { 2218 // Nothing to be done, this action is not doing anything. 2219 } 2220 }; 2221 2222 /// Utility to remember the position of an instruction. 2223 class InsertionHandler { 2224 /// Position of an instruction. 2225 /// Either an instruction: 2226 /// - Is the first in a basic block: BB is used. 2227 /// - Has a previous instruction: PrevInst is used. 2228 union { 2229 Instruction *PrevInst; 2230 BasicBlock *BB; 2231 } Point; 2232 2233 /// Remember whether or not the instruction had a previous instruction. 2234 bool HasPrevInstruction; 2235 2236 public: 2237 /// Record the position of \p Inst. 2238 InsertionHandler(Instruction *Inst) { 2239 BasicBlock::iterator It = Inst->getIterator(); 2240 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2241 if (HasPrevInstruction) 2242 Point.PrevInst = &*--It; 2243 else 2244 Point.BB = Inst->getParent(); 2245 } 2246 2247 /// Insert \p Inst at the recorded position. 2248 void insert(Instruction *Inst) { 2249 if (HasPrevInstruction) { 2250 if (Inst->getParent()) 2251 Inst->removeFromParent(); 2252 Inst->insertAfter(Point.PrevInst); 2253 } else { 2254 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2255 if (Inst->getParent()) 2256 Inst->moveBefore(Position); 2257 else 2258 Inst->insertBefore(Position); 2259 } 2260 } 2261 }; 2262 2263 /// Move an instruction before another. 2264 class InstructionMoveBefore : public TypePromotionAction { 2265 /// Original position of the instruction. 2266 InsertionHandler Position; 2267 2268 public: 2269 /// Move \p Inst before \p Before. 2270 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2271 : TypePromotionAction(Inst), Position(Inst) { 2272 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before 2273 << "\n"); 2274 Inst->moveBefore(Before); 2275 } 2276 2277 /// Move the instruction back to its original position. 2278 void undo() override { 2279 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2280 Position.insert(Inst); 2281 } 2282 }; 2283 2284 /// Set the operand of an instruction with a new value. 2285 class OperandSetter : public TypePromotionAction { 2286 /// Original operand of the instruction. 2287 Value *Origin; 2288 2289 /// Index of the modified instruction. 2290 unsigned Idx; 2291 2292 public: 2293 /// Set \p Idx operand of \p Inst with \p NewVal. 2294 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2295 : TypePromotionAction(Inst), Idx(Idx) { 2296 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2297 << "for:" << *Inst << "\n" 2298 << "with:" << *NewVal << "\n"); 2299 Origin = Inst->getOperand(Idx); 2300 Inst->setOperand(Idx, NewVal); 2301 } 2302 2303 /// Restore the original value of the instruction. 2304 void undo() override { 2305 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2306 << "for: " << *Inst << "\n" 2307 << "with: " << *Origin << "\n"); 2308 Inst->setOperand(Idx, Origin); 2309 } 2310 }; 2311 2312 /// Hide the operands of an instruction. 
2313 /// Do as if this instruction was not using any of its operands. 2314 class OperandsHider : public TypePromotionAction { 2315 /// The list of original operands. 2316 SmallVector<Value *, 4> OriginalValues; 2317 2318 public: 2319 /// Remove \p Inst from the uses of the operands of \p Inst. 2320 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2321 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2322 unsigned NumOpnds = Inst->getNumOperands(); 2323 OriginalValues.reserve(NumOpnds); 2324 for (unsigned It = 0; It < NumOpnds; ++It) { 2325 // Save the current operand. 2326 Value *Val = Inst->getOperand(It); 2327 OriginalValues.push_back(Val); 2328 // Set a dummy one. 2329 // We could use OperandSetter here, but that would imply an overhead 2330 // that we are not willing to pay. 2331 Inst->setOperand(It, UndefValue::get(Val->getType())); 2332 } 2333 } 2334 2335 /// Restore the original list of uses. 2336 void undo() override { 2337 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2338 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2339 Inst->setOperand(It, OriginalValues[It]); 2340 } 2341 }; 2342 2343 /// Build a truncate instruction. 2344 class TruncBuilder : public TypePromotionAction { 2345 Value *Val; 2346 2347 public: 2348 /// Build a truncate instruction of \p Opnd producing a \p Ty 2349 /// result. 2350 /// trunc Opnd to Ty. 2351 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2352 IRBuilder<> Builder(Opnd); 2353 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2354 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2355 } 2356 2357 /// Get the built value. 2358 Value *getBuiltValue() { return Val; } 2359 2360 /// Remove the built instruction. 2361 void undo() override { 2362 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2363 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2364 IVal->eraseFromParent(); 2365 } 2366 }; 2367 2368 /// Build a sign extension instruction. 2369 class SExtBuilder : public TypePromotionAction { 2370 Value *Val; 2371 2372 public: 2373 /// Build a sign extension instruction of \p Opnd producing a \p Ty 2374 /// result. 2375 /// sext Opnd to Ty. 2376 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2377 : TypePromotionAction(InsertPt) { 2378 IRBuilder<> Builder(InsertPt); 2379 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2380 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2381 } 2382 2383 /// Get the built value. 2384 Value *getBuiltValue() { return Val; } 2385 2386 /// Remove the built instruction. 2387 void undo() override { 2388 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2389 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2390 IVal->eraseFromParent(); 2391 } 2392 }; 2393 2394 /// Build a zero extension instruction. 2395 class ZExtBuilder : public TypePromotionAction { 2396 Value *Val; 2397 2398 public: 2399 /// Build a zero extension instruction of \p Opnd producing a \p Ty 2400 /// result. 2401 /// zext Opnd to Ty. 2402 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2403 : TypePromotionAction(InsertPt) { 2404 IRBuilder<> Builder(InsertPt); 2405 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2406 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2407 } 2408 2409 /// Get the built value. 2410 Value *getBuiltValue() { return Val; } 2411 2412 /// Remove the built instruction. 
2413 void undo() override { 2414 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2415 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2416 IVal->eraseFromParent(); 2417 } 2418 }; 2419 2420 /// Mutate an instruction to another type. 2421 class TypeMutator : public TypePromotionAction { 2422 /// Record the original type. 2423 Type *OrigTy; 2424 2425 public: 2426 /// Mutate the type of \p Inst into \p NewTy. 2427 TypeMutator(Instruction *Inst, Type *NewTy) 2428 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2429 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2430 << "\n"); 2431 Inst->mutateType(NewTy); 2432 } 2433 2434 /// Mutate the instruction back to its original type. 2435 void undo() override { 2436 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2437 << "\n"); 2438 Inst->mutateType(OrigTy); 2439 } 2440 }; 2441 2442 /// Replace the uses of an instruction by another instruction. 2443 class UsesReplacer : public TypePromotionAction { 2444 /// Helper structure to keep track of the replaced uses. 2445 struct InstructionAndIdx { 2446 /// The instruction using the instruction. 2447 Instruction *Inst; 2448 2449 /// The index where this instruction is used for Inst. 2450 unsigned Idx; 2451 2452 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2453 : Inst(Inst), Idx(Idx) {} 2454 }; 2455 2456 /// Keep track of the original uses (pair Instruction, Index). 2457 SmallVector<InstructionAndIdx, 4> OriginalUses; 2458 /// Keep track of the debug users. 2459 SmallVector<DbgValueInst *, 1> DbgValues; 2460 2461 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; 2462 2463 public: 2464 /// Replace all the use of \p Inst by \p New. 2465 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2466 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2467 << "\n"); 2468 // Record the original uses. 2469 for (Use &U : Inst->uses()) { 2470 Instruction *UserI = cast<Instruction>(U.getUser()); 2471 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2472 } 2473 // Record the debug uses separately. They are not in the instruction's 2474 // use list, but they are replaced by RAUW. 2475 findDbgValues(DbgValues, Inst); 2476 2477 // Now, we can replace the uses. 2478 Inst->replaceAllUsesWith(New); 2479 } 2480 2481 /// Reassign the original uses of Inst to Inst. 2482 void undo() override { 2483 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2484 for (use_iterator UseIt = OriginalUses.begin(), 2485 EndIt = OriginalUses.end(); 2486 UseIt != EndIt; ++UseIt) { 2487 UseIt->Inst->setOperand(UseIt->Idx, Inst); 2488 } 2489 // RAUW has replaced all original uses with references to the new value, 2490 // including the debug uses. Since we are undoing the replacements, 2491 // the original debug uses must also be reinstated to maintain the 2492 // correctness and utility of debug value instructions. 2493 for (auto *DVI: DbgValues) { 2494 LLVMContext &Ctx = Inst->getType()->getContext(); 2495 auto *MV = MetadataAsValue::get(Ctx, ValueAsMetadata::get(Inst)); 2496 DVI->setOperand(0, MV); 2497 } 2498 } 2499 }; 2500 2501 /// Remove an instruction from the IR. 2502 class InstructionRemover : public TypePromotionAction { 2503 /// Original position of the instruction. 2504 InsertionHandler Inserter; 2505 2506 /// Helper structure to hide all the link to the instruction. In other 2507 /// words, this helps to do as if the instruction was removed. 
2508 OperandsHider Hider; 2509 2510 /// Keep track of the uses replaced, if any. 2511 UsesReplacer *Replacer = nullptr; 2512 2513 /// Keep track of instructions removed. 2514 SetOfInstrs &RemovedInsts; 2515 2516 public: 2517 /// Remove all references of \p Inst and optionally replace all its 2518 /// uses with New. 2519 /// \p RemovedInsts Keep track of the instructions removed by this Action. 2520 /// \pre If !Inst->use_empty(), then New != nullptr 2521 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, 2522 Value *New = nullptr) 2523 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 2524 RemovedInsts(RemovedInsts) { 2525 if (New) 2526 Replacer = new UsesReplacer(Inst, New); 2527 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 2528 RemovedInsts.insert(Inst); 2529 /// The instructions removed here will be freed after completing 2530 /// optimizeBlock() for all blocks as we need to keep track of the 2531 /// removed instructions during promotion. 2532 Inst->removeFromParent(); 2533 } 2534 2535 ~InstructionRemover() override { delete Replacer; } 2536 2537 /// Resurrect the instruction and reassign it to the proper uses if a 2538 /// new value was provided when building this action. 2539 void undo() override { 2540 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 2541 Inserter.insert(Inst); 2542 if (Replacer) 2543 Replacer->undo(); 2544 Hider.undo(); 2545 RemovedInsts.erase(Inst); 2546 } 2547 }; 2548 2549 public: 2550 /// Restoration point. 2551 /// The restoration point is a pointer to an action instead of an iterator 2552 /// because the iterator may be invalidated but not the pointer. 2553 using ConstRestorationPt = const TypePromotionAction *; 2554 2555 TypePromotionTransaction(SetOfInstrs &RemovedInsts) 2556 : RemovedInsts(RemovedInsts) {} 2557 2558 /// Advocate every change made in this transaction. 2559 void commit(); 2560 2561 /// Undo all the changes made after the given point. 2562 void rollback(ConstRestorationPt Point); 2563 2564 /// Get the current restoration point. 2565 ConstRestorationPt getRestorationPoint() const; 2566 2567 /// \name API for IR modification with state keeping to support rollback. 2568 /// @{ 2569 /// Same as Instruction::setOperand. 2570 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 2571 2572 /// Same as Instruction::eraseFromParent. 2573 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 2574 2575 /// Same as Value::replaceAllUsesWith. 2576 void replaceAllUsesWith(Instruction *Inst, Value *New); 2577 2578 /// Same as Value::mutateType. 2579 void mutateType(Instruction *Inst, Type *NewTy); 2580 2581 /// Same as IRBuilder::CreateTrunc. 2582 Value *createTrunc(Instruction *Opnd, Type *Ty); 2583 2584 /// Same as IRBuilder::CreateSExt. 2585 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 2586 2587 /// Same as IRBuilder::CreateZExt. 2588 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 2589 2590 /// Same as Instruction::moveBefore. 2591 void moveBefore(Instruction *Inst, Instruction *Before); 2592 /// @} 2593 2594 private: 2595 /// The ordered list of actions made so far.
2596 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2597 2598 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 2599 2600 SetOfInstrs &RemovedInsts; 2601 }; 2602 2603 } // end anonymous namespace 2604 2605 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2606 Value *NewVal) { 2607 Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>( 2608 Inst, Idx, NewVal)); 2609 } 2610 2611 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2612 Value *NewVal) { 2613 Actions.push_back( 2614 llvm::make_unique<TypePromotionTransaction::InstructionRemover>( 2615 Inst, RemovedInsts, NewVal)); 2616 } 2617 2618 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2619 Value *New) { 2620 Actions.push_back( 2621 llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2622 } 2623 2624 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2625 Actions.push_back( 2626 llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2627 } 2628 2629 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2630 Type *Ty) { 2631 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2632 Value *Val = Ptr->getBuiltValue(); 2633 Actions.push_back(std::move(Ptr)); 2634 return Val; 2635 } 2636 2637 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2638 Value *Opnd, Type *Ty) { 2639 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2640 Value *Val = Ptr->getBuiltValue(); 2641 Actions.push_back(std::move(Ptr)); 2642 return Val; 2643 } 2644 2645 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2646 Value *Opnd, Type *Ty) { 2647 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2648 Value *Val = Ptr->getBuiltValue(); 2649 Actions.push_back(std::move(Ptr)); 2650 return Val; 2651 } 2652 2653 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2654 Instruction *Before) { 2655 Actions.push_back( 2656 llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 2657 Inst, Before)); 2658 } 2659 2660 TypePromotionTransaction::ConstRestorationPt 2661 TypePromotionTransaction::getRestorationPoint() const { 2662 return !Actions.empty() ? Actions.back().get() : nullptr; 2663 } 2664 2665 void TypePromotionTransaction::commit() { 2666 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2667 ++It) 2668 (*It)->commit(); 2669 Actions.clear(); 2670 } 2671 2672 void TypePromotionTransaction::rollback( 2673 TypePromotionTransaction::ConstRestorationPt Point) { 2674 while (!Actions.empty() && Point != Actions.back().get()) { 2675 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2676 Curr->undo(); 2677 } 2678 } 2679 2680 namespace { 2681 2682 /// A helper class for matching addressing modes. 2683 /// 2684 /// This encapsulates the logic for matching the target-legal addressing modes. 2685 class AddressingModeMatcher { 2686 SmallVectorImpl<Instruction*> &AddrModeInsts; 2687 const TargetLowering &TLI; 2688 const TargetRegisterInfo &TRI; 2689 const DataLayout &DL; 2690 2691 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2692 /// the memory instruction that we're computing this address for. 2693 Type *AccessTy; 2694 unsigned AddrSpace; 2695 Instruction *MemoryInst; 2696 2697 /// This is the addressing mode that we're building up. This is 2698 /// part of the return value of this addressing mode matching stuff. 
2699 ExtAddrMode &AddrMode; 2700 2701 /// The instructions inserted by other CodeGenPrepare optimizations. 2702 const SetOfInstrs &InsertedInsts; 2703 2704 /// A map from the instructions to their type before promotion. 2705 InstrToOrigTy &PromotedInsts; 2706 2707 /// The ongoing transaction where every action should be registered. 2708 TypePromotionTransaction &TPT; 2709 2710 // A GEP which has too large an offset to be folded into the addressing mode. 2711 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; 2712 2713 /// This is set to true when we should not do profitability checks. 2714 /// When true, isProfitableToFoldIntoAddressingMode always returns true. 2715 bool IgnoreProfitability; 2716 2717 AddressingModeMatcher( 2718 SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, 2719 const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI, 2720 ExtAddrMode &AM, const SetOfInstrs &InsertedInsts, 2721 InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, 2722 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) 2723 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 2724 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 2725 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 2726 PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP) { 2727 IgnoreProfitability = false; 2728 } 2729 2730 public: 2731 /// Find the maximal addressing mode that a load/store of V can fold, 2732 /// given an access type of AccessTy. This returns a list of involved 2733 /// instructions in AddrModeInsts. 2734 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 2735 /// optimizations. 2736 /// \p PromotedInsts maps the instructions to their type before promotion. 2737 /// \p TPT The ongoing transaction where every action should be registered. 2738 static ExtAddrMode 2739 Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, 2740 SmallVectorImpl<Instruction *> &AddrModeInsts, 2741 const TargetLowering &TLI, const TargetRegisterInfo &TRI, 2742 const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, 2743 TypePromotionTransaction &TPT, 2744 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) { 2745 ExtAddrMode Result; 2746 2747 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS, 2748 MemoryInst, Result, InsertedInsts, 2749 PromotedInsts, TPT, LargeOffsetGEP) 2750 .matchAddr(V, 0); 2751 (void)Success; assert(Success && "Couldn't select *anything*?"); 2752 return Result; 2753 } 2754 2755 private: 2756 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 2757 bool matchAddr(Value *Addr, unsigned Depth); 2758 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, 2759 bool *MovedAway = nullptr); 2760 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 2761 ExtAddrMode &AMBefore, 2762 ExtAddrMode &AMAfter); 2763 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 2764 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 2765 Value *PromotedOperand) const; 2766 }; 2767 2768 class PhiNodeSet; 2769 2770 /// An iterator for PhiNodeSet. 2771 class PhiNodeSetIterator { 2772 PhiNodeSet * const Set; 2773 size_t CurrentIndex = 0; 2774 2775 public: 2776 /// The constructor. Start should point to either a valid element, or be equal 2777 /// to the size of the underlying SmallVector of the PhiNodeSet.
2778 PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start); 2779 PHINode * operator*() const; 2780 PhiNodeSetIterator& operator++(); 2781 bool operator==(const PhiNodeSetIterator &RHS) const; 2782 bool operator!=(const PhiNodeSetIterator &RHS) const; 2783 }; 2784 2785 /// Keeps a set of PHINodes. 2786 /// 2787 /// This is a minimal set implementation for a specific use case: 2788 /// It is very fast when there are very few elements, but also provides good 2789 /// performance when there are many. It is similar to SmallPtrSet, but also 2790 /// provides iteration by insertion order, which is deterministic and stable 2791 /// across runs. It is also similar to SmallSetVector, but provides removing 2792 /// elements in O(1) time. This is achieved by not actually removing the element 2793 /// from the underlying vector, so comes at the cost of using more memory, but 2794 /// that is fine, since PhiNodeSets are used as short lived objects. 2795 class PhiNodeSet { 2796 friend class PhiNodeSetIterator; 2797 2798 using MapType = SmallDenseMap<PHINode *, size_t, 32>; 2799 using iterator = PhiNodeSetIterator; 2800 2801 /// Keeps the elements in the order of their insertion in the underlying 2802 /// vector. To achieve constant time removal, it never deletes any element. 2803 SmallVector<PHINode *, 32> NodeList; 2804 2805 /// Keeps the elements in the underlying set implementation. This (and not the 2806 /// NodeList defined above) is the source of truth on whether an element 2807 /// is actually in the collection. 2808 MapType NodeMap; 2809 2810 /// Points to the first valid (not deleted) element when the set is not empty 2811 /// and the value is not zero. Equals to the size of the underlying vector 2812 /// when the set is empty. When the value is 0, as in the beginning, the 2813 /// first element may or may not be valid. 2814 size_t FirstValidElement = 0; 2815 2816 public: 2817 /// Inserts a new element to the collection. 2818 /// \returns true if the element is actually added, i.e. was not in the 2819 /// collection before the operation. 2820 bool insert(PHINode *Ptr) { 2821 if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { 2822 NodeList.push_back(Ptr); 2823 return true; 2824 } 2825 return false; 2826 } 2827 2828 /// Removes the element from the collection. 2829 /// \returns whether the element is actually removed, i.e. was in the 2830 /// collection before the operation. 2831 bool erase(PHINode *Ptr) { 2832 auto it = NodeMap.find(Ptr); 2833 if (it != NodeMap.end()) { 2834 NodeMap.erase(Ptr); 2835 SkipRemovedElements(FirstValidElement); 2836 return true; 2837 } 2838 return false; 2839 } 2840 2841 /// Removes all elements and clears the collection. 2842 void clear() { 2843 NodeMap.clear(); 2844 NodeList.clear(); 2845 FirstValidElement = 0; 2846 } 2847 2848 /// \returns an iterator that will iterate the elements in the order of 2849 /// insertion. 2850 iterator begin() { 2851 if (FirstValidElement == 0) 2852 SkipRemovedElements(FirstValidElement); 2853 return PhiNodeSetIterator(this, FirstValidElement); 2854 } 2855 2856 /// \returns an iterator that points to the end of the collection. 2857 iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } 2858 2859 /// Returns the number of elements in the collection. 2860 size_t size() const { 2861 return NodeMap.size(); 2862 } 2863 2864 /// \returns 1 if the given element is in the collection, and 0 if otherwise. 
2865 size_t count(PHINode *Ptr) const { 2866 return NodeMap.count(Ptr); 2867 } 2868 2869 private: 2870 /// Updates the CurrentIndex so that it will point to a valid element. 2871 /// 2872 /// If the element of NodeList at CurrentIndex is valid, it does not 2873 /// change it. If there are no more valid elements, it updates CurrentIndex 2874 /// to point to the end of the NodeList. 2875 void SkipRemovedElements(size_t &CurrentIndex) { 2876 while (CurrentIndex < NodeList.size()) { 2877 auto it = NodeMap.find(NodeList[CurrentIndex]); 2878 // If the element has been deleted and added again later, NodeMap will 2879 // point to a different index, so CurrentIndex will still be invalid. 2880 if (it != NodeMap.end() && it->second == CurrentIndex) 2881 break; 2882 ++CurrentIndex; 2883 } 2884 } 2885 }; 2886 2887 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) 2888 : Set(Set), CurrentIndex(Start) {} 2889 2890 PHINode * PhiNodeSetIterator::operator*() const { 2891 assert(CurrentIndex < Set->NodeList.size() && 2892 "PhiNodeSet access out of range"); 2893 return Set->NodeList[CurrentIndex]; 2894 } 2895 2896 PhiNodeSetIterator& PhiNodeSetIterator::operator++() { 2897 assert(CurrentIndex < Set->NodeList.size() && 2898 "PhiNodeSet access out of range"); 2899 ++CurrentIndex; 2900 Set->SkipRemovedElements(CurrentIndex); 2901 return *this; 2902 } 2903 2904 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { 2905 return CurrentIndex == RHS.CurrentIndex; 2906 } 2907 2908 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { 2909 return !((*this) == RHS); 2910 } 2911 2912 /// Keep track of simplification of Phi nodes. 2913 /// Accept the set of all phi nodes and erase phi node from this set 2914 /// if it is simplified. 2915 class SimplificationTracker { 2916 DenseMap<Value *, Value *> Storage; 2917 const SimplifyQuery &SQ; 2918 // Tracks newly created Phi nodes. The elements are iterated by insertion 2919 // order. 2920 PhiNodeSet AllPhiNodes; 2921 // Tracks newly created Select nodes. 
2922 SmallPtrSet<SelectInst *, 32> AllSelectNodes; 2923 2924 public: 2925 SimplificationTracker(const SimplifyQuery &sq) 2926 : SQ(sq) {} 2927 2928 Value *Get(Value *V) { 2929 do { 2930 auto SV = Storage.find(V); 2931 if (SV == Storage.end()) 2932 return V; 2933 V = SV->second; 2934 } while (true); 2935 } 2936 2937 Value *Simplify(Value *Val) { 2938 SmallVector<Value *, 32> WorkList; 2939 SmallPtrSet<Value *, 32> Visited; 2940 WorkList.push_back(Val); 2941 while (!WorkList.empty()) { 2942 auto P = WorkList.pop_back_val(); 2943 if (!Visited.insert(P).second) 2944 continue; 2945 if (auto *PI = dyn_cast<Instruction>(P)) 2946 if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { 2947 for (auto *U : PI->users()) 2948 WorkList.push_back(cast<Value>(U)); 2949 Put(PI, V); 2950 PI->replaceAllUsesWith(V); 2951 if (auto *PHI = dyn_cast<PHINode>(PI)) 2952 AllPhiNodes.erase(PHI); 2953 if (auto *Select = dyn_cast<SelectInst>(PI)) 2954 AllSelectNodes.erase(Select); 2955 PI->eraseFromParent(); 2956 } 2957 } 2958 return Get(Val); 2959 } 2960 2961 void Put(Value *From, Value *To) { 2962 Storage.insert({ From, To }); 2963 } 2964 2965 void ReplacePhi(PHINode *From, PHINode *To) { 2966 Value* OldReplacement = Get(From); 2967 while (OldReplacement != From) { 2968 From = To; 2969 To = dyn_cast<PHINode>(OldReplacement); 2970 OldReplacement = Get(From); 2971 } 2972 assert(Get(To) == To && "Replacement PHI node is already replaced."); 2973 Put(From, To); 2974 From->replaceAllUsesWith(To); 2975 AllPhiNodes.erase(From); 2976 From->eraseFromParent(); 2977 } 2978 2979 PhiNodeSet& newPhiNodes() { return AllPhiNodes; } 2980 2981 void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } 2982 2983 void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } 2984 2985 unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } 2986 2987 unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } 2988 2989 void destroyNewNodes(Type *CommonType) { 2990 // For safe erasing, replace the uses with dummy value first. 2991 auto Dummy = UndefValue::get(CommonType); 2992 for (auto I : AllPhiNodes) { 2993 I->replaceAllUsesWith(Dummy); 2994 I->eraseFromParent(); 2995 } 2996 AllPhiNodes.clear(); 2997 for (auto I : AllSelectNodes) { 2998 I->replaceAllUsesWith(Dummy); 2999 I->eraseFromParent(); 3000 } 3001 AllSelectNodes.clear(); 3002 } 3003 }; 3004 3005 /// A helper class for combining addressing modes. 3006 class AddressingModeCombiner { 3007 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; 3008 typedef std::pair<PHINode *, PHINode *> PHIPair; 3009 3010 private: 3011 /// The addressing modes we've collected. 3012 SmallVector<ExtAddrMode, 16> AddrModes; 3013 3014 /// The field in which the AddrModes differ, when we have more than one. 3015 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; 3016 3017 /// Are the AddrModes that we have all just equal to their original values? 3018 bool AllAddrModesTrivial = true; 3019 3020 /// Common Type for all different fields in addressing modes. 3021 Type *CommonType; 3022 3023 /// SimplifyQuery for simplifyInstruction utility. 3024 const SimplifyQuery &SQ; 3025 3026 /// Original Address. 
3027 Value *Original; 3028 3029 public: 3030 AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) 3031 : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} 3032 3033 /// Get the combined AddrMode. 3034 const ExtAddrMode &getAddrMode() const { 3035 return AddrModes[0]; 3036 } 3037 3038 /// Add a new AddrMode if it's compatible with the AddrModes we already 3039 /// have. 3040 /// \return True iff we succeeded in doing so. 3041 bool addNewAddrMode(ExtAddrMode &NewAddrMode) { 3042 // Take note of whether we have any non-trivial AddrModes: we need to 3043 // detect when all AddrModes are trivial, as then a phi or select we 3044 // introduce would just duplicate what's already there. 3045 AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); 3046 3047 // If this is the first addrmode then everything is fine. 3048 if (AddrModes.empty()) { 3049 AddrModes.emplace_back(NewAddrMode); 3050 return true; 3051 } 3052 3053 // Figure out how different this is from the other address modes, which we 3054 // can do just by comparing against the first one given that we only care 3055 // about the cumulative difference. 3056 ExtAddrMode::FieldName ThisDifferentField = 3057 AddrModes[0].compare(NewAddrMode); 3058 if (DifferentField == ExtAddrMode::NoField) 3059 DifferentField = ThisDifferentField; 3060 else if (DifferentField != ThisDifferentField) 3061 DifferentField = ExtAddrMode::MultipleFields; 3062 3063 // If NewAddrMode differs in more than one dimension we cannot handle it. 3064 bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; 3065 3066 // If the Scale field is different then we reject. 3067 CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; 3068 3069 // We must also reject the case where the base offset differs and the 3070 // scaled register is not null: we cannot handle it because the merge of 3071 // the different offsets would have to be used as the ScaledReg. 3072 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || 3073 !NewAddrMode.ScaledReg); 3074 3075 // We must also reject the case where the GV differs and a BaseReg is 3076 // installed, because we want to use the base register as the merge of the GV values. 3077 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || 3078 !NewAddrMode.HasBaseReg); 3079 3080 // Even if NewAddrMode is the same we still need to collect it, because its 3081 // original value is different; later we will need all the original values 3082 // as anchors during the search for the common Phi node. 3083 if (CanHandle) 3084 AddrModes.emplace_back(NewAddrMode); 3085 else 3086 AddrModes.clear(); 3087 3088 return CanHandle; 3089 } 3090 3091 /// Combine the addressing modes we've collected into a single 3092 /// addressing mode. 3093 /// \return True iff we successfully combined them or we only had one so 3094 /// didn't need to combine them anyway. 3095 bool combineAddrModes() { 3096 // If we have no AddrModes then they can't be combined. 3097 if (AddrModes.size() == 0) 3098 return false; 3099 3100 // A single AddrMode can trivially be combined. 3101 if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) 3102 return true; 3103 3104 // If the AddrModes we collected are all just equal to the value they are 3105 // derived from then combining them wouldn't do anything useful. 3106 if (AllAddrModesTrivial) 3107 return false; 3108 3109 if (!addrModeCombiningAllowed()) 3110 return false; 3111 3112 // Build a map between <original value, basic block where we saw it> to 3113 // value of base register.
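// (Concretely, initializeMap below keys each collected AddrMode's
// OriginalValue to the value of the one field in which the modes differ;
// the comment above findCommon gives a worked example.)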
    // Bail out if there is no common type.
    FoldAddrToValueMapping Map;
    if (!initializeMap(Map))
      return false;

    Value *CommonValue = findCommon(Map);
    if (CommonValue)
      AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
    return CommonValue != nullptr;
  }

private:
  /// Initialize Map with anchor values. For each address seen, we record the
  /// value of its differing field. At the same time we find a common type for
  /// the differing fields, which we will use to create the new Phi/Select
  /// nodes; it is kept in the CommonType field.
  /// Return false if no common type is found.
  bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace
    // them with a constant null once we know the common type.
    SmallVector<Value *, 2> NullValue;
    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
    for (auto &AM : AddrModes) {
      Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
      if (DV) {
        auto *DVTy = DV->getType();
        if (CommonType && CommonType != DVTy)
          return false;
        CommonType = DVTy;
        Map[AM.OriginalValue] = DV;
      } else {
        NullValue.push_back(AM.OriginalValue);
      }
    }
    assert(CommonType && "At least one non-null value must be!");
    for (auto *V : NullValue)
      Map[V] = Constant::getNullValue(CommonType);
    return true;
  }

  /// We have a mapping from value A to value B, where B was a field in the
  /// addressing mode represented by A. We also have an original value C
  /// representing the address we start from. Traversing from C through phis
  /// and selects, we ended up with the A's in the map. This utility tries to
  /// find a value V that is a field of the addressing mode for C, such that
  /// traversing from V through phi nodes and selects ends up in the
  /// corresponding B values of the map.
  /// The utility creates new Phi/Select nodes if needed.
  // A simple example looks as follows:
  //   BB1:
  //     p1 = b1 + 40
  //     br cond BB2, BB3
  //   BB2:
  //     p2 = b2 + 40
  //     br BB3
  //   BB3:
  //     p = phi [p1, BB1], [p2, BB2]
  //     v = load p
  // Map is
  //   p1 -> b1
  //   p2 -> b2
  // Request is
  //   p -> ?
  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
  Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks the simplification of newly created phi nodes. The reason we use
    // this mapping is that newly created Phi nodes are added to AddrToBase.
    // Simplification of Phi nodes is recursive, so a Phi node may be
    // simplified after we added it to AddrToBase. In practice this
    // simplification is only possible if the original phis/selects were not
    // simplified yet.
    // Using this mapping we can find the current value in AddrToBase.
    SimplificationTracker ST(SQ);

    // First step: DFS to create PHI nodes for all intermediate blocks.
    // Also fill the traversal order for the second step.
    SmallVector<Value *, 32> TraverseOrder;
    InsertPlaceholders(Map, TraverseOrder, ST);

    // Second step: fill the new nodes with merged values and simplify them
    // if possible.
    FillPlaceholders(Map, TraverseOrder, ST);

    if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    // Now we'd like to match the new Phi nodes to existing ones.
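    // An illustrative sketch (not part of the original comments): if we
    // created
    //   sunk_phi = phi [b1, BB1], [b2, BB2]
    // in a block that already contains an equivalent
    //   q = phi [b1, BB1], [b2, BB2]
    // then the matching below lets us reuse q and erase sunk_phi instead of
    // keeping both.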
    unsigned PhiNotMatchedCount = 0;
    if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    auto *Result = ST.Get(Map.find(Original)->second);
    if (Result) {
      NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
      NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
    }
    return Result;
  }

  /// Try to match PHI node to Candidate.
  /// Matcher tracks the pairs of matched Phi nodes.
  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
                    SmallSetVector<PHIPair, 8> &Matcher,
                    PhiNodeSet &PhiNodesToMatch) {
    SmallVector<PHIPair, 8> WorkList;
    Matcher.insert({PHI, Candidate});
    WorkList.push_back({PHI, Candidate});
    SmallSet<PHIPair, 8> Visited;
    while (!WorkList.empty()) {
      auto Item = WorkList.pop_back_val();
      if (!Visited.insert(Item).second)
        continue;
      // We iterate over all incoming values of the Phi to compare them.
      // If two incoming values differ but are both Phi nodes in the same
      // basic block, and the first one is a Phi we added (i.e. subject to
      // matching), then the pair can still match provided those Phis match.
      // So we optimistically record the pair as matched and add it to the
      // work list to verify that later.
      for (auto B : Item.first->blocks()) {
        Value *FirstValue = Item.first->getIncomingValueForBlock(B);
        Value *SecondValue = Item.second->getIncomingValueForBlock(B);
        if (FirstValue == SecondValue)
          continue;

        PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
        PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);

        // We will not be able to match if one of the values is not a Phi
        // node, or the first one is not a Phi node from the set we'd like to
        // match, or the Phi nodes are in different basic blocks.
        if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
            FirstPhi->getParent() != SecondPhi->getParent())
          return false;

        // If we already matched them then continue.
        if (Matcher.count({FirstPhi, SecondPhi}))
          continue;
        // The values are different and not yet known to match, so for the
        // original pair to match, these must match as well.
        Matcher.insert({FirstPhi, SecondPhi});
        // But we must still verify it.
        WorkList.push_back({FirstPhi, SecondPhi});
      }
    }
    return true;
  }

  /// For the given set of PHI nodes (in the SimplificationTracker) try
  /// to find their equivalents.
  /// Returns false if this matching fails and the creation of new Phi nodes
  /// is disabled.
  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
                   unsigned &PhiNotMatchedCount) {
    // Matched and PhiNodesToMatch iterate their elements in a deterministic
    // order, so the replacements (ReplacePhi) are also done in a deterministic
    // order.
    SmallSetVector<PHIPair, 8> Matched;
    SmallPtrSet<PHINode *, 8> WillNotMatch;
    PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
    while (PhiNodesToMatch.size()) {
      PHINode *PHI = *PhiNodesToMatch.begin();

      // Seed the set with PHI itself; if no Phi node in the basic block
      // matches it, nothing collected here will match.
      WillNotMatch.clear();
      WillNotMatch.insert(PHI);

      // Traverse all Phis in the block until we find an equivalent or run
      // out of candidates.
      bool IsMatched = false;
      for (auto &P : PHI->getParent()->phis()) {
        if (&P == PHI)
          continue;
        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
          break;
        // If it does not match, collect all Phi nodes from the matcher;
        // if we end up with no match, then all these Phi nodes will not
        // match later.
        for (auto M : Matched)
          WillNotMatch.insert(M.first);
        Matched.clear();
      }
      if (IsMatched) {
        // Replace all matched values and erase them.
        for (auto MV : Matched)
          ST.ReplacePhi(MV.first, MV.second);
        Matched.clear();
        continue;
      }
      // If we are not allowed to create new nodes then bail out.
      if (!AllowNewPhiNodes)
        return false;
      // Just remove all seen values in the matcher. They will not match
      // anything.
      PhiNotMatchedCount += WillNotMatch.size();
      for (auto *P : WillNotMatch)
        PhiNodesToMatch.erase(P);
    }
    return true;
  }

  /// Fill the placeholders with values from predecessors and simplify them.
  void FillPlaceholders(FoldAddrToValueMapping &Map,
                        SmallVectorImpl<Value *> &TraverseOrder,
                        SimplificationTracker &ST) {
    while (!TraverseOrder.empty()) {
      Value *Current = TraverseOrder.pop_back_val();
      assert(Map.find(Current) != Map.end() && "No node to fill!");
      Value *V = Map[Current];

      if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
        // CurrentValue must also be a Select.
        auto *CurrentSelect = cast<SelectInst>(Current);
        auto *TrueValue = CurrentSelect->getTrueValue();
        assert(Map.find(TrueValue) != Map.end() && "No True Value!");
        Select->setTrueValue(ST.Get(Map[TrueValue]));
        auto *FalseValue = CurrentSelect->getFalseValue();
        assert(Map.find(FalseValue) != Map.end() && "No False Value!");
        Select->setFalseValue(ST.Get(Map[FalseValue]));
      } else {
        // It must be a Phi node then; Current is the matching original Phi.
        PHINode *PHI = cast<PHINode>(V);
        auto *CurrentPhi = cast<PHINode>(Current);
        // Fill the Phi node with values from predecessors.
        for (auto B : predecessors(PHI->getParent())) {
          Value *PV = CurrentPhi->getIncomingValueForBlock(B);
          assert(Map.find(PV) != Map.end() && "No predecessor Value!");
          PHI->addIncoming(ST.Get(Map[PV]), B);
        }
      }
      Map[Current] = ST.Simplify(V);
    }
  }

  /// Starting from the original value, recursively iterates over the def-use
  /// chain up to the known ending values represented in the map. For each
  /// traversed phi/select, inserts a placeholder Phi or Select.
  /// Reports all newly created Phi/Select nodes by adding them to the set.
  /// Also records the order in which the values were traversed.
  void InsertPlaceholders(FoldAddrToValueMapping &Map,
                          SmallVectorImpl<Value *> &TraverseOrder,
                          SimplificationTracker &ST) {
    SmallVector<Value *, 32> Worklist;
    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
           "Address must be a Phi or Select node");
    auto *Dummy = UndefValue::get(CommonType);
    Worklist.push_back(Original);
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      // If it is already visited or is an ending value then skip it.
      if (Map.find(Current) != Map.end())
        continue;
      TraverseOrder.push_back(Current);

      // CurrentValue must be a Phi node or select. All others must be covered
      // by anchors.
      if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
        // Is it OK to get metadata from OrigSelect?!
        // Create a Select placeholder with dummy value.
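        // (Both operands are dummies at this point; FillPlaceholders
        // supplies the real true/false values afterwards.)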
        SelectInst *Select = SelectInst::Create(
            CurrentSelect->getCondition(), Dummy, Dummy,
            CurrentSelect->getName(), CurrentSelect, CurrentSelect);
        Map[Current] = Select;
        ST.insertNewSelect(Select);
        // We are interested in True and False values.
        Worklist.push_back(CurrentSelect->getTrueValue());
        Worklist.push_back(CurrentSelect->getFalseValue());
      } else {
        // It must be a Phi node then.
        PHINode *CurrentPhi = cast<PHINode>(Current);
        unsigned PredCount = CurrentPhi->getNumIncomingValues();
        PHINode *PHI =
            PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
        Map[Current] = PHI;
        ST.insertNewPhi(PHI);
        for (Value *P : CurrentPhi->incoming_values())
          Worklist.push_back(P);
      }
    }
  }

  bool addrModeCombiningAllowed() {
    if (DisableComplexAddrModes)
      return false;
    switch (DifferentField) {
    default:
      return false;
    case ExtAddrMode::BaseRegField:
      return AddrSinkCombineBaseReg;
    case ExtAddrMode::BaseGVField:
      return AddrSinkCombineBaseGV;
    case ExtAddrMode::BaseOffsField:
      return AddrSinkCombineBaseOffs;
    case ExtAddrMode::ScaledRegField:
      return AddrSinkCombineScaledReg;
    }
  }
};
} // end anonymous namespace

/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it; otherwise we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to the addr mode.
  ConstantInt *CI = nullptr;
  Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isIntOrPtrTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer
    // sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}

namespace {

/// Helper class to perform type promotion.
class TypePromotionHelper {
  /// Utility function to add a promoted instruction \p ExtOpnd to
  /// \p PromotedInsts and record the type of extension we have seen.
  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
                              Instruction *ExtOpnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
    if (It != PromotedInsts.end()) {
      // If the new extension is the same as the original, the information in
      // PromotedInsts[ExtOpnd] is still correct.
      if (It->second.getInt() == ExtTy)
        return;

      // The new extension differs from the old one, so we invalidate the
      // type information by setting the extension type to BothExtension.
      ExtTy = BothExtension;
    }
    PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
  }

  /// Utility function to query the original type of instruction \p Opnd
  /// with a matched extension type. If the extension doesn't match, we
  /// cannot use the information we had on the original type.
  /// BothExtension doesn't match any extension type.
  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
                                 Instruction *Opnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
    if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
      return It->second.getPointer();
    return nullptr;
  }

  /// Utility function to check whether or not a sign or zero extension
  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  /// either using the operands of \p Inst or promoting \p Inst.
  /// The type of the extension is defined by \p IsSExt.
  /// In other words, check if:
  ///   ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  /// #1 Promotion applies:
  ///   ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  /// #2 Operand reuse applies:
  ///   ext opnd1 to ConsideredExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
                            const InstrToOrigTy &PromotedInsts, bool IsSExt);

  /// Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    return !(isa<SelectInst>(Inst) && OpIdx == 0);
  }

  /// Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc or sext or zext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForTruncAndAnyExt(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);

  /// Utility function to promote the operand of \p Ext when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all the instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForOther(Instruction *Ext,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInstsCost,
                                       SmallVectorImpl<Instruction *> *Exts,
                                       SmallVectorImpl<Instruction *> *Truncs,
                                       const TargetLowering &TLI, bool IsSExt);

  /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }

  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
                            InstrToOrigTy &PromotedInsts,
                            unsigned &CreatedInstsCost,
                            SmallVectorImpl<Instruction *> *Exts,
                            SmallVectorImpl<Instruction *> *Truncs,
                            const TargetLowering &TLI);

  /// Given a sign/zero extend instruction \p Ext, return the appropriate
  /// action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later, thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

} // end anonymous namespace

bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator if it is legal to do so; in other
  // words, the binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
       (IsSExt && BinOp->hasNoSignedWrap())))
    return true;

  // ext(and/or(opnd, cst)) --> and/or(ext(opnd), ext(cst))
  if ((Inst->getOpcode() == Instruction::And ||
       Inst->getOpcode() == Instruction::Or))
    return true;

  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
  if (Inst->getOpcode() == Instruction::Xor) {
    const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
    // Make sure it is not a NOT.
    if (Cst && !Cst->getValue().isAllOnesValue())
      return true;
  }

  // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst))
  // It may change a poisoned value into a regular value, like
  //   zext i32 (lshr i8 %val, 12)  -->  lshr i32 (zext i8 %val), 12
  //            poisoned value                    regular value
  // This should be OK, since undef covers any valid value.
  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
    return true;

  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
  // It may change a poisoned value into a regular value, like
  //   zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
  //            poisoned value                  regular value
  // This should be OK, since undef covers any valid value.
  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
    const Instruction *ExtInst =
        dyn_cast<const Instruction>(*Inst->user_begin());
    if (ExtInst && ExtInst->hasOneUse()) {
      const Instruction *AndInst =
          dyn_cast<const Instruction>(*ExtInst->user_begin());
      if (AndInst && AndInst->getOpcode() == Instruction::And) {
        const ConstantInt *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
        if (Cst &&
            Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
          return true;
      }
    }
  }

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants, but it is not worth the extra logic.)
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check that the source of the truncate is narrow enough.
  // I.e., check that the trunc just drops bits that were extended with the
  // same kind of extension.
  // #1 Get the type of the operand and check the kind of the extended bits.
  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
  if (!OpndType) {
    if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
      OpndType = Opnd->getOperand(0)->getType();
    else
      return false;
  }

  // #2 Check that the truncate just drops extended bits.
  return Inst->getType()->getIntegerBitWidth() >=
         OpndType->getIntegerBitWidth();
}

TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through it. If it is, check that we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt, ZExt, or Trunc instructions: return the dedicated handler.
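  // For instance (an illustrative sketch), given
  //   %t = trunc i64 %x to i32
  //   %e = zext i32 %t to i64
  // the handler below rewrites %e to extend %x directly, and drops the
  // extension entirely once its operand already has the destination type.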
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}

Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
    if (ExtInst) {
      if (Exts)
        Exts->push_back(ExtInst);
      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
    }
    return ExtVal;
  }

  // At this point we have: ext ty opnd to ty.
  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  Value *NextVal = ExtInst->getOperand(0);
  TPT.eraseInstruction(ExtInst, NextVal);
  return NextVal;
}

Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
  // By construction, the operand of Ext is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  CreatedInstsCost = 0;
  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, except Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
      // Insert it just after the definition.
      ITrunc->moveAfter(ExtOpnd);
      if (Truncs)
        Truncs->push_back(ITrunc);
    }

    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous
    // call to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
    TPT.setOperand(Ext, 0, ExtOpnd);
  }

  // Get through the Instruction:
  // 1. Update its type.
  // 2. Replace the uses of Ext by Inst.
  // 3. Extend each operand that needs to be extended.

  // Remember the original type of the instruction before promotion.
  // This is useful to know whether the high bits are sign- or zero-extended
  // bits.
  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
  // Step #1.
  TPT.mutateType(ExtOpnd, Ext->getType());
  // Step #2.
  TPT.replaceAllUsesWith(Ext, ExtOpnd);
  // Step #3.
  Instruction *ExtForOpnd = Ext;

  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
       ++OpIdx) {
    LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
        !shouldExtOperand(ExtOpnd, OpIdx)) {
      LLVM_DEBUG(dbgs() << "No need to propagate\n");
      continue;
    }
    // Check if we can statically extend the operand.
    Value *Opnd = ExtOpnd->getOperand(OpIdx);
    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
                            : Cst->getValue().zext(BitWidth);
      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
      continue;
    }
    // UndefValues are typed, so we have to statically extend them.
    if (isa<UndefValue>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly extend the operand.
    // Ext may already have been reused to extend a previous operand (in
    // which case ExtForOpnd is null); if so, create a new extension.
    if (!ExtForOpnd) {
      LLVM_DEBUG(dbgs() << "More operands to ext\n");
      Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
                                    : TPT.createZExt(Ext, Opnd, Ext->getType());
      if (!isa<Instruction>(ValForExtOpnd)) {
        TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
        continue;
      }
      ExtForOpnd = cast<Instruction>(ValForExtOpnd);
    }
    if (Exts)
      Exts->push_back(ExtForOpnd);
    TPT.setOperand(ExtForOpnd, 0, Opnd);

    // Move the extension before the insertion point.
    TPT.moveBefore(ExtForOpnd, ExtOpnd);
    TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
    CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more extensions are required, new instructions will have to be
    // created.
    ExtForOpnd = nullptr;
  }
  if (ExtForOpnd == Ext) {
    LLVM_DEBUG(dbgs() << "Extension is useless now\n");
    TPT.eraseInstruction(Ext);
  }
  return ExtOpnd;
}

/// Check whether or not promoting an instruction to a wider type is
/// profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched into the addressing
/// mode by the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
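/// For example (illustrative numbers, not from the source): if the promotion
/// creates one non-free extension (NewCost = 1) while it removed one non-free
/// extension and let one instruction be folded into the addressing mode
/// (OldCost = 1 + 1 = 2), then the promotion is profitable.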
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
                    << '\n');
  // If the cost of the new extensions is greater than the cost of the
  // old extension plus what we folded, this is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral, but it may help folding the sign extension
  // into loads, for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}

/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information on whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer
    // sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
        // Don't touch identity bitcasts. These were probably put here by LSR,
        // and we don't want to mess around with them. Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS =
        AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS. If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a
    // partially matched operation.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
        matchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive. Try merging in the LHS then the
    // RHS.
    if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
        matchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  //case Instruction::Or:
  //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS || RHS->getBitWidth() > 64)
      return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP. We check whether it contains constant offsets and at
    // most one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          const APInt &CVal = CI->getValue();
          if (CVal.getMinSignedBits() <= 64) {
            ConstantOffset += CVal.getSExtValue() * TypeSize;
            continue;
          }
        }
        if (TypeSize) { // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset. In this
    // case, just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
        // Check to see if we can fold the base pointer in too.
        if (matchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
                 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
                 ConstantOffset > 0) {
        // Record GEPs with non-zero offsets as candidates for splitting in
        // the event that the offset cannot fit into the r+i addressing mode.
        // A simple and common case: only one GEP is used in calculating the
        // address for the memory access.
        Value *Base = AddrInst->getOperand(0);
        auto *BaseI = dyn_cast<Instruction>(Base);
        auto *GEP = cast<GetElementPtrInst>(AddrInst);
        if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
            (BaseI && !isa<CastInst>(BaseI) &&
             !isa<GetElementPtrInst>(BaseI))) {
          // If the base is an instruction, make sure the GEP is not in the
          // same basic block as the base. If the base is an argument or
          // global value, make sure the GEP is not in the entry block.
          // Otherwise, instruction selection can undo the split. Also make
          // sure the parent block allows inserting non-PHI instructions
          // before the terminator.
          BasicBlock *Parent =
              BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
          if (GEP->getParent() != Parent && !Parent->getTerminator()->isEHPad())
            LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
        }
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this
    // point.
    // E.g.,
    //   op = add opnd, 1
    //   idx = ext op
    //   addr = gep base, idx
    // is now:
    //   promotedOpnd = ext opnd            <- no match here
    //   op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    //   addr = gep base, op                <- match
    if (MovedAway)
      *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension
        // plus what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  }
  return false;
}

/// If we can, try to add the value of 'Addr' into the current addressing
/// mode. If Addr can't be added to AddrMode this returns false and leaves
/// AddrMode unmodified. This assumes that Addr is either a pointer type or
/// intptr_t for the target.
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will roll back if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check here.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this. Check to see if it is actually
      // *profitable* to do so. We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not
    // [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }
  // Couldn't match.
  TPT.rollback(LastKnownGood);
  return false;
}

/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands. If so, return true; otherwise return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getFunction();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI,
                           ImmutableCallSite(CI));

  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

// Max number of memory uses to look at before aborting the search to conserve
// compile time.
static constexpr int MaxMemoryUsesToScan = 20;

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I,
    SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
    const TargetRegisterInfo &TRI, int SeenInsts = 0) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  const bool OptSize = I->getFunction()->optForSize();

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    // Conservatively return true if we're seeing a large number or a deep
    // chain of users. This avoids excessive compilation times in
    // pathological cases.
    if (SeenInsts++ >= MaxMemoryUsesToScan)
      return true;

    Instruction *UserI = cast<Instruction>(U.getUser());
    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(RMW, opNo));
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(CmpX, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      // If this is a cold call, we can sink the addressing calculation into
      // the cold path. See optimizeCallInst.
      if (!OptSize && CI->hasFnAttr(Attribute::Cold))
        continue;

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI,
                          SeenInsts))
      return true;
  }

  return false;
}

/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at
/// the instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are
  // live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live; this
  // is true because it is just a reference to the stack/frame pointer, which
  // is live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block. If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// It is possible for the addressing mode of the machine to fold the
/// specified instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1"
/// to be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase
/// the number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.
/// If X was live across 'load Z' for other reasons, we actually *would* want
/// to fold the addressing mode in the Z case. This would make Y die earlier.
bool AddressingModeMatcher::
isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into
  // it, and AMAfter is the addressing mode after the instruction was folded.
  // Get the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode,
  // their lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexpressions) didn't extend any
  // live ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst). In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Instruction*, unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI))
    return false; // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction. The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fast path. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonably small and fast way
  // to compute an effective address (e.g., LEA on x86).
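  // Below we therefore re-run the matcher once per memory use found above,
  // asking only whether the resulting addressing mode would actually cover I.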
(i.e LEA on x86) 4519 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 4520 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 4521 Instruction *User = MemoryUses[i].first; 4522 unsigned OpNo = MemoryUses[i].second; 4523 4524 // Get the access type of this use. If the use isn't a pointer, we don't 4525 // know what it accesses. 4526 Value *Address = User->getOperand(OpNo); 4527 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); 4528 if (!AddrTy) 4529 return false; 4530 Type *AddressAccessTy = AddrTy->getElementType(); 4531 unsigned AS = AddrTy->getAddressSpace(); 4532 4533 // Do a match against the root of this address, ignoring profitability. This 4534 // will tell us if the addressing mode for the memory operation will 4535 // *actually* cover the shared instruction. 4536 ExtAddrMode Result; 4537 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, 4538 0); 4539 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4540 TPT.getRestorationPoint(); 4541 AddressingModeMatcher Matcher( 4542 MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result, 4543 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP); 4544 Matcher.IgnoreProfitability = true; 4545 bool Success = Matcher.matchAddr(Address, 0); 4546 (void)Success; assert(Success && "Couldn't select *anything*?"); 4547 4548 // The match was to check the profitability, the changes made are not 4549 // part of the original matcher. Therefore, they should be dropped 4550 // otherwise the original matcher will not present the right state. 4551 TPT.rollback(LastKnownGood); 4552 4553 // If the match didn't cover I, then it won't be shared by it. 4554 if (!is_contained(MatchedAddrModeInsts, I)) 4555 return false; 4556 4557 MatchedAddrModeInsts.clear(); 4558 } 4559 4560 return true; 4561 } 4562 4563 /// Return true if the specified values are defined in a 4564 /// different basic block than BB. 4565 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 4566 if (Instruction *I = dyn_cast<Instruction>(V)) 4567 return I->getParent() != BB; 4568 return false; 4569 } 4570 4571 /// Sink addressing mode computation immediate before MemoryInst if doing so 4572 /// can be done without increasing register pressure. The need for the 4573 /// register pressure constraint means this can end up being an all or nothing 4574 /// decision for all uses of the same addressing computation. 4575 /// 4576 /// Load and Store Instructions often have addressing modes that can do 4577 /// significant amounts of computation. As such, instruction selection will try 4578 /// to get the load or store to do as much computation as possible for the 4579 /// program. The problem is that isel can only see within a single block. As 4580 /// such, we sink as much legal addressing mode work into the block as possible. 4581 /// 4582 /// This method is used to optimize both load/store and inline asms with memory 4583 /// operands. It's also used to sink addressing computations feeding into cold 4584 /// call sites into their (cold) basic block. 4585 /// 4586 /// The motivation for handling sinking into cold blocks is that doing so can 4587 /// both enable other address mode sinking (by satisfying the register pressure 4588 /// constraint above), and reduce register pressure globally (by removing the 4589 /// addressing mode computation from the fast path entirely.). 
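///
/// For illustration, a rough sketch (the value names here are hypothetical,
/// not taken from any particular test case): an address computed in one block
/// and used in another, e.g.
/// \code
///   bb1:
///     %gep = getelementptr i8, i8* %base, i64 40
///     br label %bb2
///   bb2:
///     %v = load i8, i8* %gep
/// \endcode
/// would be rewritten so the computation is local to the user:
/// \code
///   bb2:
///     %sunkaddr = getelementptr i8, i8* %base, i64 40
///     %v = load i8, i8* %sunkaddr
/// \endcode
/// letting isel fold the offset of 40 into the load's addressing mode.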
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing mode obtained from the non-PHI/select roots of
  // the graph are compatible.
  bool PhiOrSelectSeen = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  const SimplifyQuery SQ(*DL, TLInfo);
  AddressingModeCombiner AddrModes(SQ, Addr);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // We allow traversing cyclic Phi nodes.
    // In case of success after this loop we ensure that traversing through
    // Phi nodes ends up with all cases computing an address of the form
    //    BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index
    // are exactly the same Values in all cases.
    // This means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same values as they had in the address computation
    // represented by the Phi. So we can safely sink the address computation to
    // the memory instruction.
    if (!Visited.insert(V).second)
      continue;

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : P->incoming_values())
        worklist.push_back(IncValue);
      PhiOrSelectSeen = true;
      continue;
    }
    // Similar for select.
    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
      worklist.push_back(SI->getFalseValue());
      worklist.push_back(SI->getTrueValue());
      PhiOrSelectSeen = true;
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed. Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    AddrModeInsts.clear();
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
        InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP);

    GetElementPtrInst *GEP = LargeOffsetGEP.first;
    if (GEP && GEP->getParent() != MemoryInst->getParent() &&
        !NewGEPBases.count(GEP)) {
      // If splitting the underlying data structure can reduce the offset of a
      // GEP, collect the GEP. Skip the GEPs that are the new bases of
      // previously split data structures.
      LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
      if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
        LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
    }

    NewAddrMode.OriginalValue = V;
    if (!AddrModes.addNewAddrMode(NewAddrMode))
      break;
  }

  // Try to combine the AddrModes we've collected. If we couldn't collect any,
  // or we have multiple but either couldn't combine them or combining them
  // wouldn't do anything useful, bail out now.
  if (!AddrModes.combineAddrModes()) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  TPT.commit();

  // Get the combined AddrMode (or the only AddrMode, if we only had one).
  ExtAddrMode AddrMode = AddrModes.getAddrMode();

  // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a Phi node then it is definitely non-local, and if we saw a
  // select then we want to push the address calculation past it even if it's
  // already in this BB.
  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
                      << "\n");
    return false;
  }

  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block. If so,
  // reuse the computation. Before attempting reuse, check if the address is
  // valid as it may have been erased.

  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];

  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  if (SunkAddr) {
    LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
  } else if (AddrSinkUsingGEPs ||
             (!AddrSinkUsingGEPs.getNumOccurrences() && TM && TTI->useAA())) {
    // By default, we use the GEP-based method when AA is used later. This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return false;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    // It is only safe to sign extend the BaseReg if we know that the math
    // required to create it did not overflow before we extend it. Since
    // the original IR value was tossed in favor of a constant back when
    // the AddrMode was created we need to bail out gracefully if widths
    // do not match instead of extending it.
    //
    // (See below for code to add the scale.)
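    //
    // For instance (an illustrative sketch, not tied to a specific target):
    // with a 64-bit IntPtrTy and an i32 ScaledReg, emitting
    //   %wide = sext i32 %scaled to i64
    // could change the computed address if the original i32 arithmetic
    // wrapped, so instead of widening we refuse and bail out below.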
    if (AddrMode.Scale) {
      Type *ScaledRegTy = AddrMode.ScaledReg->getType();
      if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
          cast<IntegerType>(ScaledRegTy)->getBitWidth())
        return false;
    }

    if (AddrMode.BaseGV) {
      if (ResultPtr)
        return false;

      ResultPtr = AddrMode.BaseGV;
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!DL->isNonIntegralPointerType(Addr->getType())) {
      if (!ResultPtr && AddrMode.BaseReg) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.BaseReg = nullptr;
      } else if (!ResultPtr && AddrMode.Scale == 1) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.Scale = 0;
      }
    }

    if (!ResultPtr &&
        !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return false;
    } else {
      Type *I8PtrTy =
          Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
      Type *I8Ty = Builder.getInt8Ty();

      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else {
          assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth() &&
                 "We can't transform if ScaledReg is too narrow");
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr =
              Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
      }

      if (SunkAddr->getType() != Addr->getType())
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else {
    // We'd require a ptrtoint/inttoptr down the line, which we can't do for
    // non-integral pointers, so in that case bail out now.
    Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
    Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
    PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
    PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
    if (DL->isNonIntegralPointerType(Addr->getType()) ||
        (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
        (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
        (AddrMode.BaseGV &&
         DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
      return false;

    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return false;
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
  // Store the newly computed address into the cache. In the case where we
  // reused a value, this should be idempotent.
  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakTrackingVH to hold onto it in case this happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);

    if (IterHandle != CurValue) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// If there are any memory operands, use optimizeMemoryInst to sink their
/// address computations into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
  assert(!Val->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Val->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    //   a = Val
    //   b = sext ty1 a to ty2
    //   c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    //   a = Val
    //   b = sext ty1 a to ty2
    //   c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, and zero extending from one type to the other may be
    // free. In that case, the differing use does not add any cost.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}

/// Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
/// When some promotion happened, \p TPT contains the proper state to revert
/// them.
///
/// \return true if some promotion happened, false otherwise.
bool CodeGenPrepare::tryToPromoteExts(
    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
    unsigned CreatedInstsCost) {
  bool Promoted = false;

  // Iterate over all the extensions to try to promote them.
  for (auto I : Exts) {
    // Early check if we directly have ext(load).
    if (isa<LoadInst>(I->getOperand(0))) {
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Check whether or not we want to do any promotion. The reason we have
    // this check inside the for loop is to catch the case where an extension
    // is directly fed by a load because in that case the extension can be
    // moved up without any promotion on its operands.
    if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      return false;

    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH) {
      // Save the current extension as we cannot move up through its operand.
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // Only one extension can be merged into a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
      // This promotion is not profitable; roll back to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
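    // For instance (an illustrative sketch, not from a particular test):
    //   %a = add nuw i16 %x, 1
    //   %z = zext i16 %a to i32
    // may become, after promoting the add to i32:
    //   %px = zext i16 %x to i32
    //   %z = add nuw i32 %px, 1
    // The new zext feeding the add (%px) lands in NewExts, so the recursive
    // call below can try to move it further up in the same way.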
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable, roll
    // back and save the current extension (I) as the last profitable
    // extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}

/// Merge redundant sexts when one dominates the other.
bool CodeGenPrepare::mergeSExts(Function &F) {
  DominatorTree DT(F);
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (DT.dominates(Inst, Pt)) {
          Pt->replaceAllUsesWith(Inst);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!DT.dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          continue;
        Inst->replaceAllUsesWith(Pt);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}

// Split large data structures so that the GEPs accessing them can have
// smaller offsets and can therefore be sunk to the same blocks as their users.
// For example, a large struct starting from %base is split into two parts
// where the second part starts from %new_base.
//
// Before:
// BB0:
//   %base     =
//
// BB1:
//   %gep0     = gep %base, off0
//   %gep1     = gep %base, off1
//   %gep2     = gep %base, off2
//
// BB2:
//   %load1    = load %gep0
//   %load2    = load %gep1
//   %load3    = load %gep2
//
// After:
// BB0:
//   %base     =
//   %new_base = gep %base, off0
//
// BB1:
//   %new_gep0 = %new_base
//   %new_gep1 = gep %new_base, off1 - off0
//   %new_gep2 = gep %new_base, off2 - off0
//
// BB2:
//   %load1    = load i32, i32* %new_gep0
//   %load2    = load i32, i32* %new_gep1
//   %load3    = load i32, i32* %new_gep2
//
// %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
// their offsets are small enough to fit into the addressing mode.
bool CodeGenPrepare::splitLargeGEPOffsets() {
  bool Changed = false;
  for (auto &Entry : LargeOffsetGEPMap) {
    Value *OldBase = Entry.first;
    SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
        &LargeOffsetGEPs = Entry.second;
    auto compareGEPOffset =
        [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
            const std::pair<GetElementPtrInst *, int64_t> &RHS) {
          if (LHS.first == RHS.first)
            return false;
          if (LHS.second != RHS.second)
            return LHS.second < RHS.second;
          return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
        };
    // Sort all the GEPs of the same data structure based on the offsets.
    llvm::sort(LargeOffsetGEPs, compareGEPOffset);
    LargeOffsetGEPs.erase(
        std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
        LargeOffsetGEPs.end());
    // Skip if all the GEPs have the same offsets.
    if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
      continue;
    GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
    int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
    Value *NewBaseGEP = nullptr;

    auto LargeOffsetGEP = LargeOffsetGEPs.begin();
    while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
      GetElementPtrInst *GEP = LargeOffsetGEP->first;
      int64_t Offset = LargeOffsetGEP->second;
      if (Offset != BaseOffset) {
        TargetLowering::AddrMode AddrMode;
        AddrMode.BaseOffs = Offset - BaseOffset;
        // The result type of the GEP might not be the type of the memory
        // access.
        if (!TLI->isLegalAddressingMode(*DL, AddrMode,
                                        GEP->getResultElementType(),
                                        GEP->getAddressSpace())) {
          // We need to create a new base if the offset to the current base is
          // too large to fit into the addressing mode. So, a very large struct
          // may be split into several parts.
          BaseGEP = GEP;
          BaseOffset = Offset;
          NewBaseGEP = nullptr;
        }
      }

      // Generate a new GEP to replace the current one.
      LLVMContext &Ctx = GEP->getContext();
      Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
      Type *I8PtrTy =
          Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
      Type *I8Ty = Type::getInt8Ty(Ctx);

      if (!NewBaseGEP) {
        // Create a new base if we don't have one yet. Find the insertion
        // point for the new base first.
        BasicBlock::iterator NewBaseInsertPt;
        BasicBlock *NewBaseInsertBB;
        if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
          // If the base of the struct is an instruction, the new base will be
          // inserted close to it.
          NewBaseInsertBB = BaseI->getParent();
          if (isa<PHINode>(BaseI))
            NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
          else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
            NewBaseInsertBB =
                SplitEdge(NewBaseInsertBB, Invoke->getNormalDest());
            NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
          } else
            NewBaseInsertPt = std::next(BaseI->getIterator());
        } else {
          // If the current base is an argument or global value, the new base
          // will be inserted into the entry block.
          NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        }
        IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
        // Create a new base.
        Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
        NewBaseGEP = OldBase;
        if (NewBaseGEP->getType() != I8PtrTy)
          NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
        NewBaseGEP =
            NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep");
        NewGEPBases.insert(NewBaseGEP);
      }

      IRBuilder<> Builder(GEP);
      Value *NewGEP = NewBaseGEP;
      if (Offset == BaseOffset) {
        if (GEP->getType() != I8PtrTy)
          NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
      } else {
        // Calculate the new offset for the new GEP.
        Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
        NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);

        if (GEP->getType() != I8PtrTy)
          NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
      }
      GEP->replaceAllUsesWith(NewGEP);
      LargeOffsetGEPID.erase(GEP);
      LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
      GEP->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}

/// Return true if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  return TLI->isExtLoad(LI, Inst, *DL);
}

/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used in memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  // ExtLoad formation and address type promotion infrastructure requires TLI
  // to be effective.
  if (!TLI)
    return false;

  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operation for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);

  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);

  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *ExtFedByLoad;

  // Try to promote a chain of computation if doing so allows forming an
  // extended load.
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->moveAfter(LI);
    // CGP does not check if the zext would be speculatively executed when moved
    // to the same basic block as the load. Preserving its original location
    // would pessimize the debugging experience, as well as negatively impact
    // the quality of sample PGO. We don't want to use "line 0" as that has a
    // size cost in the line-table section and logically the zext can be seen as
    // part of the load. Therefore we conservatively reuse the same debug
    // location for the load and the zext.
    ExtFedByLoad->setDebugLoc(LI->getDebugLoc());
    ++NumExtsMoved;
    Inst = ExtFedByLoad;
    return true;
  }

  // Continue promoting SExts if the target considers address type promotion
  // worthwhile.
  if (ATPConsiderable &&
      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
                                  HasPromoted, TPT, SpeculativelyMovedExts))
    return true;

  TPT.rollback(LastKnownGood);
  return false;
}

// Perform address type promotion if doing so is profitable.
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value. However, if
// AllowPromotionWithoutCommonHeader == true, we expect promoting the
// extension to be profitable on its own.
bool CodeGenPrepare::performAddressTypePromotion(
    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
    bool HasPromoted, TypePromotionTransaction &TPT,
    SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
  bool Promoted = false;
  SmallPtrSet<Instruction *, 1> UnhandledExts;
  bool AllSeenFirst = true;
  for (auto I : SpeculativelyMovedExts) {
    Value *HeadOfChain = I->getOperand(0);
    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
        SeenChainsForSExt.find(HeadOfChain);
    // If there is an unhandled SExt which has the same header, try to promote
    // it as well.
    if (AlreadySeen != SeenChainsForSExt.end()) {
      if (AlreadySeen->second != nullptr)
        UnhandledExts.insert(AlreadySeen->second);
      AllSeenFirst = false;
    }
  }

  if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
                        SpeculativelyMovedExts.size() == 1)) {
    TPT.commit();
    if (HasPromoted)
      Promoted = true;
    for (auto I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = nullptr;
      ValToSExtendedUses[HeadOfChain].push_back(I);
    }
    // Update Inst, as the promotion happened.
    Inst = SpeculativelyMovedExts.pop_back_val();
  } else {
    // This is the first chain visited from this header; keep the current
    // chain as unhandled and defer promoting it until we encounter another
    // SExt chain derived from the same header.
    for (auto I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = Inst;
    }
    return false;
  }

  if (!AllSeenFirst && !UnhandledExts.empty())
    for (auto VisitedSExt : UnhandledExts) {
      if (RemovedInsts.count(VisitedSExt))
        continue;
      TypePromotionTransaction TPT(RemovedInsts);
      SmallVector<Instruction *, 1> Exts;
      SmallVector<Instruction *, 2> Chains;
      Exts.push_back(VisitedSExt);
      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
      TPT.commit();
      if (HasPromoted)
        Promoted = true;
      for (auto I : Chains) {
        Value *HeadOfChain = I->getOperand(0);
        // Mark this as handled.
        SeenChainsForSExt[HeadOfChain] = nullptr;
        ValToSExtendedUses[HeadOfChain].push_back(I);
      }
    }
  return Promoted;
}

bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with result of extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the out-of-block uses are PHI nodes or memory accesses.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
      InsertedInsts.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

// Find loads whose uses only use some of the loaded value's bits. Add an "and"
// just after the load if the target can fold this into one extload instruction,
// with the hope of eliminating some of the other later "and" instructions using
// the loaded value. "and"s that are made trivially redundant by the insertion
// of the new "and" are removed by this function, while others (e.g. those whose
// path from the load goes through a phi) are left for isel to potentially
// remove.
//
// For example:
//
// b0:
//   x = load i32
//   ...
// b1:
//   y = and x, 0xff
//   z = use y
//
// becomes:
//
// b0:
//   x = load i32
//   x' = and x, 0xff
//   ...
// b1:
//   z = use x'
//
// whereas:
//
// b0:
//   x1 = load i32
//   ...
// b1:
//   x2 = load i32
//   ...
// b2:
//   x = phi x1, x2
//   y = and x, 0xff
//
// becomes (after a call to optimizeLoadExt for each load):
//
// b0:
//   x1 = load i32
//   x1' = and x1, 0xff
//   ...
// b1:
//   x2 = load i32
//   x2' = and x2, 0xff
//   ...
// b2:
//   x = phi x1', x2'
//   y = and x, 0xff
bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
  if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
    return false;

  // Skip loads we've already transformed.
  if (Load->hasOneUse() &&
      InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
    return false;

  // Look at all uses of Load, looking through phis, to determine how many bits
  // of the loaded value are needed.
  SmallVector<Instruction *, 8> WorkList;
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<Instruction *, 8> AndsToMaybeRemove;
  for (auto *U : Load->users())
    WorkList.push_back(cast<Instruction>(U));

  EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
  unsigned BitWidth = LoadResultVT.getSizeInBits();
  APInt DemandBits(BitWidth, 0);
  APInt WidestAndBits(BitWidth, 0);

  while (!WorkList.empty()) {
    Instruction *I = WorkList.back();
    WorkList.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(I).second)
      continue;

    // For a PHI node, push all of its users.
    if (auto *Phi = dyn_cast<PHINode>(I)) {
      for (auto *U : Phi->users())
        WorkList.push_back(cast<Instruction>(U));
      continue;
    }

    switch (I->getOpcode()) {
    case Instruction::And: {
      auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!AndC)
        return false;
      APInt AndBits = AndC->getValue();
      DemandBits |= AndBits;
      // Keep track of the widest "and" mask we see.
      if (AndBits.ugt(WidestAndBits))
        WidestAndBits = AndBits;
      if (AndBits == WidestAndBits && I->getOperand(0) == Load)
        AndsToMaybeRemove.push_back(I);
      break;
    }

    case Instruction::Shl: {
      auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!ShlC)
        return false;
      uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
      DemandBits.setLowBits(BitWidth - ShiftAmt);
      break;
    }

    case Instruction::Trunc: {
      EVT TruncVT = TLI->getValueType(*DL, I->getType());
      unsigned TruncBitWidth = TruncVT.getSizeInBits();
      DemandBits.setLowBits(TruncBitWidth);
      break;
    }

    default:
      return false;
    }
  }

  uint32_t ActiveBits = DemandBits.getActiveBits();
  // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
  // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
  // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
  // (and (load x) 1) is not matched as a single instruction, rather as a LDR
  // followed by an AND.
  // TODO: Look into removing this restriction by fixing backends to either
  // return false for isLoadExtLegal for i1 or have them select this pattern to
  // a single instruction.
  //
  // Also avoid hoisting if we didn't see any ands with the exact DemandBits
  // mask, since these are the only ands that will be removed by isel.
  if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
      WidestAndBits != DemandBits)
    return false;

  LLVMContext &Ctx = Load->getType()->getContext();
  Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
  EVT TruncVT = TLI->getValueType(*DL, TruncTy);

  // Reject cases that won't be matched as extloads.
  if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
      !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
    return false;

  IRBuilder<> Builder(Load->getNextNode());
  auto *NewAnd = dyn_cast<Instruction>(
      Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
  // Mark this instruction as "inserted by CGP", so that other
  // optimizations don't touch it.
  InsertedInsts.insert(NewAnd);

  // Replace all uses of load with new and (except for the use of load in the
  // new and itself).
  Load->replaceAllUsesWith(NewAnd);
  NewAnd->setOperand(0, Load);

  // Remove any and instructions that are now redundant.
  for (auto *And : AndsToMaybeRemove)
    // Check that the and mask is the same as the one we decided to put on the
    // new and.
    if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
      And->replaceAllUsesWith(NewAnd);
      if (&*CurInstIterator == And)
        CurInstIterator = std::next(And->getIterator());
      And->eraseFromParent();
      ++NumAndUses;
    }

  ++NumAndsAdded;
  return true;
}

/// Check if V (an operand of a select instruction) is an expensive instruction
/// that is only used once.
static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  // If it's safe to speculatively execute, then it should not have side
  // effects; therefore, it's safe to sink and possibly *not* execute.
  return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
         TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive;
}

/// Returns true if a SelectInst should be turned into an explicit branch.
static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
                                                const TargetLowering *TLI,
                                                SelectInst *SI) {
  // If even a predictable select is cheap, then a branch can't be cheaper.
  if (!TLI->isPredictableSelectExpensive())
    return false;

  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch.

  // If metadata tells us that the select condition is obviously predictable,
  // then we want to replace the select with a branch.
  uint64_t TrueWeight, FalseWeight;
  if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
    uint64_t Max = std::max(TrueWeight, FalseWeight);
    uint64_t Sum = TrueWeight + FalseWeight;
    if (Sum != 0) {
      auto Probability = BranchProbability::getBranchProbability(Max, Sum);
      if (Probability > TLI->getPredictableBranchThreshold())
        return true;
    }
  }

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If a branch is predictable, an out-of-order CPU can avoid blocking on its
  // comparison condition. If the compare has more than one use, there's
  // probably another cmov or setcc around, so it's not worth emitting a branch.
  if (!Cmp || !Cmp->hasOneUse())
    return false;

  // If either operand of the select is expensive and only needed on one side
  // of the select, we should form a branch.
  if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
      sinkSelectOperand(TTI, SI->getFalseValue()))
    return true;

  return false;
}

/// If \p isTrue is true, return the true value of \p SI, otherwise return the
/// false value of \p SI. If the true/false value of \p SI is defined by any
/// select instructions in \p Selects, look through the defining select
/// instruction until the true/false value is not defined in \p Selects.
static Value *getTrueOrFalseValue(
    SelectInst *SI, bool isTrue,
    const SmallPtrSet<const Instruction *, 2> &Selects) {
  Value *V;

  for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
       DefSI = dyn_cast<SelectInst>(V)) {
    assert(DefSI->getCondition() == SI->getCondition() &&
           "The condition of DefSI does not match with SI");
    V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
  }
  return V;
}

/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
  // If branch conversion isn't desirable, exit early.
  if (DisableSelectToBranch || OptSize || !TLI)
    return false;

  // Find all consecutive select instructions that share the same condition.
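  // For instance (an illustrative sketch with made-up value names):
  //   %cmp  = icmp ult i32 %a, %b
  //   %sel1 = select i1 %cmp, i32 %x, i32 %y
  //   %sel2 = select i1 %cmp, i32 %p, i32 %q
  // Both selects share %cmp, so they are gathered into ASI below and later
  // lowered together into a single conditional branch feeding two PHIs.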
  SmallVector<SelectInst *, 2> ASI;
  ASI.push_back(SI);
  for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
       It != SI->getParent()->end(); ++It) {
    SelectInst *I = dyn_cast<SelectInst>(&*It);
    if (I && SI->getCondition() == I->getCondition()) {
      ASI.push_back(I);
    } else {
      break;
    }
  }

  SelectInst *LastSI = ASI.back();
  // Advance the current iterator past the rest of the select instructions:
  // they will either all be lowered to a branch together, or not at all.
  CurInstIterator = std::next(LastSI->getIterator());

  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to CF?
  if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (VectorCond)
    SelectKind = TargetLowering::VectorMaskSelect;
  else if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  if (TLI->isSelectSupported(SelectKind) &&
      !isFormingBranchFromSelectProfitable(TTI, TLI, SI))
    return false;

  ModifiedDT = true;

  // Transform a sequence like this:
  //    start:
  //       %cmp = cmp uge i32 %a, %b
  //       %sel = select i1 %cmp, i32 %c, i32 %d
  //
  // Into:
  //    start:
  //       %cmp = cmp uge i32 %a, %b
  //       br i1 %cmp, label %select.true, label %select.false
  //    select.true:
  //       br label %select.end
  //    select.false:
  //       br label %select.end
  //    select.end:
  //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
  //
  // In addition, we may sink instructions that produce %c or %d from
  // the entry block into the destination(s) of the new branch.
  // If the true or false blocks do not contain a sunken instruction, that
  // block and its branch may be optimized away. In that case, one side of the
  // first branch will point directly to select.end, and the corresponding PHI
  // predecessor block will be the start block.

  // First, we split the block containing the select into 2 blocks.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
  BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");

  // Delete the unconditional branch that was just created by the split.
  StartBlock->getTerminator()->eraseFromParent();

  // These are the new basic blocks for the conditional branch.
  // At least one will become an actual new basic block.
  BasicBlock *TrueBlock = nullptr;
  BasicBlock *FalseBlock = nullptr;
  BranchInst *TrueBranch = nullptr;
  BranchInst *FalseBranch = nullptr;

  // Sink expensive instructions into the conditional blocks to avoid executing
  // them speculatively.
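  // E.g. (a sketch, assuming the target reports fdiv as expensive here):
  //   %div = fdiv double %x, %y          ; only needed when %cmp is true
  //   %sel = select i1 %cmp, double %div, double 0.0
  // would move %div into select.true.sink, so the division only executes on
  // the path that actually uses it.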
5907 for (SelectInst *SI : ASI) { 5908 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 5909 if (TrueBlock == nullptr) { 5910 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 5911 EndBlock->getParent(), EndBlock); 5912 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 5913 TrueBranch->setDebugLoc(SI->getDebugLoc()); 5914 } 5915 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 5916 TrueInst->moveBefore(TrueBranch); 5917 } 5918 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 5919 if (FalseBlock == nullptr) { 5920 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 5921 EndBlock->getParent(), EndBlock); 5922 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5923 FalseBranch->setDebugLoc(SI->getDebugLoc()); 5924 } 5925 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 5926 FalseInst->moveBefore(FalseBranch); 5927 } 5928 } 5929 5930 // If there was nothing to sink, then arbitrarily choose the 'false' side 5931 // for a new input value to the PHI. 5932 if (TrueBlock == FalseBlock) { 5933 assert(TrueBlock == nullptr && 5934 "Unexpected basic block transform while optimizing select"); 5935 5936 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 5937 EndBlock->getParent(), EndBlock); 5938 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5939 FalseBranch->setDebugLoc(SI->getDebugLoc()); 5940 } 5941 5942 // Insert the real conditional branch based on the original condition. 5943 // If we did not create a new block for one of the 'true' or 'false' paths 5944 // of the condition, it means that side of the branch goes to the end block 5945 // directly and the path originates from the start block from the point of 5946 // view of the new PHI. 5947 BasicBlock *TT, *FT; 5948 if (TrueBlock == nullptr) { 5949 TT = EndBlock; 5950 FT = FalseBlock; 5951 TrueBlock = StartBlock; 5952 } else if (FalseBlock == nullptr) { 5953 TT = TrueBlock; 5954 FT = EndBlock; 5955 FalseBlock = StartBlock; 5956 } else { 5957 TT = TrueBlock; 5958 FT = FalseBlock; 5959 } 5960 IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); 5961 5962 SmallPtrSet<const Instruction *, 2> INS; 5963 INS.insert(ASI.begin(), ASI.end()); 5964 // Use reverse iterator because later select may use the value of the 5965 // earlier select, and we need to propagate value through earlier select 5966 // to get the PHI operand. 5967 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 5968 SelectInst *SI = *It; 5969 // The select itself is replaced with a PHI Node. 5970 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 5971 PN->takeName(SI); 5972 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 5973 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 5974 PN->setDebugLoc(SI->getDebugLoc()); 5975 5976 SI->replaceAllUsesWith(PN); 5977 SI->eraseFromParent(); 5978 INS.erase(SI); 5979 ++NumSelectsExpanded; 5980 } 5981 5982 // Instruct OptimizeBlock to skip to the next block. 5983 CurInstIterator = StartBlock->end(); 5984 return true; 5985 } 5986 5987 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 5988 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 5989 int SplatElem = -1; 5990 for (unsigned i = 0; i < Mask.size(); ++i) { 5991 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 5992 return false; 5993 SplatElem = Mask[i]; 5994 } 5995 5996 return true; 5997 } 5998 5999 /// Some targets have expensive vector shifts if the lanes aren't all the same 6000 /// (e.g. 
/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
/// it's often worth sinking a shufflevector splat down to its use so that
/// codegen can spot all lanes are identical.
bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  BasicBlock *DefBB = SVI->getParent();

  // Only do this xform if variable vector shifts are particularly expensive.
  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
    return false;

  // We only expect better codegen by sinking a shuffle if we can recognise a
  // constant splat.
  if (!isBroadcastShuffle(SVI))
    return false;

  // InsertedShuffles - Only insert a shuffle in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

  bool MadeChange = false;
  for (User *U : SVI->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this shuffle is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;

    // For now only apply this when the splat is used by a shift instruction.
    if (!UI->isShift()) continue;

    // Everything checks out, sink the shuffle if the user's block doesn't
    // already have a copy.
    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];

    if (!InsertedShuffle) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedShuffle =
          new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                SVI->getOperand(2), "", &*InsertPt);
    }

    UI->replaceUsesOfWith(SVI, InsertedShuffle);
    MadeChange = true;
  }

  // If we removed all uses, nuke the shuffle.
  if (SVI->use_empty()) {
    SVI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
  // If the operands of I can be folded into a target instruction together with
  // I, duplicate and sink them.
  SmallVector<Use *, 4> OpsToSink;
  if (!TLI || !TLI->shouldSinkOperands(I, OpsToSink))
    return false;

  // OpsToSink can contain multiple uses in a use chain (e.g.
  // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
  // uses must come first, which means they are sunk first, temporarily
  // creating invalid IR. This will be fixed once their dominated users are
  // sunk and updated.
  BasicBlock *TargetBB = I->getParent();
  bool Changed = false;
  SmallVector<Use *, 4> ToReplace;
  for (Use *U : OpsToSink) {
    auto *UI = cast<Instruction>(U->get());
    if (UI->getParent() == TargetBB || isa<PHINode>(UI))
      continue;
    ToReplace.push_back(U);
  }

  SmallPtrSet<Instruction *, 4> MaybeDead;
  for (Use *U : ToReplace) {
    auto *UI = cast<Instruction>(U->get());
    Instruction *NI = UI->clone();
    MaybeDead.insert(UI);
    LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
    NI->insertBefore(I);
    InsertedInsts.insert(NI);
    U->set(NI);
    Changed = true;
  }

  // Remove instructions that are dead after sinking.
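  // (A sunk-from instruction may still have uses besides the one just
  // rewritten, e.g. in another block, so only erase it once it has no uses
  // left.)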
  for (auto *I : MaybeDead)
    if (!I->hasNUsesOrMore(1))
      I->eraseFromParent();

  return Changed;
}

bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
  if (!TLI || !DL)
    return false;

  Value *Cond = SI->getCondition();
  Type *OldType = Cond->getType();
  LLVMContext &Context = Cond->getContext();
  MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
  unsigned RegWidth = RegType.getSizeInBits();

  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
    return false;

  // If the register width is greater than the type width, expand the condition
  // of the switch instruction and each case constant to the width of the
  // register. By widening the type of the switch condition, subsequent
  // comparisons (for case comparisons) will not need to be extended to the
  // preferred register width, so we will potentially eliminate N-1 extends,
  // where N is the number of cases in the switch.
  auto *NewType = Type::getIntNTy(Context, RegWidth);

  // Zero-extend the switch condition and case constants unless the switch
  // condition is a function argument that is already being sign-extended.
  // In that case, we can avoid an unnecessary mask/extension by sign-extending
  // everything instead.
  Instruction::CastOps ExtType = Instruction::ZExt;
  if (auto *Arg = dyn_cast<Argument>(Cond))
    if (Arg->hasSExtAttr())
      ExtType = Instruction::SExt;

  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
  ExtInst->insertBefore(SI);
  ExtInst->setDebugLoc(SI->getDebugLoc());
  SI->setCondition(ExtInst);
  for (auto Case : SI->cases()) {
    APInt NarrowConst = Case.getCaseValue()->getValue();
    APInt WideConst = (ExtType == Instruction::ZExt) ?
                      NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
  }

  return true;
}

namespace {

/// Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;

  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;

  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;

  /// Instruction that will be combined with the transition.
  Instruction *CombineInst = nullptr;

  /// The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 0;
  }

  /// Return the operand index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 1;
  }

  /// Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// Promote \p ToBePromoted by moving \p Def downward through it.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    unsigned Align = ST->getAlignment();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            Align)) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the transition
    // scalar to vector.
    // The vector chain has to account for the combining cost.
    uint64_t ScalarCost =
        TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
    uint64_t VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueKind Arg0OVK =
          IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                         : TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Arg1OVK =
          !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                          : TargetTransformInfo::OK_AnyValue;
      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               Arg0OVK, Arg1OVK);
    }
    LLVM_DEBUG(
        dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
               << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// Generate a constant vector containing \p Val, with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    unsigned End = getTransitionType()->getVectorNumElements();
    if (UseSplat)
      return ConstantVector::getSplat(End, Val);

    SmallVector<Constant *, 4> ConstVec;
    UndefValue *UndefVal = UndefValue::get(Val->getType());
    for (unsigned Idx = 0; Idx != End; ++Idx) {
      if (Idx == ExtractIdx)
        ConstVec.push_back(Val);
      else
        ConstVec.push_back(UndefVal);
    }
    return ConstantVector::get(ConstVec);
  }

  /// Check if promoting the operand at \p OperandIdx in \p Use to a vector
  /// type can trigger undefined behavior.
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost) {
    assert(Transition && "Do not know how to promote null");
  }

  /// Check if we can promote \p ToBePromoted to \p Type.
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
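    // For example (a sketch): if %r = udiv i32 %c, %lane, where %lane is the
    // current end of the transition, promotion is rejected below; widening
    // would put undef lanes on the divisor side and could introduce a
    // division by zero. A constant divisor is instead splatted across all
    // lanes (see getConstantVector).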
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};

} // end anonymous namespace

void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
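  // For example (a sketch): with Transition = extractelement <2 x i32> %a,
  // i32 0, an enqueued "mul i32 Def, 7" becomes "mul <2 x i32> %a,
  // <i32 7, i32 undef>" (or a "<i32 7, i32 7>" splat when an undef lane
  // would be unsafe), and the extractelement is moved after it.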
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->moveAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}

/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = std::numeric_limits<unsigned>::max();
  if (DisableStoreExtract || !TLI ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it at a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
                        << ToBePromoted->getParent()->getName()
                        << ") than the transition (" << Parent->getName()
                        << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
                        << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    LLVM_DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}

/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
///            (shl (zext I to i64), 32)), addr)  -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is sroa transformed before being inlined into
/// hoo.
/// void goo(const std::pair<int, float> &);
/// hoo() {
///   ...
///   goo(std::make_pair(tmp, ftmp));
///   ...
/// }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern is across
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();
  if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
      DL.getTypeSizeInBits(SplitStoreType))
    return false;

  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  //  or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of OR and the first operand of SHL to have only
  // one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are ints no wider than HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split store.
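  // On a little-endian target the upper half lives at byte offset
  // HalfValBitSize/8, so CreateSplitStore below adds a one-element GEP for
  // the upper half; on a big-endian target the offsets are swapped and the
  // GEP is applied to the lower half instead.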
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in current
  // BB so it may be merged with the split stores by dag combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  bool IsLE = SI.getModule()->getDataLayout().isLittleEndian();
  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = Builder.CreateBitCast(
        SI.getOperand(1),
        SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
    if ((IsLE && Upper) || (!IsLE && !Upper))
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
    Builder.CreateAlignedStore(
        V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}

// Return true if the GEP has two operands, the first operand is of a
// sequential type, and the second operand is a constant.
static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
  gep_type_iterator I = gep_type_begin(*GEP);
  return GEP->getNumOperands() == 2 &&
         I.isSequential() &&
         isa<ConstantInt>(GEP->getOperand(1));
}

// Try unmerging GEPs to reduce liveness interference (register pressure)
// across IndirectBr edges. Since IndirectBr edges tend to touch on many
// blocks, reducing liveness interference across those edges benefits global
// register allocation. Currently handles only certain cases.
//
// For example, unmerge %GEPI and %UGEPI as below.
//
// ---------- BEFORE ----------
// SrcBlock:
//   ...
//   %GEPIOp = ...
//   ...
//   %GEPI = gep %GEPIOp, Idx
//   ...
//   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
//   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it's used by
//      %UGEPI)
//
// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPIOp, UIdx
//   ...
// ---------------------------
//
// ---------- AFTER ----------
// SrcBlock:
//   ... (same as above)
//   (* %GEPI is still alive on the indirectbr edges)
//   (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
//      unmerging)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPI, (UIdx-Idx)
//   ...
// ---------------------------
//
// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
// no longer alive on them.
//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting
// merging of GEPs in the first place in InstCombiner::visitGetElementPtrInst()
// so as not to disable further simplifications and optimizations as a result
// of GEP merging.
//
// Note this unmerging may increase the length of the data flow critical path
// (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a
// tradeoff between the register pressure and the length of the data-flow
// critical path. Restricting this to the uncommon IndirectBr case would
// minimize the impact of a potentially longer critical path, if any, and the
// impact on compile time.
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType())
      > TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEPI is used outside the block, meaning it's alive on the
  // IndirectBr edge(s).
  if (find_if(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock) {
            return true;
          }
        }
        return false;
      }) == GEPI->users().end())
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to see if unmerging would make GEPIOp not alive
  // on IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI) continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that's fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp
    // is the pointer operand to it. If so, record it in the vector. If not,
    // give up.
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType())
        > TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.size() == 0)
    return false;
  // Check the materializing cost of (UIdx-Idx).
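  // For example (a sketch): with Idx = 100 and UIdx = 104, the unmerged gep
  // would use the immediate 4, which is typically free to materialize, so
  // the rewrite below would be allowed.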
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType());
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx =
        ConstantInt::get(GEPIIdx->getType(),
                         UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on IndirectBr edges).
  assert(find_if(GEPIOp->users(), [&](User *Usr) {
           return cast<Instruction>(Usr)->getParent() != SrcBlock;
         }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
  return true;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (auto *Cmp = dyn_cast<CmpInst>(I))
    if (TLI && optimizeCmp(Cmp, *TLI, *DL, ModifiedDT))
      return true;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      bool Modified = optimizeLoadExt(LI);
      unsigned AS = LI->getPointerAddressSpace();
      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
      return Modified;
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
      EnableAndCmpSinking && TLI)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      NC->setDebugLoc(GEPI->getDebugLoc());
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
    return false;
  }

  if (tryToSinkFreeOperands(I))
    return true;

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);
  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, iSel may not be able to
// handle it properly, and will drop the llvm.dbg.value if it cannot find a
// node corresponding to the value.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        LLVM_DEBUG(dbgs() << "Moving Debug Value before:\n"
                          << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

/// Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
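  // E.g. (a sketch): NewTrue = 2^33 and NewFalse = 2^32 give Scale = 3,
  // scaling the weights to roughly 2.86e9 and 1.43e9, both of which fit in
  // uint32_t while approximately preserving their 2:1 ratio.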
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block to use the first condition directly in
    // its branch instruction, and remove the no-longer-needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes
    // from the newly generated BB (TmpBB). In the other successor we need to
    // add one incoming edge to the PHI nodes, because both branch
    // instructions now target the same successor.
    // Depending on the original branch condition (and/or) we have to swap the
    // successors (TrueDest, FalseDest), so that we perform the correct update
    // for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (PHINode &PN : TBB->phis()) {
      int i;
      while ((i = PN.getBasicBlockIndex(&BB)) >= 0)
        PN.setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This
      // choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     == FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This
      // choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
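      // For example (a sketch): with original weights A = 3 and B = 1,
      //   FalseProb for BB1 == B/(2A+2B) == 1/8, and
      //   TrueProb for BB1 * FalseProb for TmpBB == 7/8 * 1/7 == 1/8,
      // so their sum is 1/4 == B/(A+B), the original FalseProb, as required.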
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
               TmpBB->dump());
  }
  return MadeChange;
}