1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass munges the code in the input function to better prepare it for 10 // SelectionDAG-based code generation. This works around limitations in it's 11 // basic-block-at-a-time approach. It should eventually be removed. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/PointerIntPair.h" 19 #include "llvm/ADT/STLExtras.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/Analysis/BlockFrequencyInfo.h" 24 #include "llvm/Analysis/BranchProbabilityInfo.h" 25 #include "llvm/Analysis/ConstantFolding.h" 26 #include "llvm/Analysis/InstructionSimplify.h" 27 #include "llvm/Analysis/LoopInfo.h" 28 #include "llvm/Analysis/MemoryBuiltins.h" 29 #include "llvm/Analysis/ProfileSummaryInfo.h" 30 #include "llvm/Analysis/TargetLibraryInfo.h" 31 #include "llvm/Analysis/TargetTransformInfo.h" 32 #include "llvm/Transforms/Utils/Local.h" 33 #include "llvm/Analysis/ValueTracking.h" 34 #include "llvm/CodeGen/Analysis.h" 35 #include "llvm/CodeGen/ISDOpcodes.h" 36 #include "llvm/CodeGen/SelectionDAGNodes.h" 37 #include "llvm/CodeGen/TargetLowering.h" 38 #include "llvm/CodeGen/TargetPassConfig.h" 39 #include "llvm/CodeGen/TargetSubtargetInfo.h" 40 #include "llvm/CodeGen/ValueTypes.h" 41 #include "llvm/Config/llvm-config.h" 42 #include "llvm/IR/Argument.h" 43 #include "llvm/IR/Attributes.h" 44 #include "llvm/IR/BasicBlock.h" 45 #include "llvm/IR/CallSite.h" 46 #include "llvm/IR/Constant.h" 47 #include "llvm/IR/Constants.h" 48 #include "llvm/IR/DataLayout.h" 49 #include "llvm/IR/DerivedTypes.h" 50 #include "llvm/IR/Dominators.h" 51 #include "llvm/IR/Function.h" 52 #include "llvm/IR/GetElementPtrTypeIterator.h" 53 #include "llvm/IR/GlobalValue.h" 54 #include "llvm/IR/GlobalVariable.h" 55 #include "llvm/IR/IRBuilder.h" 56 #include "llvm/IR/InlineAsm.h" 57 #include "llvm/IR/InstrTypes.h" 58 #include "llvm/IR/Instruction.h" 59 #include "llvm/IR/Instructions.h" 60 #include "llvm/IR/IntrinsicInst.h" 61 #include "llvm/IR/Intrinsics.h" 62 #include "llvm/IR/LLVMContext.h" 63 #include "llvm/IR/MDBuilder.h" 64 #include "llvm/IR/Module.h" 65 #include "llvm/IR/Operator.h" 66 #include "llvm/IR/PatternMatch.h" 67 #include "llvm/IR/Statepoint.h" 68 #include "llvm/IR/Type.h" 69 #include "llvm/IR/Use.h" 70 #include "llvm/IR/User.h" 71 #include "llvm/IR/Value.h" 72 #include "llvm/IR/ValueHandle.h" 73 #include "llvm/IR/ValueMap.h" 74 #include "llvm/Pass.h" 75 #include "llvm/Support/BlockFrequency.h" 76 #include "llvm/Support/BranchProbability.h" 77 #include "llvm/Support/Casting.h" 78 #include "llvm/Support/CommandLine.h" 79 #include "llvm/Support/Compiler.h" 80 #include "llvm/Support/Debug.h" 81 #include "llvm/Support/ErrorHandling.h" 82 #include "llvm/Support/MachineValueType.h" 83 #include "llvm/Support/MathExtras.h" 84 #include "llvm/Support/raw_ostream.h" 85 #include "llvm/Target/TargetMachine.h" 86 #include "llvm/Target/TargetOptions.h" 87 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 88 #include "llvm/Transforms/Utils/BypassSlowDivision.h" 89 #include "llvm/Transforms/Utils/SimplifyLibCalls.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <limits> 95 #include <memory> 96 #include <utility> 97 #include <vector> 98 99 using namespace llvm; 100 using namespace llvm::PatternMatch; 101 102 #define DEBUG_TYPE "codegenprepare" 103 104 STATISTIC(NumBlocksElim, "Number of blocks eliminated"); 105 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); 106 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); 107 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " 108 "sunken Cmps"); 109 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " 110 "of sunken Casts"); 111 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " 112 "computations were sunk"); 113 STATISTIC(NumMemoryInstsPhiCreated, 114 "Number of phis created when address " 115 "computations were sunk to memory instructions"); 116 STATISTIC(NumMemoryInstsSelectCreated, 117 "Number of select created when address " 118 "computations were sunk to memory instructions"); 119 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); 120 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); 121 STATISTIC(NumAndsAdded, 122 "Number of and mask instructions added to form ext loads"); 123 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); 124 STATISTIC(NumRetsDup, "Number of return instructions duplicated"); 125 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); 126 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); 127 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); 128 129 static cl::opt<bool> DisableBranchOpts( 130 "disable-cgp-branch-opts", cl::Hidden, cl::init(false), 131 cl::desc("Disable branch optimizations in CodeGenPrepare")); 132 133 static cl::opt<bool> 134 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), 135 cl::desc("Disable GC optimizations in CodeGenPrepare")); 136 137 static cl::opt<bool> DisableSelectToBranch( 138 "disable-cgp-select2branch", cl::Hidden, cl::init(false), 139 cl::desc("Disable select to branch conversion.")); 140 141 static cl::opt<bool> AddrSinkUsingGEPs( 142 "addr-sink-using-gep", cl::Hidden, cl::init(true), 143 cl::desc("Address sinking in CGP using GEPs.")); 144 145 static cl::opt<bool> EnableAndCmpSinking( 146 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 147 cl::desc("Enable sinkinig and/cmp into branches.")); 148 149 static cl::opt<bool> DisableStoreExtract( 150 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 151 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 152 153 static cl::opt<bool> StressStoreExtract( 154 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 155 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 156 157 static cl::opt<bool> DisableExtLdPromotion( 158 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 159 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 160 "CodeGenPrepare")); 161 162 static cl::opt<bool> StressExtLdPromotion( 163 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 164 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 165 "optimization in CodeGenPrepare")); 166 167 static cl::opt<bool> DisablePreheaderProtect( 168 "disable-preheader-prot", cl::Hidden, cl::init(false), 169 cl::desc("Disable protection against removing loop preheaders")); 170 171 static cl::opt<bool> ProfileGuidedSectionPrefix( 172 "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore, 173 cl::desc("Use profile info to add section prefix for hot/cold functions")); 174 175 static cl::opt<unsigned> FreqRatioToSkipMerge( 176 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), 177 cl::desc("Skip merging empty blocks if (frequency of empty block) / " 178 "(frequency of destination block) is greater than this ratio")); 179 180 static cl::opt<bool> ForceSplitStore( 181 "force-split-store", cl::Hidden, cl::init(false), 182 cl::desc("Force store splitting no matter what the target query says.")); 183 184 static cl::opt<bool> 185 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden, 186 cl::desc("Enable merging of redundant sexts when one is dominating" 187 " the other."), cl::init(true)); 188 189 static cl::opt<bool> DisableComplexAddrModes( 190 "disable-complex-addr-modes", cl::Hidden, cl::init(false), 191 cl::desc("Disables combining addressing modes with different parts " 192 "in optimizeMemoryInst.")); 193 194 static cl::opt<bool> 195 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false), 196 cl::desc("Allow creation of Phis in Address sinking.")); 197 198 static cl::opt<bool> 199 AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true), 200 cl::desc("Allow creation of selects in Address sinking.")); 201 202 static cl::opt<bool> AddrSinkCombineBaseReg( 203 "addr-sink-combine-base-reg", cl::Hidden, cl::init(true), 204 cl::desc("Allow combining of BaseReg field in Address sinking.")); 205 206 static cl::opt<bool> AddrSinkCombineBaseGV( 207 "addr-sink-combine-base-gv", cl::Hidden, cl::init(true), 208 cl::desc("Allow combining of BaseGV field in Address sinking.")); 209 210 static cl::opt<bool> AddrSinkCombineBaseOffs( 211 "addr-sink-combine-base-offs", cl::Hidden, cl::init(true), 212 cl::desc("Allow combining of BaseOffs field in Address sinking.")); 213 214 static cl::opt<bool> AddrSinkCombineScaledReg( 215 "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), 216 cl::desc("Allow combining of ScaledReg field in Address sinking.")); 217 218 static cl::opt<bool> 219 EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden, 220 cl::init(true), 221 cl::desc("Enable splitting large offset of GEP.")); 222 223 namespace { 224 225 enum ExtType { 226 ZeroExtension, // Zero extension has been seen. 227 SignExtension, // Sign extension has been seen. 228 BothExtension // This extension type is used if we saw sext after 229 // ZeroExtension had been set, or if we saw zext after 230 // SignExtension had been set. It makes the type 231 // information of a promoted instruction invalid. 232 }; 233 234 using SetOfInstrs = SmallPtrSet<Instruction *, 16>; 235 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>; 236 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>; 237 using SExts = SmallVector<Instruction *, 16>; 238 using ValueToSExts = DenseMap<Value *, SExts>; 239 240 class TypePromotionTransaction; 241 242 class CodeGenPrepare : public FunctionPass { 243 const TargetMachine *TM = nullptr; 244 const TargetSubtargetInfo *SubtargetInfo; 245 const TargetLowering *TLI = nullptr; 246 const TargetRegisterInfo *TRI; 247 const TargetTransformInfo *TTI = nullptr; 248 const TargetLibraryInfo *TLInfo; 249 const LoopInfo *LI; 250 std::unique_ptr<BlockFrequencyInfo> BFI; 251 std::unique_ptr<BranchProbabilityInfo> BPI; 252 253 /// As we scan instructions optimizing them, this is the next instruction 254 /// to optimize. Transforms that can invalidate this should update it. 255 BasicBlock::iterator CurInstIterator; 256 257 /// Keeps track of non-local addresses that have been sunk into a block. 258 /// This allows us to avoid inserting duplicate code for blocks with 259 /// multiple load/stores of the same address. The usage of WeakTrackingVH 260 /// enables SunkAddrs to be treated as a cache whose entries can be 261 /// invalidated if a sunken address computation has been erased. 262 ValueMap<Value*, WeakTrackingVH> SunkAddrs; 263 264 /// Keeps track of all instructions inserted for the current function. 265 SetOfInstrs InsertedInsts; 266 267 /// Keeps track of the type of the related instruction before their 268 /// promotion for the current function. 269 InstrToOrigTy PromotedInsts; 270 271 /// Keep track of instructions removed during promotion. 272 SetOfInstrs RemovedInsts; 273 274 /// Keep track of sext chains based on their initial value. 275 DenseMap<Value *, Instruction *> SeenChainsForSExt; 276 277 /// Keep track of GEPs accessing the same data structures such as structs or 278 /// arrays that are candidates to be split later because of their large 279 /// size. 280 MapVector< 281 AssertingVH<Value>, 282 SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>> 283 LargeOffsetGEPMap; 284 285 /// Keep track of new GEP base after splitting the GEPs having large offset. 286 SmallSet<AssertingVH<Value>, 2> NewGEPBases; 287 288 /// Map serial numbers to Large offset GEPs. 289 DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID; 290 291 /// Keep track of SExt promoted. 292 ValueToSExts ValToSExtendedUses; 293 294 /// True if CFG is modified in any way. 295 bool ModifiedDT; 296 297 /// True if optimizing for size. 298 bool OptSize; 299 300 /// DataLayout for the Function being processed. 301 const DataLayout *DL = nullptr; 302 303 public: 304 static char ID; // Pass identification, replacement for typeid 305 306 CodeGenPrepare() : FunctionPass(ID) { 307 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); 308 } 309 310 bool runOnFunction(Function &F) override; 311 312 StringRef getPassName() const override { return "CodeGen Prepare"; } 313 314 void getAnalysisUsage(AnalysisUsage &AU) const override { 315 // FIXME: When we can selectively preserve passes, preserve the domtree. 316 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 317 AU.addRequired<TargetLibraryInfoWrapperPass>(); 318 AU.addRequired<TargetTransformInfoWrapperPass>(); 319 AU.addRequired<LoopInfoWrapperPass>(); 320 } 321 322 private: 323 template <typename F> 324 void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) { 325 // Substituting can cause recursive simplifications, which can invalidate 326 // our iterator. Use a WeakTrackingVH to hold onto it in case this 327 // happens. 328 Value *CurValue = &*CurInstIterator; 329 WeakTrackingVH IterHandle(CurValue); 330 331 f(); 332 333 // If the iterator instruction was recursively deleted, start over at the 334 // start of the block. 335 if (IterHandle != CurValue) { 336 CurInstIterator = BB->begin(); 337 SunkAddrs.clear(); 338 } 339 } 340 341 bool eliminateFallThrough(Function &F); 342 bool eliminateMostlyEmptyBlocks(Function &F); 343 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB); 344 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; 345 void eliminateMostlyEmptyBlock(BasicBlock *BB); 346 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB, 347 bool isPreheader); 348 bool optimizeBlock(BasicBlock &BB, DominatorTree &DT, bool &ModifiedDT); 349 bool optimizeInst(Instruction *I, DominatorTree &DT, bool &ModifiedDT); 350 bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 351 Type *AccessTy, unsigned AddrSpace); 352 bool optimizeInlineAsmInst(CallInst *CS); 353 bool optimizeCallInst(CallInst *CI, bool &ModifiedDT); 354 bool optimizeExt(Instruction *&I); 355 bool optimizeExtUses(Instruction *I); 356 bool optimizeLoadExt(LoadInst *Load); 357 bool optimizeSelectInst(SelectInst *SI); 358 bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI); 359 bool optimizeSwitchInst(SwitchInst *SI); 360 bool optimizeExtractElementInst(Instruction *Inst); 361 bool dupRetToEnableTailCallOpts(BasicBlock *BB); 362 bool placeDbgValues(Function &F); 363 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts, 364 LoadInst *&LI, Instruction *&Inst, bool HasPromoted); 365 bool tryToPromoteExts(TypePromotionTransaction &TPT, 366 const SmallVectorImpl<Instruction *> &Exts, 367 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 368 unsigned CreatedInstsCost = 0); 369 bool mergeSExts(Function &F, DominatorTree &DT); 370 bool splitLargeGEPOffsets(); 371 bool performAddressTypePromotion( 372 Instruction *&Inst, 373 bool AllowPromotionWithoutCommonHeader, 374 bool HasPromoted, TypePromotionTransaction &TPT, 375 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts); 376 bool splitBranchCondition(Function &F); 377 bool simplifyOffsetableRelocate(Instruction &I); 378 379 bool tryToSinkFreeOperands(Instruction *I); 380 }; 381 382 } // end anonymous namespace 383 384 char CodeGenPrepare::ID = 0; 385 386 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE, 387 "Optimize for code generation", false, false) 388 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 389 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, 390 "Optimize for code generation", false, false) 391 392 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); } 393 394 bool CodeGenPrepare::runOnFunction(Function &F) { 395 if (skipFunction(F)) 396 return false; 397 398 DL = &F.getParent()->getDataLayout(); 399 400 bool EverMadeChange = false; 401 // Clear per function information. 402 InsertedInsts.clear(); 403 PromotedInsts.clear(); 404 405 ModifiedDT = false; 406 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) { 407 TM = &TPC->getTM<TargetMachine>(); 408 SubtargetInfo = TM->getSubtargetImpl(F); 409 TLI = SubtargetInfo->getTargetLowering(); 410 TRI = SubtargetInfo->getRegisterInfo(); 411 } 412 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 413 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 414 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 415 BPI.reset(new BranchProbabilityInfo(F, *LI)); 416 BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI)); 417 OptSize = F.optForSize(); 418 419 ProfileSummaryInfo *PSI = 420 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 421 if (ProfileGuidedSectionPrefix) { 422 if (PSI->isFunctionHotInCallGraph(&F, *BFI)) 423 F.setSectionPrefix(".hot"); 424 else if (PSI->isFunctionColdInCallGraph(&F, *BFI)) 425 F.setSectionPrefix(".unlikely"); 426 } 427 428 /// This optimization identifies DIV instructions that can be 429 /// profitably bypassed and carried out with a shorter, faster divide. 430 if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI && 431 TLI->isSlowDivBypassed()) { 432 const DenseMap<unsigned int, unsigned int> &BypassWidths = 433 TLI->getBypassSlowDivWidths(); 434 BasicBlock* BB = &*F.begin(); 435 while (BB != nullptr) { 436 // bypassSlowDivision may create new BBs, but we don't want to reapply the 437 // optimization to those blocks. 438 BasicBlock* Next = BB->getNextNode(); 439 EverMadeChange |= bypassSlowDivision(BB, BypassWidths); 440 BB = Next; 441 } 442 } 443 444 // Eliminate blocks that contain only PHI nodes and an 445 // unconditional branch. 446 EverMadeChange |= eliminateMostlyEmptyBlocks(F); 447 448 if (!DisableBranchOpts) 449 EverMadeChange |= splitBranchCondition(F); 450 451 // Split some critical edges where one of the sources is an indirect branch, 452 // to help generate sane code for PHIs involving such edges. 453 EverMadeChange |= SplitIndirectBrCriticalEdges(F); 454 455 bool MadeChange = true; 456 while (MadeChange) { 457 MadeChange = false; 458 DominatorTree DT(F); 459 for (Function::iterator I = F.begin(); I != F.end(); ) { 460 BasicBlock *BB = &*I++; 461 bool ModifiedDTOnIteration = false; 462 MadeChange |= optimizeBlock(*BB, DT, ModifiedDTOnIteration); 463 464 // Restart BB iteration if the dominator tree of the Function was changed 465 if (ModifiedDTOnIteration) 466 break; 467 } 468 if (EnableTypePromotionMerge && !ValToSExtendedUses.empty()) 469 MadeChange |= mergeSExts(F, DT); 470 if (!LargeOffsetGEPMap.empty()) 471 MadeChange |= splitLargeGEPOffsets(); 472 473 // Really free removed instructions during promotion. 474 for (Instruction *I : RemovedInsts) 475 I->deleteValue(); 476 477 EverMadeChange |= MadeChange; 478 SeenChainsForSExt.clear(); 479 ValToSExtendedUses.clear(); 480 RemovedInsts.clear(); 481 LargeOffsetGEPMap.clear(); 482 LargeOffsetGEPID.clear(); 483 } 484 485 SunkAddrs.clear(); 486 487 if (!DisableBranchOpts) { 488 MadeChange = false; 489 // Use a set vector to get deterministic iteration order. The order the 490 // blocks are removed may affect whether or not PHI nodes in successors 491 // are removed. 492 SmallSetVector<BasicBlock*, 8> WorkList; 493 for (BasicBlock &BB : F) { 494 SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB)); 495 MadeChange |= ConstantFoldTerminator(&BB, true); 496 if (!MadeChange) continue; 497 498 for (SmallVectorImpl<BasicBlock*>::iterator 499 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 500 if (pred_begin(*II) == pred_end(*II)) 501 WorkList.insert(*II); 502 } 503 504 // Delete the dead blocks and any of their dead successors. 505 MadeChange |= !WorkList.empty(); 506 while (!WorkList.empty()) { 507 BasicBlock *BB = WorkList.pop_back_val(); 508 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); 509 510 DeleteDeadBlock(BB); 511 512 for (SmallVectorImpl<BasicBlock*>::iterator 513 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 514 if (pred_begin(*II) == pred_end(*II)) 515 WorkList.insert(*II); 516 } 517 518 // Merge pairs of basic blocks with unconditional branches, connected by 519 // a single edge. 520 if (EverMadeChange || MadeChange) 521 MadeChange |= eliminateFallThrough(F); 522 523 EverMadeChange |= MadeChange; 524 } 525 526 if (!DisableGCOpts) { 527 SmallVector<Instruction *, 2> Statepoints; 528 for (BasicBlock &BB : F) 529 for (Instruction &I : BB) 530 if (isStatepoint(I)) 531 Statepoints.push_back(&I); 532 for (auto &I : Statepoints) 533 EverMadeChange |= simplifyOffsetableRelocate(*I); 534 } 535 536 // Do this last to clean up use-before-def scenarios introduced by other 537 // preparatory transforms. 538 EverMadeChange |= placeDbgValues(F); 539 540 return EverMadeChange; 541 } 542 543 /// Merge basic blocks which are connected by a single edge, where one of the 544 /// basic blocks has a single successor pointing to the other basic block, 545 /// which has a single predecessor. 546 bool CodeGenPrepare::eliminateFallThrough(Function &F) { 547 bool Changed = false; 548 // Scan all of the blocks in the function, except for the entry block. 549 // Use a temporary array to avoid iterator being invalidated when 550 // deleting blocks. 551 SmallVector<WeakTrackingVH, 16> Blocks; 552 for (auto &Block : llvm::make_range(std::next(F.begin()), F.end())) 553 Blocks.push_back(&Block); 554 555 for (auto &Block : Blocks) { 556 auto *BB = cast_or_null<BasicBlock>(Block); 557 if (!BB) 558 continue; 559 // If the destination block has a single pred, then this is a trivial 560 // edge, just collapse it. 561 BasicBlock *SinglePred = BB->getSinglePredecessor(); 562 563 // Don't merge if BB's address is taken. 564 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; 565 566 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); 567 if (Term && !Term->isConditional()) { 568 Changed = true; 569 LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n"); 570 571 // Merge BB into SinglePred and delete it. 572 MergeBlockIntoPredecessor(BB); 573 } 574 } 575 return Changed; 576 } 577 578 /// Find a destination block from BB if BB is mergeable empty block. 579 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) { 580 // If this block doesn't end with an uncond branch, ignore it. 581 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); 582 if (!BI || !BI->isUnconditional()) 583 return nullptr; 584 585 // If the instruction before the branch (skipping debug info) isn't a phi 586 // node, then other stuff is happening here. 587 BasicBlock::iterator BBI = BI->getIterator(); 588 if (BBI != BB->begin()) { 589 --BBI; 590 while (isa<DbgInfoIntrinsic>(BBI)) { 591 if (BBI == BB->begin()) 592 break; 593 --BBI; 594 } 595 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) 596 return nullptr; 597 } 598 599 // Do not break infinite loops. 600 BasicBlock *DestBB = BI->getSuccessor(0); 601 if (DestBB == BB) 602 return nullptr; 603 604 if (!canMergeBlocks(BB, DestBB)) 605 DestBB = nullptr; 606 607 return DestBB; 608 } 609 610 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an 611 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split 612 /// edges in ways that are non-optimal for isel. Start by eliminating these 613 /// blocks so we can split them the way we want them. 614 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) { 615 SmallPtrSet<BasicBlock *, 16> Preheaders; 616 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end()); 617 while (!LoopList.empty()) { 618 Loop *L = LoopList.pop_back_val(); 619 LoopList.insert(LoopList.end(), L->begin(), L->end()); 620 if (BasicBlock *Preheader = L->getLoopPreheader()) 621 Preheaders.insert(Preheader); 622 } 623 624 bool MadeChange = false; 625 // Copy blocks into a temporary array to avoid iterator invalidation issues 626 // as we remove them. 627 // Note that this intentionally skips the entry block. 628 SmallVector<WeakTrackingVH, 16> Blocks; 629 for (auto &Block : llvm::make_range(std::next(F.begin()), F.end())) 630 Blocks.push_back(&Block); 631 632 for (auto &Block : Blocks) { 633 BasicBlock *BB = cast_or_null<BasicBlock>(Block); 634 if (!BB) 635 continue; 636 BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB); 637 if (!DestBB || 638 !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB))) 639 continue; 640 641 eliminateMostlyEmptyBlock(BB); 642 MadeChange = true; 643 } 644 return MadeChange; 645 } 646 647 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB, 648 BasicBlock *DestBB, 649 bool isPreheader) { 650 // Do not delete loop preheaders if doing so would create a critical edge. 651 // Loop preheaders can be good locations to spill registers. If the 652 // preheader is deleted and we create a critical edge, registers may be 653 // spilled in the loop body instead. 654 if (!DisablePreheaderProtect && isPreheader && 655 !(BB->getSinglePredecessor() && 656 BB->getSinglePredecessor()->getSingleSuccessor())) 657 return false; 658 659 // Skip merging if the block's successor is also a successor to any callbr 660 // that leads to this block. 661 // FIXME: Is this really needed? Is this a correctness issue? 662 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { 663 if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator())) 664 for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i) 665 if (DestBB == CBI->getSuccessor(i)) 666 return false; 667 } 668 669 // Try to skip merging if the unique predecessor of BB is terminated by a 670 // switch or indirect branch instruction, and BB is used as an incoming block 671 // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to 672 // add COPY instructions in the predecessor of BB instead of BB (if it is not 673 // merged). Note that the critical edge created by merging such blocks wont be 674 // split in MachineSink because the jump table is not analyzable. By keeping 675 // such empty block (BB), ISel will place COPY instructions in BB, not in the 676 // predecessor of BB. 677 BasicBlock *Pred = BB->getUniquePredecessor(); 678 if (!Pred || 679 !(isa<SwitchInst>(Pred->getTerminator()) || 680 isa<IndirectBrInst>(Pred->getTerminator()))) 681 return true; 682 683 if (BB->getTerminator() != BB->getFirstNonPHIOrDbg()) 684 return true; 685 686 // We use a simple cost heuristic which determine skipping merging is 687 // profitable if the cost of skipping merging is less than the cost of 688 // merging : Cost(skipping merging) < Cost(merging BB), where the 689 // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and 690 // the Cost(merging BB) is Freq(Pred) * Cost(Copy). 691 // Assuming Cost(Copy) == Cost(Branch), we could simplify it to : 692 // Freq(Pred) / Freq(BB) > 2. 693 // Note that if there are multiple empty blocks sharing the same incoming 694 // value for the PHIs in the DestBB, we consider them together. In such 695 // case, Cost(merging BB) will be the sum of their frequencies. 696 697 if (!isa<PHINode>(DestBB->begin())) 698 return true; 699 700 SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs; 701 702 // Find all other incoming blocks from which incoming values of all PHIs in 703 // DestBB are the same as the ones from BB. 704 for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E; 705 ++PI) { 706 BasicBlock *DestBBPred = *PI; 707 if (DestBBPred == BB) 708 continue; 709 710 if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) { 711 return DestPN.getIncomingValueForBlock(BB) == 712 DestPN.getIncomingValueForBlock(DestBBPred); 713 })) 714 SameIncomingValueBBs.insert(DestBBPred); 715 } 716 717 // See if all BB's incoming values are same as the value from Pred. In this 718 // case, no reason to skip merging because COPYs are expected to be place in 719 // Pred already. 720 if (SameIncomingValueBBs.count(Pred)) 721 return true; 722 723 BlockFrequency PredFreq = BFI->getBlockFreq(Pred); 724 BlockFrequency BBFreq = BFI->getBlockFreq(BB); 725 726 for (auto SameValueBB : SameIncomingValueBBs) 727 if (SameValueBB->getUniquePredecessor() == Pred && 728 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB)) 729 BBFreq += BFI->getBlockFreq(SameValueBB); 730 731 return PredFreq.getFrequency() <= 732 BBFreq.getFrequency() * FreqRatioToSkipMerge; 733 } 734 735 /// Return true if we can merge BB into DestBB if there is a single 736 /// unconditional branch between them, and BB contains no other non-phi 737 /// instructions. 738 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB, 739 const BasicBlock *DestBB) const { 740 // We only want to eliminate blocks whose phi nodes are used by phi nodes in 741 // the successor. If there are more complex condition (e.g. preheaders), 742 // don't mess around with them. 743 for (const PHINode &PN : BB->phis()) { 744 for (const User *U : PN.users()) { 745 const Instruction *UI = cast<Instruction>(U); 746 if (UI->getParent() != DestBB || !isa<PHINode>(UI)) 747 return false; 748 // If User is inside DestBB block and it is a PHINode then check 749 // incoming value. If incoming value is not from BB then this is 750 // a complex condition (e.g. preheaders) we want to avoid here. 751 if (UI->getParent() == DestBB) { 752 if (const PHINode *UPN = dyn_cast<PHINode>(UI)) 753 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) { 754 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I)); 755 if (Insn && Insn->getParent() == BB && 756 Insn->getParent() != UPN->getIncomingBlock(I)) 757 return false; 758 } 759 } 760 } 761 } 762 763 // If BB and DestBB contain any common predecessors, then the phi nodes in BB 764 // and DestBB may have conflicting incoming values for the block. If so, we 765 // can't merge the block. 766 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin()); 767 if (!DestBBPN) return true; // no conflict. 768 769 // Collect the preds of BB. 770 SmallPtrSet<const BasicBlock*, 16> BBPreds; 771 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 772 // It is faster to get preds from a PHI than with pred_iterator. 773 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 774 BBPreds.insert(BBPN->getIncomingBlock(i)); 775 } else { 776 BBPreds.insert(pred_begin(BB), pred_end(BB)); 777 } 778 779 // Walk the preds of DestBB. 780 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { 781 BasicBlock *Pred = DestBBPN->getIncomingBlock(i); 782 if (BBPreds.count(Pred)) { // Common predecessor? 783 for (const PHINode &PN : DestBB->phis()) { 784 const Value *V1 = PN.getIncomingValueForBlock(Pred); 785 const Value *V2 = PN.getIncomingValueForBlock(BB); 786 787 // If V2 is a phi node in BB, look up what the mapped value will be. 788 if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) 789 if (V2PN->getParent() == BB) 790 V2 = V2PN->getIncomingValueForBlock(Pred); 791 792 // If there is a conflict, bail out. 793 if (V1 != V2) return false; 794 } 795 } 796 } 797 798 return true; 799 } 800 801 /// Eliminate a basic block that has only phi's and an unconditional branch in 802 /// it. 803 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) { 804 BranchInst *BI = cast<BranchInst>(BB->getTerminator()); 805 BasicBlock *DestBB = BI->getSuccessor(0); 806 807 LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" 808 << *BB << *DestBB); 809 810 // If the destination block has a single pred, then this is a trivial edge, 811 // just collapse it. 812 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { 813 if (SinglePred != DestBB) { 814 assert(SinglePred == BB && 815 "Single predecessor not the same as predecessor"); 816 // Merge DestBB into SinglePred/BB and delete it. 817 MergeBlockIntoPredecessor(DestBB); 818 // Note: BB(=SinglePred) will not be deleted on this path. 819 // DestBB(=its single successor) is the one that was deleted. 820 LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n"); 821 return; 822 } 823 } 824 825 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB 826 // to handle the new incoming edges it is about to have. 827 for (PHINode &PN : DestBB->phis()) { 828 // Remove the incoming value for BB, and remember it. 829 Value *InVal = PN.removeIncomingValue(BB, false); 830 831 // Two options: either the InVal is a phi node defined in BB or it is some 832 // value that dominates BB. 833 PHINode *InValPhi = dyn_cast<PHINode>(InVal); 834 if (InValPhi && InValPhi->getParent() == BB) { 835 // Add all of the input values of the input PHI as inputs of this phi. 836 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) 837 PN.addIncoming(InValPhi->getIncomingValue(i), 838 InValPhi->getIncomingBlock(i)); 839 } else { 840 // Otherwise, add one instance of the dominating value for each edge that 841 // we will be adding. 842 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 843 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 844 PN.addIncoming(InVal, BBPN->getIncomingBlock(i)); 845 } else { 846 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) 847 PN.addIncoming(InVal, *PI); 848 } 849 } 850 } 851 852 // The PHIs are now updated, change everything that refers to BB to use 853 // DestBB and remove BB. 854 BB->replaceAllUsesWith(DestBB); 855 BB->eraseFromParent(); 856 ++NumBlocksElim; 857 858 LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 859 } 860 861 // Computes a map of base pointer relocation instructions to corresponding 862 // derived pointer relocation instructions given a vector of all relocate calls 863 static void computeBaseDerivedRelocateMap( 864 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, 865 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> 866 &RelocateInstMap) { 867 // Collect information in two maps: one primarily for locating the base object 868 // while filling the second map; the second map is the final structure holding 869 // a mapping between Base and corresponding Derived relocate calls 870 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; 871 for (auto *ThisRelocate : AllRelocateCalls) { 872 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), 873 ThisRelocate->getDerivedPtrIndex()); 874 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); 875 } 876 for (auto &Item : RelocateIdxMap) { 877 std::pair<unsigned, unsigned> Key = Item.first; 878 if (Key.first == Key.second) 879 // Base relocation: nothing to insert 880 continue; 881 882 GCRelocateInst *I = Item.second; 883 auto BaseKey = std::make_pair(Key.first, Key.first); 884 885 // We're iterating over RelocateIdxMap so we cannot modify it. 886 auto MaybeBase = RelocateIdxMap.find(BaseKey); 887 if (MaybeBase == RelocateIdxMap.end()) 888 // TODO: We might want to insert a new base object relocate and gep off 889 // that, if there are enough derived object relocates. 890 continue; 891 892 RelocateInstMap[MaybeBase->second].push_back(I); 893 } 894 } 895 896 // Accepts a GEP and extracts the operands into a vector provided they're all 897 // small integer constants 898 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, 899 SmallVectorImpl<Value *> &OffsetV) { 900 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 901 // Only accept small constant integer operands 902 auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); 903 if (!Op || Op->getZExtValue() > 20) 904 return false; 905 } 906 907 for (unsigned i = 1; i < GEP->getNumOperands(); i++) 908 OffsetV.push_back(GEP->getOperand(i)); 909 return true; 910 } 911 912 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to 913 // replace, computes a replacement, and affects it. 914 static bool 915 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, 916 const SmallVectorImpl<GCRelocateInst *> &Targets) { 917 bool MadeChange = false; 918 // We must ensure the relocation of derived pointer is defined after 919 // relocation of base pointer. If we find a relocation corresponding to base 920 // defined earlier than relocation of base then we move relocation of base 921 // right before found relocation. We consider only relocation in the same 922 // basic block as relocation of base. Relocations from other basic block will 923 // be skipped by optimization and we do not care about them. 924 for (auto R = RelocatedBase->getParent()->getFirstInsertionPt(); 925 &*R != RelocatedBase; ++R) 926 if (auto RI = dyn_cast<GCRelocateInst>(R)) 927 if (RI->getStatepoint() == RelocatedBase->getStatepoint()) 928 if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) { 929 RelocatedBase->moveBefore(RI); 930 break; 931 } 932 933 for (GCRelocateInst *ToReplace : Targets) { 934 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() && 935 "Not relocating a derived object of the original base object"); 936 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) { 937 // A duplicate relocate call. TODO: coalesce duplicates. 938 continue; 939 } 940 941 if (RelocatedBase->getParent() != ToReplace->getParent()) { 942 // Base and derived relocates are in different basic blocks. 943 // In this case transform is only valid when base dominates derived 944 // relocate. However it would be too expensive to check dominance 945 // for each such relocate, so we skip the whole transformation. 946 continue; 947 } 948 949 Value *Base = ToReplace->getBasePtr(); 950 auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr()); 951 if (!Derived || Derived->getPointerOperand() != Base) 952 continue; 953 954 SmallVector<Value *, 2> OffsetV; 955 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV)) 956 continue; 957 958 // Create a Builder and replace the target callsite with a gep 959 assert(RelocatedBase->getNextNode() && 960 "Should always have one since it's not a terminator"); 961 962 // Insert after RelocatedBase 963 IRBuilder<> Builder(RelocatedBase->getNextNode()); 964 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc()); 965 966 // If gc_relocate does not match the actual type, cast it to the right type. 967 // In theory, there must be a bitcast after gc_relocate if the type does not 968 // match, and we should reuse it to get the derived pointer. But it could be 969 // cases like this: 970 // bb1: 971 // ... 972 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) 973 // br label %merge 974 // 975 // bb2: 976 // ... 977 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) 978 // br label %merge 979 // 980 // merge: 981 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ] 982 // %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)* 983 // 984 // In this case, we can not find the bitcast any more. So we insert a new bitcast 985 // no matter there is already one or not. In this way, we can handle all cases, and 986 // the extra bitcast should be optimized away in later passes. 987 Value *ActualRelocatedBase = RelocatedBase; 988 if (RelocatedBase->getType() != Base->getType()) { 989 ActualRelocatedBase = 990 Builder.CreateBitCast(RelocatedBase, Base->getType()); 991 } 992 Value *Replacement = Builder.CreateGEP( 993 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV)); 994 Replacement->takeName(ToReplace); 995 // If the newly generated derived pointer's type does not match the original derived 996 // pointer's type, cast the new derived pointer to match it. Same reasoning as above. 997 Value *ActualReplacement = Replacement; 998 if (Replacement->getType() != ToReplace->getType()) { 999 ActualReplacement = 1000 Builder.CreateBitCast(Replacement, ToReplace->getType()); 1001 } 1002 ToReplace->replaceAllUsesWith(ActualReplacement); 1003 ToReplace->eraseFromParent(); 1004 1005 MadeChange = true; 1006 } 1007 return MadeChange; 1008 } 1009 1010 // Turns this: 1011 // 1012 // %base = ... 1013 // %ptr = gep %base + 15 1014 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1015 // %base' = relocate(%tok, i32 4, i32 4) 1016 // %ptr' = relocate(%tok, i32 4, i32 5) 1017 // %val = load %ptr' 1018 // 1019 // into this: 1020 // 1021 // %base = ... 1022 // %ptr = gep %base + 15 1023 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1024 // %base' = gc.relocate(%tok, i32 4, i32 4) 1025 // %ptr' = gep %base' + 15 1026 // %val = load %ptr' 1027 bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) { 1028 bool MadeChange = false; 1029 SmallVector<GCRelocateInst *, 2> AllRelocateCalls; 1030 1031 for (auto *U : I.users()) 1032 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) 1033 // Collect all the relocate calls associated with a statepoint 1034 AllRelocateCalls.push_back(Relocate); 1035 1036 // We need atleast one base pointer relocation + one derived pointer 1037 // relocation to mangle 1038 if (AllRelocateCalls.size() < 2) 1039 return false; 1040 1041 // RelocateInstMap is a mapping from the base relocate instruction to the 1042 // corresponding derived relocate instructions 1043 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; 1044 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); 1045 if (RelocateInstMap.empty()) 1046 return false; 1047 1048 for (auto &Item : RelocateInstMap) 1049 // Item.first is the RelocatedBase to offset against 1050 // Item.second is the vector of Targets to replace 1051 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second); 1052 return MadeChange; 1053 } 1054 1055 /// SinkCast - Sink the specified cast instruction into its user blocks 1056 static bool SinkCast(CastInst *CI) { 1057 BasicBlock *DefBB = CI->getParent(); 1058 1059 /// InsertedCasts - Only insert a cast in each block once. 1060 DenseMap<BasicBlock*, CastInst*> InsertedCasts; 1061 1062 bool MadeChange = false; 1063 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 1064 UI != E; ) { 1065 Use &TheUse = UI.getUse(); 1066 Instruction *User = cast<Instruction>(*UI); 1067 1068 // Figure out which BB this cast is used in. For PHI's this is the 1069 // appropriate predecessor block. 1070 BasicBlock *UserBB = User->getParent(); 1071 if (PHINode *PN = dyn_cast<PHINode>(User)) { 1072 UserBB = PN->getIncomingBlock(TheUse); 1073 } 1074 1075 // Preincrement use iterator so we don't invalidate it. 1076 ++UI; 1077 1078 // The first insertion point of a block containing an EH pad is after the 1079 // pad. If the pad is the user, we cannot sink the cast past the pad. 1080 if (User->isEHPad()) 1081 continue; 1082 1083 // If the block selected to receive the cast is an EH pad that does not 1084 // allow non-PHI instructions before the terminator, we can't sink the 1085 // cast. 1086 if (UserBB->getTerminator()->isEHPad()) 1087 continue; 1088 1089 // If this user is in the same block as the cast, don't change the cast. 1090 if (UserBB == DefBB) continue; 1091 1092 // If we have already inserted a cast into this block, use it. 1093 CastInst *&InsertedCast = InsertedCasts[UserBB]; 1094 1095 if (!InsertedCast) { 1096 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1097 assert(InsertPt != UserBB->end()); 1098 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 1099 CI->getType(), "", &*InsertPt); 1100 InsertedCast->setDebugLoc(CI->getDebugLoc()); 1101 } 1102 1103 // Replace a use of the cast with a use of the new cast. 1104 TheUse = InsertedCast; 1105 MadeChange = true; 1106 ++NumCastUses; 1107 } 1108 1109 // If we removed all uses, nuke the cast. 1110 if (CI->use_empty()) { 1111 salvageDebugInfo(*CI); 1112 CI->eraseFromParent(); 1113 MadeChange = true; 1114 } 1115 1116 return MadeChange; 1117 } 1118 1119 /// If the specified cast instruction is a noop copy (e.g. it's casting from 1120 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 1121 /// reduce the number of virtual registers that must be created and coalesced. 1122 /// 1123 /// Return true if any changes are made. 1124 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 1125 const DataLayout &DL) { 1126 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition 1127 // than sinking only nop casts, but is helpful on some platforms. 1128 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { 1129 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), 1130 ASC->getDestAddressSpace())) 1131 return false; 1132 } 1133 1134 // If this is a noop copy, 1135 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 1136 EVT DstVT = TLI.getValueType(DL, CI->getType()); 1137 1138 // This is an fp<->int conversion? 1139 if (SrcVT.isInteger() != DstVT.isInteger()) 1140 return false; 1141 1142 // If this is an extension, it will be a zero or sign extension, which 1143 // isn't a noop. 1144 if (SrcVT.bitsLT(DstVT)) return false; 1145 1146 // If these values will be promoted, find out what they will be promoted 1147 // to. This helps us consider truncates on PPC as noop copies when they 1148 // are. 1149 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 1150 TargetLowering::TypePromoteInteger) 1151 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 1152 if (TLI.getTypeAction(CI->getContext(), DstVT) == 1153 TargetLowering::TypePromoteInteger) 1154 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 1155 1156 // If, after promotion, these are the same types, this is a noop copy. 1157 if (SrcVT != DstVT) 1158 return false; 1159 1160 return SinkCast(CI); 1161 } 1162 1163 static bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, CmpInst *Cmp, 1164 Intrinsic::ID IID, DominatorTree &DT) { 1165 // We allow matching the canonical IR (add X, C) back to (usubo X, -C). 1166 Value *Arg0 = BO->getOperand(0); 1167 Value *Arg1 = BO->getOperand(1); 1168 if (BO->getOpcode() == Instruction::Add && 1169 IID == Intrinsic::usub_with_overflow) { 1170 assert(isa<Constant>(Arg1) && "Unexpected input for usubo"); 1171 Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1)); 1172 } 1173 1174 Instruction *InsertPt; 1175 if (BO->hasOneUse() && BO->user_back() == Cmp) { 1176 // If the math is only used by the compare, insert at the compare to keep 1177 // the condition in the same block as its users. (CGP aggressively sinks 1178 // compares to help out SDAG.) 1179 InsertPt = Cmp; 1180 } else { 1181 // The math and compare may be independent instructions. Check dominance to 1182 // determine the insertion point for the intrinsic. 1183 bool MathDominates = DT.dominates(BO, Cmp); 1184 if (!MathDominates && !DT.dominates(Cmp, BO)) 1185 return false; 1186 InsertPt = MathDominates ? cast<Instruction>(BO) : cast<Instruction>(Cmp); 1187 } 1188 1189 IRBuilder<> Builder(InsertPt); 1190 Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1); 1191 Value *Math = Builder.CreateExtractValue(MathOV, 0, "math"); 1192 Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov"); 1193 BO->replaceAllUsesWith(Math); 1194 Cmp->replaceAllUsesWith(OV); 1195 BO->eraseFromParent(); 1196 Cmp->eraseFromParent(); 1197 return true; 1198 } 1199 1200 /// Match special-case patterns that check for unsigned add overflow. 1201 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp, 1202 BinaryOperator *&Add) { 1203 // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val) 1204 // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero) 1205 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1206 1207 // We are not expecting non-canonical/degenerate code. Just bail out. 1208 if (isa<Constant>(A)) 1209 return false; 1210 1211 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1212 if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes())) 1213 B = ConstantInt::get(B->getType(), 1); 1214 else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) 1215 B = ConstantInt::get(B->getType(), -1); 1216 else 1217 return false; 1218 1219 // Check the users of the variable operand of the compare looking for an add 1220 // with the adjusted constant. 1221 for (User *U : A->users()) { 1222 if (match(U, m_Add(m_Specific(A), m_Specific(B)))) { 1223 Add = cast<BinaryOperator>(U); 1224 return true; 1225 } 1226 } 1227 return false; 1228 } 1229 1230 /// Try to combine the compare into a call to the llvm.uadd.with.overflow 1231 /// intrinsic. Return true if any changes were made. 1232 static bool combineToUAddWithOverflow(CmpInst *Cmp, const TargetLowering &TLI, 1233 const DataLayout &DL, DominatorTree &DT, 1234 bool &ModifiedDT) { 1235 Value *A, *B; 1236 BinaryOperator *Add; 1237 if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) 1238 if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add)) 1239 return false; 1240 1241 if (!TLI.shouldFormOverflowOp(ISD::UADDO, 1242 TLI.getValueType(DL, Add->getType()))) 1243 return false; 1244 1245 // We don't want to move around uses of condition values this late, so we 1246 // check if it is legal to create the call to the intrinsic in the basic 1247 // block containing the icmp. 1248 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) 1249 return false; 1250 1251 if (!replaceMathCmpWithIntrinsic(Add, Cmp, Intrinsic::uadd_with_overflow, DT)) 1252 return false; 1253 1254 // Reset callers - do not crash by iterating over a dead instruction. 1255 ModifiedDT = true; 1256 return true; 1257 } 1258 1259 static bool combineToUSubWithOverflow(CmpInst *Cmp, const TargetLowering &TLI, 1260 const DataLayout &DL, DominatorTree &DT, 1261 bool &ModifiedDT) { 1262 // Convert (A u> B) to (A u< B) to simplify pattern matching. 1263 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1264 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1265 if (Pred == ICmpInst::ICMP_UGT) { 1266 std::swap(A, B); 1267 Pred = ICmpInst::ICMP_ULT; 1268 } 1269 // Convert special-case: (A == 0) is the same as (A u< 1). 1270 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { 1271 B = ConstantInt::get(B->getType(), 1); 1272 Pred = ICmpInst::ICMP_ULT; 1273 } 1274 // Convert special-case: (A != 0) is the same as (0 u< A). 1275 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { 1276 std::swap(A, B); 1277 Pred = ICmpInst::ICMP_ULT; 1278 } 1279 if (Pred != ICmpInst::ICMP_ULT) 1280 return false; 1281 1282 // Walk the users of a variable operand of a compare looking for a subtract or 1283 // add with that same operand. Also match the 2nd operand of the compare to 1284 // the add/sub, but that may be a negated constant operand of an add. 1285 Value *CmpVariableOperand = isa<Constant>(A) ? B : A; 1286 BinaryOperator *Sub = nullptr; 1287 for (User *U : CmpVariableOperand->users()) { 1288 // A - B, A u< B --> usubo(A, B) 1289 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { 1290 Sub = cast<BinaryOperator>(U); 1291 break; 1292 } 1293 1294 // A + (-C), A u< C (canonicalized form of (sub A, C)) 1295 const APInt *CmpC, *AddC; 1296 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && 1297 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { 1298 Sub = cast<BinaryOperator>(U); 1299 break; 1300 } 1301 } 1302 if (!Sub) 1303 return false; 1304 1305 if (!TLI.shouldFormOverflowOp(ISD::USUBO, 1306 TLI.getValueType(DL, Sub->getType()))) 1307 return false; 1308 1309 if (!replaceMathCmpWithIntrinsic(Sub, Cmp, Intrinsic::usub_with_overflow, DT)) 1310 return false; 1311 1312 // Reset callers - do not crash by iterating over a dead instruction. 1313 ModifiedDT = true; 1314 return true; 1315 } 1316 1317 /// Sink the given CmpInst into user blocks to reduce the number of virtual 1318 /// registers that must be created and coalesced. This is a clear win except on 1319 /// targets with multiple condition code registers (PowerPC), where it might 1320 /// lose; some adjustment may be wanted there. 1321 /// 1322 /// Return true if any changes are made. 1323 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) { 1324 if (TLI.hasMultipleConditionRegisters()) 1325 return false; 1326 1327 // Avoid sinking soft-FP comparisons, since this can move them into a loop. 1328 if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp)) 1329 return false; 1330 1331 // Only insert a cmp in each block once. 1332 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 1333 1334 bool MadeChange = false; 1335 for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end(); 1336 UI != E; ) { 1337 Use &TheUse = UI.getUse(); 1338 Instruction *User = cast<Instruction>(*UI); 1339 1340 // Preincrement use iterator so we don't invalidate it. 1341 ++UI; 1342 1343 // Don't bother for PHI nodes. 1344 if (isa<PHINode>(User)) 1345 continue; 1346 1347 // Figure out which BB this cmp is used in. 1348 BasicBlock *UserBB = User->getParent(); 1349 BasicBlock *DefBB = Cmp->getParent(); 1350 1351 // If this user is in the same block as the cmp, don't change the cmp. 1352 if (UserBB == DefBB) continue; 1353 1354 // If we have already inserted a cmp into this block, use it. 1355 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 1356 1357 if (!InsertedCmp) { 1358 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1359 assert(InsertPt != UserBB->end()); 1360 InsertedCmp = 1361 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), 1362 Cmp->getOperand(0), Cmp->getOperand(1), "", 1363 &*InsertPt); 1364 // Propagate the debug info. 1365 InsertedCmp->setDebugLoc(Cmp->getDebugLoc()); 1366 } 1367 1368 // Replace a use of the cmp with a use of the new cmp. 1369 TheUse = InsertedCmp; 1370 MadeChange = true; 1371 ++NumCmpUses; 1372 } 1373 1374 // If we removed all uses, nuke the cmp. 1375 if (Cmp->use_empty()) { 1376 Cmp->eraseFromParent(); 1377 MadeChange = true; 1378 } 1379 1380 return MadeChange; 1381 } 1382 1383 static bool optimizeCmp(CmpInst *Cmp, const TargetLowering &TLI, 1384 const DataLayout &DL, DominatorTree &DT, 1385 bool &ModifiedDT) { 1386 if (sinkCmpExpression(Cmp, TLI)) 1387 return true; 1388 1389 if (combineToUAddWithOverflow(Cmp, TLI, DL, DT, ModifiedDT)) 1390 return true; 1391 1392 if (combineToUSubWithOverflow(Cmp, TLI, DL, DT, ModifiedDT)) 1393 return true; 1394 1395 return false; 1396 } 1397 1398 /// Duplicate and sink the given 'and' instruction into user blocks where it is 1399 /// used in a compare to allow isel to generate better code for targets where 1400 /// this operation can be combined. 1401 /// 1402 /// Return true if any changes are made. 1403 static bool sinkAndCmp0Expression(Instruction *AndI, 1404 const TargetLowering &TLI, 1405 SetOfInstrs &InsertedInsts) { 1406 // Double-check that we're not trying to optimize an instruction that was 1407 // already optimized by some other part of this pass. 1408 assert(!InsertedInsts.count(AndI) && 1409 "Attempting to optimize already optimized and instruction"); 1410 (void) InsertedInsts; 1411 1412 // Nothing to do for single use in same basic block. 1413 if (AndI->hasOneUse() && 1414 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) 1415 return false; 1416 1417 // Try to avoid cases where sinking/duplicating is likely to increase register 1418 // pressure. 1419 if (!isa<ConstantInt>(AndI->getOperand(0)) && 1420 !isa<ConstantInt>(AndI->getOperand(1)) && 1421 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) 1422 return false; 1423 1424 for (auto *U : AndI->users()) { 1425 Instruction *User = cast<Instruction>(U); 1426 1427 // Only sink for and mask feeding icmp with 0. 1428 if (!isa<ICmpInst>(User)) 1429 return false; 1430 1431 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); 1432 if (!CmpC || !CmpC->isZero()) 1433 return false; 1434 } 1435 1436 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) 1437 return false; 1438 1439 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); 1440 LLVM_DEBUG(AndI->getParent()->dump()); 1441 1442 // Push the 'and' into the same block as the icmp 0. There should only be 1443 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any 1444 // others, so we don't need to keep track of which BBs we insert into. 1445 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); 1446 UI != E; ) { 1447 Use &TheUse = UI.getUse(); 1448 Instruction *User = cast<Instruction>(*UI); 1449 1450 // Preincrement use iterator so we don't invalidate it. 1451 ++UI; 1452 1453 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); 1454 1455 // Keep the 'and' in the same place if the use is already in the same block. 1456 Instruction *InsertPt = 1457 User->getParent() == AndI->getParent() ? AndI : User; 1458 Instruction *InsertedAnd = 1459 BinaryOperator::Create(Instruction::And, AndI->getOperand(0), 1460 AndI->getOperand(1), "", InsertPt); 1461 // Propagate the debug info. 1462 InsertedAnd->setDebugLoc(AndI->getDebugLoc()); 1463 1464 // Replace a use of the 'and' with a use of the new 'and'. 1465 TheUse = InsertedAnd; 1466 ++NumAndUses; 1467 LLVM_DEBUG(User->getParent()->dump()); 1468 } 1469 1470 // We removed all uses, nuke the and. 1471 AndI->eraseFromParent(); 1472 return true; 1473 } 1474 1475 /// Check if the candidates could be combined with a shift instruction, which 1476 /// includes: 1477 /// 1. Truncate instruction 1478 /// 2. And instruction and the imm is a mask of the low bits: 1479 /// imm & (imm+1) == 0 1480 static bool isExtractBitsCandidateUse(Instruction *User) { 1481 if (!isa<TruncInst>(User)) { 1482 if (User->getOpcode() != Instruction::And || 1483 !isa<ConstantInt>(User->getOperand(1))) 1484 return false; 1485 1486 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 1487 1488 if ((Cimm & (Cimm + 1)).getBoolValue()) 1489 return false; 1490 } 1491 return true; 1492 } 1493 1494 /// Sink both shift and truncate instruction to the use of truncate's BB. 1495 static bool 1496 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 1497 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 1498 const TargetLowering &TLI, const DataLayout &DL) { 1499 BasicBlock *UserBB = User->getParent(); 1500 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 1501 TruncInst *TruncI = dyn_cast<TruncInst>(User); 1502 bool MadeChange = false; 1503 1504 for (Value::user_iterator TruncUI = TruncI->user_begin(), 1505 TruncE = TruncI->user_end(); 1506 TruncUI != TruncE;) { 1507 1508 Use &TruncTheUse = TruncUI.getUse(); 1509 Instruction *TruncUser = cast<Instruction>(*TruncUI); 1510 // Preincrement use iterator so we don't invalidate it. 1511 1512 ++TruncUI; 1513 1514 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1515 if (!ISDOpcode) 1516 continue; 1517 1518 // If the use is actually a legal node, there will not be an 1519 // implicit truncate. 1520 // FIXME: always querying the result type is just an 1521 // approximation; some nodes' legality is determined by the 1522 // operand or other means. There's no good way to find out though. 1523 if (TLI.isOperationLegalOrCustom( 1524 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1525 continue; 1526 1527 // Don't bother for PHI nodes. 1528 if (isa<PHINode>(TruncUser)) 1529 continue; 1530 1531 BasicBlock *TruncUserBB = TruncUser->getParent(); 1532 1533 if (UserBB == TruncUserBB) 1534 continue; 1535 1536 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1537 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1538 1539 if (!InsertedShift && !InsertedTrunc) { 1540 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1541 assert(InsertPt != TruncUserBB->end()); 1542 // Sink the shift 1543 if (ShiftI->getOpcode() == Instruction::AShr) 1544 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1545 "", &*InsertPt); 1546 else 1547 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1548 "", &*InsertPt); 1549 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1550 1551 // Sink the trunc 1552 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1553 TruncInsertPt++; 1554 assert(TruncInsertPt != TruncUserBB->end()); 1555 1556 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1557 TruncI->getType(), "", &*TruncInsertPt); 1558 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); 1559 1560 MadeChange = true; 1561 1562 TruncTheUse = InsertedTrunc; 1563 } 1564 } 1565 return MadeChange; 1566 } 1567 1568 /// Sink the shift *right* instruction into user blocks if the uses could 1569 /// potentially be combined with this shift instruction and generate BitExtract 1570 /// instruction. It will only be applied if the architecture supports BitExtract 1571 /// instruction. Here is an example: 1572 /// BB1: 1573 /// %x.extract.shift = lshr i64 %arg1, 32 1574 /// BB2: 1575 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1576 /// ==> 1577 /// 1578 /// BB2: 1579 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1580 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1581 /// 1582 /// CodeGen will recognize the pattern in BB2 and generate BitExtract 1583 /// instruction. 1584 /// Return true if any changes are made. 1585 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1586 const TargetLowering &TLI, 1587 const DataLayout &DL) { 1588 BasicBlock *DefBB = ShiftI->getParent(); 1589 1590 /// Only insert instructions in each block once. 1591 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1592 1593 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1594 1595 bool MadeChange = false; 1596 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1597 UI != E;) { 1598 Use &TheUse = UI.getUse(); 1599 Instruction *User = cast<Instruction>(*UI); 1600 // Preincrement use iterator so we don't invalidate it. 1601 ++UI; 1602 1603 // Don't bother for PHI nodes. 1604 if (isa<PHINode>(User)) 1605 continue; 1606 1607 if (!isExtractBitsCandidateUse(User)) 1608 continue; 1609 1610 BasicBlock *UserBB = User->getParent(); 1611 1612 if (UserBB == DefBB) { 1613 // If the shift and truncate instruction are in the same BB. The use of 1614 // the truncate(TruncUse) may still introduce another truncate if not 1615 // legal. In this case, we would like to sink both shift and truncate 1616 // instruction to the BB of TruncUse. 1617 // for example: 1618 // BB1: 1619 // i64 shift.result = lshr i64 opnd, imm 1620 // trunc.result = trunc shift.result to i16 1621 // 1622 // BB2: 1623 // ----> We will have an implicit truncate here if the architecture does 1624 // not have i16 compare. 1625 // cmp i16 trunc.result, opnd2 1626 // 1627 if (isa<TruncInst>(User) && shiftIsLegal 1628 // If the type of the truncate is legal, no truncate will be 1629 // introduced in other basic blocks. 1630 && 1631 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1632 MadeChange = 1633 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1634 1635 continue; 1636 } 1637 // If we have already inserted a shift into this block, use it. 1638 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1639 1640 if (!InsertedShift) { 1641 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1642 assert(InsertPt != UserBB->end()); 1643 1644 if (ShiftI->getOpcode() == Instruction::AShr) 1645 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1646 "", &*InsertPt); 1647 else 1648 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1649 "", &*InsertPt); 1650 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1651 1652 MadeChange = true; 1653 } 1654 1655 // Replace a use of the shift with a use of the new shift. 1656 TheUse = InsertedShift; 1657 } 1658 1659 // If we removed all uses, nuke the shift. 1660 if (ShiftI->use_empty()) { 1661 salvageDebugInfo(*ShiftI); 1662 ShiftI->eraseFromParent(); 1663 } 1664 1665 return MadeChange; 1666 } 1667 1668 /// If counting leading or trailing zeros is an expensive operation and a zero 1669 /// input is defined, add a check for zero to avoid calling the intrinsic. 1670 /// 1671 /// We want to transform: 1672 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1673 /// 1674 /// into: 1675 /// entry: 1676 /// %cmpz = icmp eq i64 %A, 0 1677 /// br i1 %cmpz, label %cond.end, label %cond.false 1678 /// cond.false: 1679 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1680 /// br label %cond.end 1681 /// cond.end: 1682 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1683 /// 1684 /// If the transform is performed, return true and set ModifiedDT to true. 1685 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1686 const TargetLowering *TLI, 1687 const DataLayout *DL, 1688 bool &ModifiedDT) { 1689 if (!TLI || !DL) 1690 return false; 1691 1692 // If a zero input is undefined, it doesn't make sense to despeculate that. 1693 if (match(CountZeros->getOperand(1), m_One())) 1694 return false; 1695 1696 // If it's cheap to speculate, there's nothing to do. 1697 auto IntrinsicID = CountZeros->getIntrinsicID(); 1698 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1699 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1700 return false; 1701 1702 // Only handle legal scalar cases. Anything else requires too much work. 1703 Type *Ty = CountZeros->getType(); 1704 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1705 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 1706 return false; 1707 1708 // The intrinsic will be sunk behind a compare against zero and branch. 1709 BasicBlock *StartBlock = CountZeros->getParent(); 1710 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1711 1712 // Create another block after the count zero intrinsic. A PHI will be added 1713 // in this block to select the result of the intrinsic or the bit-width 1714 // constant if the input to the intrinsic is zero. 1715 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 1716 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 1717 1718 // Set up a builder to create a compare, conditional branch, and PHI. 1719 IRBuilder<> Builder(CountZeros->getContext()); 1720 Builder.SetInsertPoint(StartBlock->getTerminator()); 1721 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 1722 1723 // Replace the unconditional branch that was created by the first split with 1724 // a compare against zero and a conditional branch. 1725 Value *Zero = Constant::getNullValue(Ty); 1726 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 1727 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 1728 StartBlock->getTerminator()->eraseFromParent(); 1729 1730 // Create a PHI in the end block to select either the output of the intrinsic 1731 // or the bit width of the operand. 1732 Builder.SetInsertPoint(&EndBlock->front()); 1733 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 1734 CountZeros->replaceAllUsesWith(PN); 1735 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 1736 PN->addIncoming(BitWidth, StartBlock); 1737 PN->addIncoming(CountZeros, CallBlock); 1738 1739 // We are explicitly handling the zero case, so we can set the intrinsic's 1740 // undefined zero argument to 'true'. This will also prevent reprocessing the 1741 // intrinsic; we only despeculate when a zero input is defined. 1742 CountZeros->setArgOperand(1, Builder.getTrue()); 1743 ModifiedDT = true; 1744 return true; 1745 } 1746 1747 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { 1748 BasicBlock *BB = CI->getParent(); 1749 1750 // Lower inline assembly if we can. 1751 // If we found an inline asm expession, and if the target knows how to 1752 // lower it to normal LLVM code, do so now. 1753 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 1754 if (TLI->ExpandInlineAsm(CI)) { 1755 // Avoid invalidating the iterator. 1756 CurInstIterator = BB->begin(); 1757 // Avoid processing instructions out of order, which could cause 1758 // reuse before a value is defined. 1759 SunkAddrs.clear(); 1760 return true; 1761 } 1762 // Sink address computing for memory operands into the block. 1763 if (optimizeInlineAsmInst(CI)) 1764 return true; 1765 } 1766 1767 // Align the pointer arguments to this call if the target thinks it's a good 1768 // idea 1769 unsigned MinSize, PrefAlign; 1770 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 1771 for (auto &Arg : CI->arg_operands()) { 1772 // We want to align both objects whose address is used directly and 1773 // objects whose address is used in casts and GEPs, though it only makes 1774 // sense for GEPs if the offset is a multiple of the desired alignment and 1775 // if size - offset meets the size threshold. 1776 if (!Arg->getType()->isPointerTy()) 1777 continue; 1778 APInt Offset(DL->getIndexSizeInBits( 1779 cast<PointerType>(Arg->getType())->getAddressSpace()), 1780 0); 1781 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 1782 uint64_t Offset2 = Offset.getLimitedValue(); 1783 if ((Offset2 & (PrefAlign-1)) != 0) 1784 continue; 1785 AllocaInst *AI; 1786 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 1787 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 1788 AI->setAlignment(PrefAlign); 1789 // Global variables can only be aligned if they are defined in this 1790 // object (i.e. they are uniquely initialized in this object), and 1791 // over-aligning global variables that have an explicit section is 1792 // forbidden. 1793 GlobalVariable *GV; 1794 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 1795 GV->getPointerAlignment(*DL) < PrefAlign && 1796 DL->getTypeAllocSize(GV->getValueType()) >= 1797 MinSize + Offset2) 1798 GV->setAlignment(PrefAlign); 1799 } 1800 // If this is a memcpy (or similar) then we may be able to improve the 1801 // alignment 1802 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 1803 unsigned DestAlign = getKnownAlignment(MI->getDest(), *DL); 1804 if (DestAlign > MI->getDestAlignment()) 1805 MI->setDestAlignment(DestAlign); 1806 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { 1807 unsigned SrcAlign = getKnownAlignment(MTI->getSource(), *DL); 1808 if (SrcAlign > MTI->getSourceAlignment()) 1809 MTI->setSourceAlignment(SrcAlign); 1810 } 1811 } 1812 } 1813 1814 // If we have a cold call site, try to sink addressing computation into the 1815 // cold block. This interacts with our handling for loads and stores to 1816 // ensure that we can fold all uses of a potential addressing computation 1817 // into their uses. TODO: generalize this to work over profiling data 1818 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 1819 for (auto &Arg : CI->arg_operands()) { 1820 if (!Arg->getType()->isPointerTy()) 1821 continue; 1822 unsigned AS = Arg->getType()->getPointerAddressSpace(); 1823 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 1824 } 1825 1826 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 1827 if (II) { 1828 switch (II->getIntrinsicID()) { 1829 default: break; 1830 case Intrinsic::experimental_widenable_condition: { 1831 // Give up on future widening oppurtunties so that we can fold away dead 1832 // paths and merge blocks before going into block-local instruction 1833 // selection. 1834 if (II->use_empty()) { 1835 II->eraseFromParent(); 1836 return true; 1837 } 1838 Constant *RetVal = ConstantInt::getTrue(II->getContext()); 1839 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1840 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1841 }); 1842 return true; 1843 } 1844 case Intrinsic::objectsize: { 1845 // Lower all uses of llvm.objectsize.* 1846 Value *RetVal = 1847 lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); 1848 1849 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1850 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1851 }); 1852 return true; 1853 } 1854 case Intrinsic::is_constant: { 1855 // If is_constant hasn't folded away yet, lower it to false now. 1856 Constant *RetVal = ConstantInt::get(II->getType(), 0); 1857 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 1858 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1859 }); 1860 return true; 1861 } 1862 case Intrinsic::aarch64_stlxr: 1863 case Intrinsic::aarch64_stxr: { 1864 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 1865 if (!ExtVal || !ExtVal->hasOneUse() || 1866 ExtVal->getParent() == CI->getParent()) 1867 return false; 1868 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 1869 ExtVal->moveBefore(CI); 1870 // Mark this instruction as "inserted by CGP", so that other 1871 // optimizations don't touch it. 1872 InsertedInsts.insert(ExtVal); 1873 return true; 1874 } 1875 1876 case Intrinsic::launder_invariant_group: 1877 case Intrinsic::strip_invariant_group: { 1878 Value *ArgVal = II->getArgOperand(0); 1879 auto it = LargeOffsetGEPMap.find(II); 1880 if (it != LargeOffsetGEPMap.end()) { 1881 // Merge entries in LargeOffsetGEPMap to reflect the RAUW. 1882 // Make sure not to have to deal with iterator invalidation 1883 // after possibly adding ArgVal to LargeOffsetGEPMap. 1884 auto GEPs = std::move(it->second); 1885 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); 1886 LargeOffsetGEPMap.erase(II); 1887 } 1888 1889 II->replaceAllUsesWith(ArgVal); 1890 II->eraseFromParent(); 1891 return true; 1892 } 1893 case Intrinsic::cttz: 1894 case Intrinsic::ctlz: 1895 // If counting zeros is expensive, try to avoid it. 1896 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 1897 } 1898 1899 if (TLI) { 1900 SmallVector<Value*, 2> PtrOps; 1901 Type *AccessTy; 1902 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 1903 while (!PtrOps.empty()) { 1904 Value *PtrVal = PtrOps.pop_back_val(); 1905 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 1906 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 1907 return true; 1908 } 1909 } 1910 } 1911 1912 // From here on out we're working with named functions. 1913 if (!CI->getCalledFunction()) return false; 1914 1915 // Lower all default uses of _chk calls. This is very similar 1916 // to what InstCombineCalls does, but here we are only lowering calls 1917 // to fortified library functions (e.g. __memcpy_chk) that have the default 1918 // "don't know" as the objectsize. Anything else should be left alone. 1919 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 1920 if (Value *V = Simplifier.optimizeCall(CI)) { 1921 CI->replaceAllUsesWith(V); 1922 CI->eraseFromParent(); 1923 return true; 1924 } 1925 1926 return false; 1927 } 1928 1929 /// Look for opportunities to duplicate return instructions to the predecessor 1930 /// to enable tail call optimizations. The case it is currently looking for is: 1931 /// @code 1932 /// bb0: 1933 /// %tmp0 = tail call i32 @f0() 1934 /// br label %return 1935 /// bb1: 1936 /// %tmp1 = tail call i32 @f1() 1937 /// br label %return 1938 /// bb2: 1939 /// %tmp2 = tail call i32 @f2() 1940 /// br label %return 1941 /// return: 1942 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 1943 /// ret i32 %retval 1944 /// @endcode 1945 /// 1946 /// => 1947 /// 1948 /// @code 1949 /// bb0: 1950 /// %tmp0 = tail call i32 @f0() 1951 /// ret i32 %tmp0 1952 /// bb1: 1953 /// %tmp1 = tail call i32 @f1() 1954 /// ret i32 %tmp1 1955 /// bb2: 1956 /// %tmp2 = tail call i32 @f2() 1957 /// ret i32 %tmp2 1958 /// @endcode 1959 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { 1960 if (!TLI) 1961 return false; 1962 1963 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 1964 if (!RetI) 1965 return false; 1966 1967 PHINode *PN = nullptr; 1968 BitCastInst *BCI = nullptr; 1969 Value *V = RetI->getReturnValue(); 1970 if (V) { 1971 BCI = dyn_cast<BitCastInst>(V); 1972 if (BCI) 1973 V = BCI->getOperand(0); 1974 1975 PN = dyn_cast<PHINode>(V); 1976 if (!PN) 1977 return false; 1978 } 1979 1980 if (PN && PN->getParent() != BB) 1981 return false; 1982 1983 // Make sure there are no instructions between the PHI and return, or that the 1984 // return is the first instruction in the block. 1985 if (PN) { 1986 BasicBlock::iterator BI = BB->begin(); 1987 // Skip over debug and the bitcast. 1988 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI) || &*BI == BCI); 1989 if (&*BI != RetI) 1990 return false; 1991 } else { 1992 BasicBlock::iterator BI = BB->begin(); 1993 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 1994 if (&*BI != RetI) 1995 return false; 1996 } 1997 1998 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 1999 /// call. 2000 const Function *F = BB->getParent(); 2001 SmallVector<CallInst*, 4> TailCalls; 2002 if (PN) { 2003 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2004 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 2005 // Make sure the phi value is indeed produced by the tail call. 2006 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && 2007 TLI->mayBeEmittedAsTailCall(CI) && 2008 attributesPermitTailCall(F, CI, RetI, *TLI)) 2009 TailCalls.push_back(CI); 2010 } 2011 } else { 2012 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2013 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 2014 if (!VisitedBBs.insert(*PI).second) 2015 continue; 2016 2017 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 2018 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 2019 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 2020 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 2021 if (RI == RE) 2022 continue; 2023 2024 CallInst *CI = dyn_cast<CallInst>(&*RI); 2025 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2026 attributesPermitTailCall(F, CI, RetI, *TLI)) 2027 TailCalls.push_back(CI); 2028 } 2029 } 2030 2031 bool Changed = false; 2032 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { 2033 CallInst *CI = TailCalls[i]; 2034 CallSite CS(CI); 2035 2036 // Make sure the call instruction is followed by an unconditional branch to 2037 // the return block. 2038 BasicBlock *CallBB = CI->getParent(); 2039 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); 2040 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2041 continue; 2042 2043 // Duplicate the return into CallBB. 2044 (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); 2045 ModifiedDT = Changed = true; 2046 ++NumRetsDup; 2047 } 2048 2049 // If we eliminated all predecessors of the block, delete the block now. 2050 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 2051 BB->eraseFromParent(); 2052 2053 return Changed; 2054 } 2055 2056 //===----------------------------------------------------------------------===// 2057 // Memory Optimization 2058 //===----------------------------------------------------------------------===// 2059 2060 namespace { 2061 2062 /// This is an extended version of TargetLowering::AddrMode 2063 /// which holds actual Value*'s for register values. 2064 struct ExtAddrMode : public TargetLowering::AddrMode { 2065 Value *BaseReg = nullptr; 2066 Value *ScaledReg = nullptr; 2067 Value *OriginalValue = nullptr; 2068 2069 enum FieldName { 2070 NoField = 0x00, 2071 BaseRegField = 0x01, 2072 BaseGVField = 0x02, 2073 BaseOffsField = 0x04, 2074 ScaledRegField = 0x08, 2075 ScaleField = 0x10, 2076 MultipleFields = 0xff 2077 }; 2078 2079 ExtAddrMode() = default; 2080 2081 void print(raw_ostream &OS) const; 2082 void dump() const; 2083 2084 FieldName compare(const ExtAddrMode &other) { 2085 // First check that the types are the same on each field, as differing types 2086 // is something we can't cope with later on. 2087 if (BaseReg && other.BaseReg && 2088 BaseReg->getType() != other.BaseReg->getType()) 2089 return MultipleFields; 2090 if (BaseGV && other.BaseGV && 2091 BaseGV->getType() != other.BaseGV->getType()) 2092 return MultipleFields; 2093 if (ScaledReg && other.ScaledReg && 2094 ScaledReg->getType() != other.ScaledReg->getType()) 2095 return MultipleFields; 2096 2097 // Check each field to see if it differs. 2098 unsigned Result = NoField; 2099 if (BaseReg != other.BaseReg) 2100 Result |= BaseRegField; 2101 if (BaseGV != other.BaseGV) 2102 Result |= BaseGVField; 2103 if (BaseOffs != other.BaseOffs) 2104 Result |= BaseOffsField; 2105 if (ScaledReg != other.ScaledReg) 2106 Result |= ScaledRegField; 2107 // Don't count 0 as being a different scale, because that actually means 2108 // unscaled (which will already be counted by having no ScaledReg). 2109 if (Scale && other.Scale && Scale != other.Scale) 2110 Result |= ScaleField; 2111 2112 if (countPopulation(Result) > 1) 2113 return MultipleFields; 2114 else 2115 return static_cast<FieldName>(Result); 2116 } 2117 2118 // An AddrMode is trivial if it involves no calculation i.e. it is just a base 2119 // with no offset. 2120 bool isTrivial() { 2121 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is 2122 // trivial if at most one of these terms is nonzero, except that BaseGV and 2123 // BaseReg both being zero actually means a null pointer value, which we 2124 // consider to be 'non-zero' here. 2125 return !BaseOffs && !Scale && !(BaseGV && BaseReg); 2126 } 2127 2128 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { 2129 switch (Field) { 2130 default: 2131 return nullptr; 2132 case BaseRegField: 2133 return BaseReg; 2134 case BaseGVField: 2135 return BaseGV; 2136 case ScaledRegField: 2137 return ScaledReg; 2138 case BaseOffsField: 2139 return ConstantInt::get(IntPtrTy, BaseOffs); 2140 } 2141 } 2142 2143 void SetCombinedField(FieldName Field, Value *V, 2144 const SmallVectorImpl<ExtAddrMode> &AddrModes) { 2145 switch (Field) { 2146 default: 2147 llvm_unreachable("Unhandled fields are expected to be rejected earlier"); 2148 break; 2149 case ExtAddrMode::BaseRegField: 2150 BaseReg = V; 2151 break; 2152 case ExtAddrMode::BaseGVField: 2153 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes 2154 // in the BaseReg field. 2155 assert(BaseReg == nullptr); 2156 BaseReg = V; 2157 BaseGV = nullptr; 2158 break; 2159 case ExtAddrMode::ScaledRegField: 2160 ScaledReg = V; 2161 // If we have a mix of scaled and unscaled addrmodes then we want scale 2162 // to be the scale and not zero. 2163 if (!Scale) 2164 for (const ExtAddrMode &AM : AddrModes) 2165 if (AM.Scale) { 2166 Scale = AM.Scale; 2167 break; 2168 } 2169 break; 2170 case ExtAddrMode::BaseOffsField: 2171 // The offset is no longer a constant, so it goes in ScaledReg with a 2172 // scale of 1. 2173 assert(ScaledReg == nullptr); 2174 ScaledReg = V; 2175 Scale = 1; 2176 BaseOffs = 0; 2177 break; 2178 } 2179 } 2180 }; 2181 2182 } // end anonymous namespace 2183 2184 #ifndef NDEBUG 2185 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2186 AM.print(OS); 2187 return OS; 2188 } 2189 #endif 2190 2191 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2192 void ExtAddrMode::print(raw_ostream &OS) const { 2193 bool NeedPlus = false; 2194 OS << "["; 2195 if (BaseGV) { 2196 OS << (NeedPlus ? " + " : "") 2197 << "GV:"; 2198 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2199 NeedPlus = true; 2200 } 2201 2202 if (BaseOffs) { 2203 OS << (NeedPlus ? " + " : "") 2204 << BaseOffs; 2205 NeedPlus = true; 2206 } 2207 2208 if (BaseReg) { 2209 OS << (NeedPlus ? " + " : "") 2210 << "Base:"; 2211 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2212 NeedPlus = true; 2213 } 2214 if (Scale) { 2215 OS << (NeedPlus ? " + " : "") 2216 << Scale << "*"; 2217 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2218 } 2219 2220 OS << ']'; 2221 } 2222 2223 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2224 print(dbgs()); 2225 dbgs() << '\n'; 2226 } 2227 #endif 2228 2229 namespace { 2230 2231 /// This class provides transaction based operation on the IR. 2232 /// Every change made through this class is recorded in the internal state and 2233 /// can be undone (rollback) until commit is called. 2234 class TypePromotionTransaction { 2235 /// This represents the common interface of the individual transaction. 2236 /// Each class implements the logic for doing one specific modification on 2237 /// the IR via the TypePromotionTransaction. 2238 class TypePromotionAction { 2239 protected: 2240 /// The Instruction modified. 2241 Instruction *Inst; 2242 2243 public: 2244 /// Constructor of the action. 2245 /// The constructor performs the related action on the IR. 2246 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2247 2248 virtual ~TypePromotionAction() = default; 2249 2250 /// Undo the modification done by this action. 2251 /// When this method is called, the IR must be in the same state as it was 2252 /// before this action was applied. 2253 /// \pre Undoing the action works if and only if the IR is in the exact same 2254 /// state as it was directly after this action was applied. 2255 virtual void undo() = 0; 2256 2257 /// Advocate every change made by this action. 2258 /// When the results on the IR of the action are to be kept, it is important 2259 /// to call this function, otherwise hidden information may be kept forever. 2260 virtual void commit() { 2261 // Nothing to be done, this action is not doing anything. 2262 } 2263 }; 2264 2265 /// Utility to remember the position of an instruction. 2266 class InsertionHandler { 2267 /// Position of an instruction. 2268 /// Either an instruction: 2269 /// - Is the first in a basic block: BB is used. 2270 /// - Has a previous instruction: PrevInst is used. 2271 union { 2272 Instruction *PrevInst; 2273 BasicBlock *BB; 2274 } Point; 2275 2276 /// Remember whether or not the instruction had a previous instruction. 2277 bool HasPrevInstruction; 2278 2279 public: 2280 /// Record the position of \p Inst. 2281 InsertionHandler(Instruction *Inst) { 2282 BasicBlock::iterator It = Inst->getIterator(); 2283 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2284 if (HasPrevInstruction) 2285 Point.PrevInst = &*--It; 2286 else 2287 Point.BB = Inst->getParent(); 2288 } 2289 2290 /// Insert \p Inst at the recorded position. 2291 void insert(Instruction *Inst) { 2292 if (HasPrevInstruction) { 2293 if (Inst->getParent()) 2294 Inst->removeFromParent(); 2295 Inst->insertAfter(Point.PrevInst); 2296 } else { 2297 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2298 if (Inst->getParent()) 2299 Inst->moveBefore(Position); 2300 else 2301 Inst->insertBefore(Position); 2302 } 2303 } 2304 }; 2305 2306 /// Move an instruction before another. 2307 class InstructionMoveBefore : public TypePromotionAction { 2308 /// Original position of the instruction. 2309 InsertionHandler Position; 2310 2311 public: 2312 /// Move \p Inst before \p Before. 2313 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2314 : TypePromotionAction(Inst), Position(Inst) { 2315 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before 2316 << "\n"); 2317 Inst->moveBefore(Before); 2318 } 2319 2320 /// Move the instruction back to its original position. 2321 void undo() override { 2322 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2323 Position.insert(Inst); 2324 } 2325 }; 2326 2327 /// Set the operand of an instruction with a new value. 2328 class OperandSetter : public TypePromotionAction { 2329 /// Original operand of the instruction. 2330 Value *Origin; 2331 2332 /// Index of the modified instruction. 2333 unsigned Idx; 2334 2335 public: 2336 /// Set \p Idx operand of \p Inst with \p NewVal. 2337 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2338 : TypePromotionAction(Inst), Idx(Idx) { 2339 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2340 << "for:" << *Inst << "\n" 2341 << "with:" << *NewVal << "\n"); 2342 Origin = Inst->getOperand(Idx); 2343 Inst->setOperand(Idx, NewVal); 2344 } 2345 2346 /// Restore the original value of the instruction. 2347 void undo() override { 2348 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2349 << "for: " << *Inst << "\n" 2350 << "with: " << *Origin << "\n"); 2351 Inst->setOperand(Idx, Origin); 2352 } 2353 }; 2354 2355 /// Hide the operands of an instruction. 2356 /// Do as if this instruction was not using any of its operands. 2357 class OperandsHider : public TypePromotionAction { 2358 /// The list of original operands. 2359 SmallVector<Value *, 4> OriginalValues; 2360 2361 public: 2362 /// Remove \p Inst from the uses of the operands of \p Inst. 2363 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2364 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2365 unsigned NumOpnds = Inst->getNumOperands(); 2366 OriginalValues.reserve(NumOpnds); 2367 for (unsigned It = 0; It < NumOpnds; ++It) { 2368 // Save the current operand. 2369 Value *Val = Inst->getOperand(It); 2370 OriginalValues.push_back(Val); 2371 // Set a dummy one. 2372 // We could use OperandSetter here, but that would imply an overhead 2373 // that we are not willing to pay. 2374 Inst->setOperand(It, UndefValue::get(Val->getType())); 2375 } 2376 } 2377 2378 /// Restore the original list of uses. 2379 void undo() override { 2380 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2381 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2382 Inst->setOperand(It, OriginalValues[It]); 2383 } 2384 }; 2385 2386 /// Build a truncate instruction. 2387 class TruncBuilder : public TypePromotionAction { 2388 Value *Val; 2389 2390 public: 2391 /// Build a truncate instruction of \p Opnd producing a \p Ty 2392 /// result. 2393 /// trunc Opnd to Ty. 2394 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2395 IRBuilder<> Builder(Opnd); 2396 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2397 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2398 } 2399 2400 /// Get the built value. 2401 Value *getBuiltValue() { return Val; } 2402 2403 /// Remove the built instruction. 2404 void undo() override { 2405 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2406 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2407 IVal->eraseFromParent(); 2408 } 2409 }; 2410 2411 /// Build a sign extension instruction. 2412 class SExtBuilder : public TypePromotionAction { 2413 Value *Val; 2414 2415 public: 2416 /// Build a sign extension instruction of \p Opnd producing a \p Ty 2417 /// result. 2418 /// sext Opnd to Ty. 2419 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2420 : TypePromotionAction(InsertPt) { 2421 IRBuilder<> Builder(InsertPt); 2422 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2423 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2424 } 2425 2426 /// Get the built value. 2427 Value *getBuiltValue() { return Val; } 2428 2429 /// Remove the built instruction. 2430 void undo() override { 2431 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2432 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2433 IVal->eraseFromParent(); 2434 } 2435 }; 2436 2437 /// Build a zero extension instruction. 2438 class ZExtBuilder : public TypePromotionAction { 2439 Value *Val; 2440 2441 public: 2442 /// Build a zero extension instruction of \p Opnd producing a \p Ty 2443 /// result. 2444 /// zext Opnd to Ty. 2445 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2446 : TypePromotionAction(InsertPt) { 2447 IRBuilder<> Builder(InsertPt); 2448 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2449 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2450 } 2451 2452 /// Get the built value. 2453 Value *getBuiltValue() { return Val; } 2454 2455 /// Remove the built instruction. 2456 void undo() override { 2457 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2458 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2459 IVal->eraseFromParent(); 2460 } 2461 }; 2462 2463 /// Mutate an instruction to another type. 2464 class TypeMutator : public TypePromotionAction { 2465 /// Record the original type. 2466 Type *OrigTy; 2467 2468 public: 2469 /// Mutate the type of \p Inst into \p NewTy. 2470 TypeMutator(Instruction *Inst, Type *NewTy) 2471 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2472 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2473 << "\n"); 2474 Inst->mutateType(NewTy); 2475 } 2476 2477 /// Mutate the instruction back to its original type. 2478 void undo() override { 2479 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2480 << "\n"); 2481 Inst->mutateType(OrigTy); 2482 } 2483 }; 2484 2485 /// Replace the uses of an instruction by another instruction. 2486 class UsesReplacer : public TypePromotionAction { 2487 /// Helper structure to keep track of the replaced uses. 2488 struct InstructionAndIdx { 2489 /// The instruction using the instruction. 2490 Instruction *Inst; 2491 2492 /// The index where this instruction is used for Inst. 2493 unsigned Idx; 2494 2495 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2496 : Inst(Inst), Idx(Idx) {} 2497 }; 2498 2499 /// Keep track of the original uses (pair Instruction, Index). 2500 SmallVector<InstructionAndIdx, 4> OriginalUses; 2501 /// Keep track of the debug users. 2502 SmallVector<DbgValueInst *, 1> DbgValues; 2503 2504 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; 2505 2506 public: 2507 /// Replace all the use of \p Inst by \p New. 2508 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2509 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2510 << "\n"); 2511 // Record the original uses. 2512 for (Use &U : Inst->uses()) { 2513 Instruction *UserI = cast<Instruction>(U.getUser()); 2514 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2515 } 2516 // Record the debug uses separately. They are not in the instruction's 2517 // use list, but they are replaced by RAUW. 2518 findDbgValues(DbgValues, Inst); 2519 2520 // Now, we can replace the uses. 2521 Inst->replaceAllUsesWith(New); 2522 } 2523 2524 /// Reassign the original uses of Inst to Inst. 2525 void undo() override { 2526 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2527 for (use_iterator UseIt = OriginalUses.begin(), 2528 EndIt = OriginalUses.end(); 2529 UseIt != EndIt; ++UseIt) { 2530 UseIt->Inst->setOperand(UseIt->Idx, Inst); 2531 } 2532 // RAUW has replaced all original uses with references to the new value, 2533 // including the debug uses. Since we are undoing the replacements, 2534 // the original debug uses must also be reinstated to maintain the 2535 // correctness and utility of debug value instructions. 2536 for (auto *DVI: DbgValues) { 2537 LLVMContext &Ctx = Inst->getType()->getContext(); 2538 auto *MV = MetadataAsValue::get(Ctx, ValueAsMetadata::get(Inst)); 2539 DVI->setOperand(0, MV); 2540 } 2541 } 2542 }; 2543 2544 /// Remove an instruction from the IR. 2545 class InstructionRemover : public TypePromotionAction { 2546 /// Original position of the instruction. 2547 InsertionHandler Inserter; 2548 2549 /// Helper structure to hide all the link to the instruction. In other 2550 /// words, this helps to do as if the instruction was removed. 2551 OperandsHider Hider; 2552 2553 /// Keep track of the uses replaced, if any. 2554 UsesReplacer *Replacer = nullptr; 2555 2556 /// Keep track of instructions removed. 2557 SetOfInstrs &RemovedInsts; 2558 2559 public: 2560 /// Remove all reference of \p Inst and optionally replace all its 2561 /// uses with New. 2562 /// \p RemovedInsts Keep track of the instructions removed by this Action. 2563 /// \pre If !Inst->use_empty(), then New != nullptr 2564 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, 2565 Value *New = nullptr) 2566 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 2567 RemovedInsts(RemovedInsts) { 2568 if (New) 2569 Replacer = new UsesReplacer(Inst, New); 2570 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 2571 RemovedInsts.insert(Inst); 2572 /// The instructions removed here will be freed after completing 2573 /// optimizeBlock() for all blocks as we need to keep track of the 2574 /// removed instructions during promotion. 2575 Inst->removeFromParent(); 2576 } 2577 2578 ~InstructionRemover() override { delete Replacer; } 2579 2580 /// Resurrect the instruction and reassign it to the proper uses if 2581 /// new value was provided when build this action. 2582 void undo() override { 2583 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 2584 Inserter.insert(Inst); 2585 if (Replacer) 2586 Replacer->undo(); 2587 Hider.undo(); 2588 RemovedInsts.erase(Inst); 2589 } 2590 }; 2591 2592 public: 2593 /// Restoration point. 2594 /// The restoration point is a pointer to an action instead of an iterator 2595 /// because the iterator may be invalidated but not the pointer. 2596 using ConstRestorationPt = const TypePromotionAction *; 2597 2598 TypePromotionTransaction(SetOfInstrs &RemovedInsts) 2599 : RemovedInsts(RemovedInsts) {} 2600 2601 /// Advocate every changes made in that transaction. 2602 void commit(); 2603 2604 /// Undo all the changes made after the given point. 2605 void rollback(ConstRestorationPt Point); 2606 2607 /// Get the current restoration point. 2608 ConstRestorationPt getRestorationPoint() const; 2609 2610 /// \name API for IR modification with state keeping to support rollback. 2611 /// @{ 2612 /// Same as Instruction::setOperand. 2613 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 2614 2615 /// Same as Instruction::eraseFromParent. 2616 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 2617 2618 /// Same as Value::replaceAllUsesWith. 2619 void replaceAllUsesWith(Instruction *Inst, Value *New); 2620 2621 /// Same as Value::mutateType. 2622 void mutateType(Instruction *Inst, Type *NewTy); 2623 2624 /// Same as IRBuilder::createTrunc. 2625 Value *createTrunc(Instruction *Opnd, Type *Ty); 2626 2627 /// Same as IRBuilder::createSExt. 2628 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 2629 2630 /// Same as IRBuilder::createZExt. 2631 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 2632 2633 /// Same as Instruction::moveBefore. 2634 void moveBefore(Instruction *Inst, Instruction *Before); 2635 /// @} 2636 2637 private: 2638 /// The ordered list of actions made so far. 2639 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2640 2641 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 2642 2643 SetOfInstrs &RemovedInsts; 2644 }; 2645 2646 } // end anonymous namespace 2647 2648 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2649 Value *NewVal) { 2650 Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>( 2651 Inst, Idx, NewVal)); 2652 } 2653 2654 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2655 Value *NewVal) { 2656 Actions.push_back( 2657 llvm::make_unique<TypePromotionTransaction::InstructionRemover>( 2658 Inst, RemovedInsts, NewVal)); 2659 } 2660 2661 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2662 Value *New) { 2663 Actions.push_back( 2664 llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2665 } 2666 2667 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2668 Actions.push_back( 2669 llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2670 } 2671 2672 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2673 Type *Ty) { 2674 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2675 Value *Val = Ptr->getBuiltValue(); 2676 Actions.push_back(std::move(Ptr)); 2677 return Val; 2678 } 2679 2680 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2681 Value *Opnd, Type *Ty) { 2682 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2683 Value *Val = Ptr->getBuiltValue(); 2684 Actions.push_back(std::move(Ptr)); 2685 return Val; 2686 } 2687 2688 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2689 Value *Opnd, Type *Ty) { 2690 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2691 Value *Val = Ptr->getBuiltValue(); 2692 Actions.push_back(std::move(Ptr)); 2693 return Val; 2694 } 2695 2696 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2697 Instruction *Before) { 2698 Actions.push_back( 2699 llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 2700 Inst, Before)); 2701 } 2702 2703 TypePromotionTransaction::ConstRestorationPt 2704 TypePromotionTransaction::getRestorationPoint() const { 2705 return !Actions.empty() ? Actions.back().get() : nullptr; 2706 } 2707 2708 void TypePromotionTransaction::commit() { 2709 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2710 ++It) 2711 (*It)->commit(); 2712 Actions.clear(); 2713 } 2714 2715 void TypePromotionTransaction::rollback( 2716 TypePromotionTransaction::ConstRestorationPt Point) { 2717 while (!Actions.empty() && Point != Actions.back().get()) { 2718 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2719 Curr->undo(); 2720 } 2721 } 2722 2723 namespace { 2724 2725 /// A helper class for matching addressing modes. 2726 /// 2727 /// This encapsulates the logic for matching the target-legal addressing modes. 2728 class AddressingModeMatcher { 2729 SmallVectorImpl<Instruction*> &AddrModeInsts; 2730 const TargetLowering &TLI; 2731 const TargetRegisterInfo &TRI; 2732 const DataLayout &DL; 2733 2734 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2735 /// the memory instruction that we're computing this address for. 2736 Type *AccessTy; 2737 unsigned AddrSpace; 2738 Instruction *MemoryInst; 2739 2740 /// This is the addressing mode that we're building up. This is 2741 /// part of the return value of this addressing mode matching stuff. 2742 ExtAddrMode &AddrMode; 2743 2744 /// The instructions inserted by other CodeGenPrepare optimizations. 2745 const SetOfInstrs &InsertedInsts; 2746 2747 /// A map from the instructions to their type before promotion. 2748 InstrToOrigTy &PromotedInsts; 2749 2750 /// The ongoing transaction where every action should be registered. 2751 TypePromotionTransaction &TPT; 2752 2753 // A GEP which has too large offset to be folded into the addressing mode. 2754 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; 2755 2756 /// This is set to true when we should not do profitability checks. 2757 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 2758 bool IgnoreProfitability; 2759 2760 AddressingModeMatcher( 2761 SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, 2762 const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI, 2763 ExtAddrMode &AM, const SetOfInstrs &InsertedInsts, 2764 InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, 2765 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) 2766 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 2767 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 2768 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 2769 PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP) { 2770 IgnoreProfitability = false; 2771 } 2772 2773 public: 2774 /// Find the maximal addressing mode that a load/store of V can fold, 2775 /// give an access type of AccessTy. This returns a list of involved 2776 /// instructions in AddrModeInsts. 2777 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 2778 /// optimizations. 2779 /// \p PromotedInsts maps the instructions to their type before promotion. 2780 /// \p The ongoing transaction where every action should be registered. 2781 static ExtAddrMode 2782 Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, 2783 SmallVectorImpl<Instruction *> &AddrModeInsts, 2784 const TargetLowering &TLI, const TargetRegisterInfo &TRI, 2785 const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, 2786 TypePromotionTransaction &TPT, 2787 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) { 2788 ExtAddrMode Result; 2789 2790 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS, 2791 MemoryInst, Result, InsertedInsts, 2792 PromotedInsts, TPT, LargeOffsetGEP) 2793 .matchAddr(V, 0); 2794 (void)Success; assert(Success && "Couldn't select *anything*?"); 2795 return Result; 2796 } 2797 2798 private: 2799 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 2800 bool matchAddr(Value *Addr, unsigned Depth); 2801 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, 2802 bool *MovedAway = nullptr); 2803 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 2804 ExtAddrMode &AMBefore, 2805 ExtAddrMode &AMAfter); 2806 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 2807 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 2808 Value *PromotedOperand) const; 2809 }; 2810 2811 class PhiNodeSet; 2812 2813 /// An iterator for PhiNodeSet. 2814 class PhiNodeSetIterator { 2815 PhiNodeSet * const Set; 2816 size_t CurrentIndex = 0; 2817 2818 public: 2819 /// The constructor. Start should point to either a valid element, or be equal 2820 /// to the size of the underlying SmallVector of the PhiNodeSet. 2821 PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start); 2822 PHINode * operator*() const; 2823 PhiNodeSetIterator& operator++(); 2824 bool operator==(const PhiNodeSetIterator &RHS) const; 2825 bool operator!=(const PhiNodeSetIterator &RHS) const; 2826 }; 2827 2828 /// Keeps a set of PHINodes. 2829 /// 2830 /// This is a minimal set implementation for a specific use case: 2831 /// It is very fast when there are very few elements, but also provides good 2832 /// performance when there are many. It is similar to SmallPtrSet, but also 2833 /// provides iteration by insertion order, which is deterministic and stable 2834 /// across runs. It is also similar to SmallSetVector, but provides removing 2835 /// elements in O(1) time. This is achieved by not actually removing the element 2836 /// from the underlying vector, so comes at the cost of using more memory, but 2837 /// that is fine, since PhiNodeSets are used as short lived objects. 2838 class PhiNodeSet { 2839 friend class PhiNodeSetIterator; 2840 2841 using MapType = SmallDenseMap<PHINode *, size_t, 32>; 2842 using iterator = PhiNodeSetIterator; 2843 2844 /// Keeps the elements in the order of their insertion in the underlying 2845 /// vector. To achieve constant time removal, it never deletes any element. 2846 SmallVector<PHINode *, 32> NodeList; 2847 2848 /// Keeps the elements in the underlying set implementation. This (and not the 2849 /// NodeList defined above) is the source of truth on whether an element 2850 /// is actually in the collection. 2851 MapType NodeMap; 2852 2853 /// Points to the first valid (not deleted) element when the set is not empty 2854 /// and the value is not zero. Equals to the size of the underlying vector 2855 /// when the set is empty. When the value is 0, as in the beginning, the 2856 /// first element may or may not be valid. 2857 size_t FirstValidElement = 0; 2858 2859 public: 2860 /// Inserts a new element to the collection. 2861 /// \returns true if the element is actually added, i.e. was not in the 2862 /// collection before the operation. 2863 bool insert(PHINode *Ptr) { 2864 if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { 2865 NodeList.push_back(Ptr); 2866 return true; 2867 } 2868 return false; 2869 } 2870 2871 /// Removes the element from the collection. 2872 /// \returns whether the element is actually removed, i.e. was in the 2873 /// collection before the operation. 2874 bool erase(PHINode *Ptr) { 2875 auto it = NodeMap.find(Ptr); 2876 if (it != NodeMap.end()) { 2877 NodeMap.erase(Ptr); 2878 SkipRemovedElements(FirstValidElement); 2879 return true; 2880 } 2881 return false; 2882 } 2883 2884 /// Removes all elements and clears the collection. 2885 void clear() { 2886 NodeMap.clear(); 2887 NodeList.clear(); 2888 FirstValidElement = 0; 2889 } 2890 2891 /// \returns an iterator that will iterate the elements in the order of 2892 /// insertion. 2893 iterator begin() { 2894 if (FirstValidElement == 0) 2895 SkipRemovedElements(FirstValidElement); 2896 return PhiNodeSetIterator(this, FirstValidElement); 2897 } 2898 2899 /// \returns an iterator that points to the end of the collection. 2900 iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } 2901 2902 /// Returns the number of elements in the collection. 2903 size_t size() const { 2904 return NodeMap.size(); 2905 } 2906 2907 /// \returns 1 if the given element is in the collection, and 0 if otherwise. 2908 size_t count(PHINode *Ptr) const { 2909 return NodeMap.count(Ptr); 2910 } 2911 2912 private: 2913 /// Updates the CurrentIndex so that it will point to a valid element. 2914 /// 2915 /// If the element of NodeList at CurrentIndex is valid, it does not 2916 /// change it. If there are no more valid elements, it updates CurrentIndex 2917 /// to point to the end of the NodeList. 2918 void SkipRemovedElements(size_t &CurrentIndex) { 2919 while (CurrentIndex < NodeList.size()) { 2920 auto it = NodeMap.find(NodeList[CurrentIndex]); 2921 // If the element has been deleted and added again later, NodeMap will 2922 // point to a different index, so CurrentIndex will still be invalid. 2923 if (it != NodeMap.end() && it->second == CurrentIndex) 2924 break; 2925 ++CurrentIndex; 2926 } 2927 } 2928 }; 2929 2930 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) 2931 : Set(Set), CurrentIndex(Start) {} 2932 2933 PHINode * PhiNodeSetIterator::operator*() const { 2934 assert(CurrentIndex < Set->NodeList.size() && 2935 "PhiNodeSet access out of range"); 2936 return Set->NodeList[CurrentIndex]; 2937 } 2938 2939 PhiNodeSetIterator& PhiNodeSetIterator::operator++() { 2940 assert(CurrentIndex < Set->NodeList.size() && 2941 "PhiNodeSet access out of range"); 2942 ++CurrentIndex; 2943 Set->SkipRemovedElements(CurrentIndex); 2944 return *this; 2945 } 2946 2947 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { 2948 return CurrentIndex == RHS.CurrentIndex; 2949 } 2950 2951 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { 2952 return !((*this) == RHS); 2953 } 2954 2955 /// Keep track of simplification of Phi nodes. 2956 /// Accept the set of all phi nodes and erase phi node from this set 2957 /// if it is simplified. 2958 class SimplificationTracker { 2959 DenseMap<Value *, Value *> Storage; 2960 const SimplifyQuery &SQ; 2961 // Tracks newly created Phi nodes. The elements are iterated by insertion 2962 // order. 2963 PhiNodeSet AllPhiNodes; 2964 // Tracks newly created Select nodes. 2965 SmallPtrSet<SelectInst *, 32> AllSelectNodes; 2966 2967 public: 2968 SimplificationTracker(const SimplifyQuery &sq) 2969 : SQ(sq) {} 2970 2971 Value *Get(Value *V) { 2972 do { 2973 auto SV = Storage.find(V); 2974 if (SV == Storage.end()) 2975 return V; 2976 V = SV->second; 2977 } while (true); 2978 } 2979 2980 Value *Simplify(Value *Val) { 2981 SmallVector<Value *, 32> WorkList; 2982 SmallPtrSet<Value *, 32> Visited; 2983 WorkList.push_back(Val); 2984 while (!WorkList.empty()) { 2985 auto P = WorkList.pop_back_val(); 2986 if (!Visited.insert(P).second) 2987 continue; 2988 if (auto *PI = dyn_cast<Instruction>(P)) 2989 if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { 2990 for (auto *U : PI->users()) 2991 WorkList.push_back(cast<Value>(U)); 2992 Put(PI, V); 2993 PI->replaceAllUsesWith(V); 2994 if (auto *PHI = dyn_cast<PHINode>(PI)) 2995 AllPhiNodes.erase(PHI); 2996 if (auto *Select = dyn_cast<SelectInst>(PI)) 2997 AllSelectNodes.erase(Select); 2998 PI->eraseFromParent(); 2999 } 3000 } 3001 return Get(Val); 3002 } 3003 3004 void Put(Value *From, Value *To) { 3005 Storage.insert({ From, To }); 3006 } 3007 3008 void ReplacePhi(PHINode *From, PHINode *To) { 3009 Value* OldReplacement = Get(From); 3010 while (OldReplacement != From) { 3011 From = To; 3012 To = dyn_cast<PHINode>(OldReplacement); 3013 OldReplacement = Get(From); 3014 } 3015 assert(Get(To) == To && "Replacement PHI node is already replaced."); 3016 Put(From, To); 3017 From->replaceAllUsesWith(To); 3018 AllPhiNodes.erase(From); 3019 From->eraseFromParent(); 3020 } 3021 3022 PhiNodeSet& newPhiNodes() { return AllPhiNodes; } 3023 3024 void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } 3025 3026 void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } 3027 3028 unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } 3029 3030 unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } 3031 3032 void destroyNewNodes(Type *CommonType) { 3033 // For safe erasing, replace the uses with dummy value first. 3034 auto Dummy = UndefValue::get(CommonType); 3035 for (auto I : AllPhiNodes) { 3036 I->replaceAllUsesWith(Dummy); 3037 I->eraseFromParent(); 3038 } 3039 AllPhiNodes.clear(); 3040 for (auto I : AllSelectNodes) { 3041 I->replaceAllUsesWith(Dummy); 3042 I->eraseFromParent(); 3043 } 3044 AllSelectNodes.clear(); 3045 } 3046 }; 3047 3048 /// A helper class for combining addressing modes. 3049 class AddressingModeCombiner { 3050 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; 3051 typedef std::pair<PHINode *, PHINode *> PHIPair; 3052 3053 private: 3054 /// The addressing modes we've collected. 3055 SmallVector<ExtAddrMode, 16> AddrModes; 3056 3057 /// The field in which the AddrModes differ, when we have more than one. 3058 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; 3059 3060 /// Are the AddrModes that we have all just equal to their original values? 3061 bool AllAddrModesTrivial = true; 3062 3063 /// Common Type for all different fields in addressing modes. 3064 Type *CommonType; 3065 3066 /// SimplifyQuery for simplifyInstruction utility. 3067 const SimplifyQuery &SQ; 3068 3069 /// Original Address. 3070 Value *Original; 3071 3072 public: 3073 AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) 3074 : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} 3075 3076 /// Get the combined AddrMode 3077 const ExtAddrMode &getAddrMode() const { 3078 return AddrModes[0]; 3079 } 3080 3081 /// Add a new AddrMode if it's compatible with the AddrModes we already 3082 /// have. 3083 /// \return True iff we succeeded in doing so. 3084 bool addNewAddrMode(ExtAddrMode &NewAddrMode) { 3085 // Take note of if we have any non-trivial AddrModes, as we need to detect 3086 // when all AddrModes are trivial as then we would introduce a phi or select 3087 // which just duplicates what's already there. 3088 AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); 3089 3090 // If this is the first addrmode then everything is fine. 3091 if (AddrModes.empty()) { 3092 AddrModes.emplace_back(NewAddrMode); 3093 return true; 3094 } 3095 3096 // Figure out how different this is from the other address modes, which we 3097 // can do just by comparing against the first one given that we only care 3098 // about the cumulative difference. 3099 ExtAddrMode::FieldName ThisDifferentField = 3100 AddrModes[0].compare(NewAddrMode); 3101 if (DifferentField == ExtAddrMode::NoField) 3102 DifferentField = ThisDifferentField; 3103 else if (DifferentField != ThisDifferentField) 3104 DifferentField = ExtAddrMode::MultipleFields; 3105 3106 // If NewAddrMode differs in more than one dimension we cannot handle it. 3107 bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; 3108 3109 // If Scale Field is different then we reject. 3110 CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; 3111 3112 // We also must reject the case when base offset is different and 3113 // scale reg is not null, we cannot handle this case due to merge of 3114 // different offsets will be used as ScaleReg. 3115 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || 3116 !NewAddrMode.ScaledReg); 3117 3118 // We also must reject the case when GV is different and BaseReg installed 3119 // due to we want to use base reg as a merge of GV values. 3120 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || 3121 !NewAddrMode.HasBaseReg); 3122 3123 // Even if NewAddMode is the same we still need to collect it due to 3124 // original value is different. And later we will need all original values 3125 // as anchors during finding the common Phi node. 3126 if (CanHandle) 3127 AddrModes.emplace_back(NewAddrMode); 3128 else 3129 AddrModes.clear(); 3130 3131 return CanHandle; 3132 } 3133 3134 /// Combine the addressing modes we've collected into a single 3135 /// addressing mode. 3136 /// \return True iff we successfully combined them or we only had one so 3137 /// didn't need to combine them anyway. 3138 bool combineAddrModes() { 3139 // If we have no AddrModes then they can't be combined. 3140 if (AddrModes.size() == 0) 3141 return false; 3142 3143 // A single AddrMode can trivially be combined. 3144 if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) 3145 return true; 3146 3147 // If the AddrModes we collected are all just equal to the value they are 3148 // derived from then combining them wouldn't do anything useful. 3149 if (AllAddrModesTrivial) 3150 return false; 3151 3152 if (!addrModeCombiningAllowed()) 3153 return false; 3154 3155 // Build a map between <original value, basic block where we saw it> to 3156 // value of base register. 3157 // Bail out if there is no common type. 3158 FoldAddrToValueMapping Map; 3159 if (!initializeMap(Map)) 3160 return false; 3161 3162 Value *CommonValue = findCommon(Map); 3163 if (CommonValue) 3164 AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); 3165 return CommonValue != nullptr; 3166 } 3167 3168 private: 3169 /// Initialize Map with anchor values. For address seen 3170 /// we set the value of different field saw in this address. 3171 /// At the same time we find a common type for different field we will 3172 /// use to create new Phi/Select nodes. Keep it in CommonType field. 3173 /// Return false if there is no common type found. 3174 bool initializeMap(FoldAddrToValueMapping &Map) { 3175 // Keep track of keys where the value is null. We will need to replace it 3176 // with constant null when we know the common type. 3177 SmallVector<Value *, 2> NullValue; 3178 Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); 3179 for (auto &AM : AddrModes) { 3180 Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); 3181 if (DV) { 3182 auto *Type = DV->getType(); 3183 if (CommonType && CommonType != Type) 3184 return false; 3185 CommonType = Type; 3186 Map[AM.OriginalValue] = DV; 3187 } else { 3188 NullValue.push_back(AM.OriginalValue); 3189 } 3190 } 3191 assert(CommonType && "At least one non-null value must be!"); 3192 for (auto *V : NullValue) 3193 Map[V] = Constant::getNullValue(CommonType); 3194 return true; 3195 } 3196 3197 /// We have mapping between value A and other value B where B was a field in 3198 /// addressing mode represented by A. Also we have an original value C 3199 /// representing an address we start with. Traversing from C through phi and 3200 /// selects we ended up with A's in a map. This utility function tries to find 3201 /// a value V which is a field in addressing mode C and traversing through phi 3202 /// nodes and selects we will end up in corresponded values B in a map. 3203 /// The utility will create a new Phi/Selects if needed. 3204 // The simple example looks as follows: 3205 // BB1: 3206 // p1 = b1 + 40 3207 // br cond BB2, BB3 3208 // BB2: 3209 // p2 = b2 + 40 3210 // br BB3 3211 // BB3: 3212 // p = phi [p1, BB1], [p2, BB2] 3213 // v = load p 3214 // Map is 3215 // p1 -> b1 3216 // p2 -> b2 3217 // Request is 3218 // p -> ? 3219 // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. 3220 Value *findCommon(FoldAddrToValueMapping &Map) { 3221 // Tracks the simplification of newly created phi nodes. The reason we use 3222 // this mapping is because we will add new created Phi nodes in AddrToBase. 3223 // Simplification of Phi nodes is recursive, so some Phi node may 3224 // be simplified after we added it to AddrToBase. In reality this 3225 // simplification is possible only if original phi/selects were not 3226 // simplified yet. 3227 // Using this mapping we can find the current value in AddrToBase. 3228 SimplificationTracker ST(SQ); 3229 3230 // First step, DFS to create PHI nodes for all intermediate blocks. 3231 // Also fill traverse order for the second step. 3232 SmallVector<Value *, 32> TraverseOrder; 3233 InsertPlaceholders(Map, TraverseOrder, ST); 3234 3235 // Second Step, fill new nodes by merged values and simplify if possible. 3236 FillPlaceholders(Map, TraverseOrder, ST); 3237 3238 if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { 3239 ST.destroyNewNodes(CommonType); 3240 return nullptr; 3241 } 3242 3243 // Now we'd like to match New Phi nodes to existed ones. 3244 unsigned PhiNotMatchedCount = 0; 3245 if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) { 3246 ST.destroyNewNodes(CommonType); 3247 return nullptr; 3248 } 3249 3250 auto *Result = ST.Get(Map.find(Original)->second); 3251 if (Result) { 3252 NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount; 3253 NumMemoryInstsSelectCreated += ST.countNewSelectNodes(); 3254 } 3255 return Result; 3256 } 3257 3258 /// Try to match PHI node to Candidate. 3259 /// Matcher tracks the matched Phi nodes. 3260 bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, 3261 SmallSetVector<PHIPair, 8> &Matcher, 3262 PhiNodeSet &PhiNodesToMatch) { 3263 SmallVector<PHIPair, 8> WorkList; 3264 Matcher.insert({ PHI, Candidate }); 3265 WorkList.push_back({ PHI, Candidate }); 3266 SmallSet<PHIPair, 8> Visited; 3267 while (!WorkList.empty()) { 3268 auto Item = WorkList.pop_back_val(); 3269 if (!Visited.insert(Item).second) 3270 continue; 3271 // We iterate over all incoming values to Phi to compare them. 3272 // If values are different and both of them Phi and the first one is a 3273 // Phi we added (subject to match) and both of them is in the same basic 3274 // block then we can match our pair if values match. So we state that 3275 // these values match and add it to work list to verify that. 3276 for (auto B : Item.first->blocks()) { 3277 Value *FirstValue = Item.first->getIncomingValueForBlock(B); 3278 Value *SecondValue = Item.second->getIncomingValueForBlock(B); 3279 if (FirstValue == SecondValue) 3280 continue; 3281 3282 PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue); 3283 PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue); 3284 3285 // One of them is not Phi or 3286 // The first one is not Phi node from the set we'd like to match or 3287 // Phi nodes from different basic blocks then 3288 // we will not be able to match. 3289 if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) || 3290 FirstPhi->getParent() != SecondPhi->getParent()) 3291 return false; 3292 3293 // If we already matched them then continue. 3294 if (Matcher.count({ FirstPhi, SecondPhi })) 3295 continue; 3296 // So the values are different and does not match. So we need them to 3297 // match. 3298 Matcher.insert({ FirstPhi, SecondPhi }); 3299 // But me must check it. 3300 WorkList.push_back({ FirstPhi, SecondPhi }); 3301 } 3302 } 3303 return true; 3304 } 3305 3306 /// For the given set of PHI nodes (in the SimplificationTracker) try 3307 /// to find their equivalents. 3308 /// Returns false if this matching fails and creation of new Phi is disabled. 3309 bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, 3310 unsigned &PhiNotMatchedCount) { 3311 // Matched and PhiNodesToMatch iterate their elements in a deterministic 3312 // order, so the replacements (ReplacePhi) are also done in a deterministic 3313 // order. 3314 SmallSetVector<PHIPair, 8> Matched; 3315 SmallPtrSet<PHINode *, 8> WillNotMatch; 3316 PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes(); 3317 while (PhiNodesToMatch.size()) { 3318 PHINode *PHI = *PhiNodesToMatch.begin(); 3319 3320 // Add us, if no Phi nodes in the basic block we do not match. 3321 WillNotMatch.clear(); 3322 WillNotMatch.insert(PHI); 3323 3324 // Traverse all Phis until we found equivalent or fail to do that. 3325 bool IsMatched = false; 3326 for (auto &P : PHI->getParent()->phis()) { 3327 if (&P == PHI) 3328 continue; 3329 if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) 3330 break; 3331 // If it does not match, collect all Phi nodes from matcher. 3332 // if we end up with no match, them all these Phi nodes will not match 3333 // later. 3334 for (auto M : Matched) 3335 WillNotMatch.insert(M.first); 3336 Matched.clear(); 3337 } 3338 if (IsMatched) { 3339 // Replace all matched values and erase them. 3340 for (auto MV : Matched) 3341 ST.ReplacePhi(MV.first, MV.second); 3342 Matched.clear(); 3343 continue; 3344 } 3345 // If we are not allowed to create new nodes then bail out. 3346 if (!AllowNewPhiNodes) 3347 return false; 3348 // Just remove all seen values in matcher. They will not match anything. 3349 PhiNotMatchedCount += WillNotMatch.size(); 3350 for (auto *P : WillNotMatch) 3351 PhiNodesToMatch.erase(P); 3352 } 3353 return true; 3354 } 3355 /// Fill the placeholders with values from predecessors and simplify them. 3356 void FillPlaceholders(FoldAddrToValueMapping &Map, 3357 SmallVectorImpl<Value *> &TraverseOrder, 3358 SimplificationTracker &ST) { 3359 while (!TraverseOrder.empty()) { 3360 Value *Current = TraverseOrder.pop_back_val(); 3361 assert(Map.find(Current) != Map.end() && "No node to fill!!!"); 3362 Value *V = Map[Current]; 3363 3364 if (SelectInst *Select = dyn_cast<SelectInst>(V)) { 3365 // CurrentValue also must be Select. 3366 auto *CurrentSelect = cast<SelectInst>(Current); 3367 auto *TrueValue = CurrentSelect->getTrueValue(); 3368 assert(Map.find(TrueValue) != Map.end() && "No True Value!"); 3369 Select->setTrueValue(ST.Get(Map[TrueValue])); 3370 auto *FalseValue = CurrentSelect->getFalseValue(); 3371 assert(Map.find(FalseValue) != Map.end() && "No False Value!"); 3372 Select->setFalseValue(ST.Get(Map[FalseValue])); 3373 } else { 3374 // Must be a Phi node then. 3375 PHINode *PHI = cast<PHINode>(V); 3376 auto *CurrentPhi = dyn_cast<PHINode>(Current); 3377 // Fill the Phi node with values from predecessors. 3378 for (auto B : predecessors(PHI->getParent())) { 3379 Value *PV = CurrentPhi->getIncomingValueForBlock(B); 3380 assert(Map.find(PV) != Map.end() && "No predecessor Value!"); 3381 PHI->addIncoming(ST.Get(Map[PV]), B); 3382 } 3383 } 3384 Map[Current] = ST.Simplify(V); 3385 } 3386 } 3387 3388 /// Starting from original value recursively iterates over def-use chain up to 3389 /// known ending values represented in a map. For each traversed phi/select 3390 /// inserts a placeholder Phi or Select. 3391 /// Reports all new created Phi/Select nodes by adding them to set. 3392 /// Also reports and order in what values have been traversed. 3393 void InsertPlaceholders(FoldAddrToValueMapping &Map, 3394 SmallVectorImpl<Value *> &TraverseOrder, 3395 SimplificationTracker &ST) { 3396 SmallVector<Value *, 32> Worklist; 3397 assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) && 3398 "Address must be a Phi or Select node"); 3399 auto *Dummy = UndefValue::get(CommonType); 3400 Worklist.push_back(Original); 3401 while (!Worklist.empty()) { 3402 Value *Current = Worklist.pop_back_val(); 3403 // if it is already visited or it is an ending value then skip it. 3404 if (Map.find(Current) != Map.end()) 3405 continue; 3406 TraverseOrder.push_back(Current); 3407 3408 // CurrentValue must be a Phi node or select. All others must be covered 3409 // by anchors. 3410 if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) { 3411 // Is it OK to get metadata from OrigSelect?! 3412 // Create a Select placeholder with dummy value. 3413 SelectInst *Select = SelectInst::Create( 3414 CurrentSelect->getCondition(), Dummy, Dummy, 3415 CurrentSelect->getName(), CurrentSelect, CurrentSelect); 3416 Map[Current] = Select; 3417 ST.insertNewSelect(Select); 3418 // We are interested in True and False values. 3419 Worklist.push_back(CurrentSelect->getTrueValue()); 3420 Worklist.push_back(CurrentSelect->getFalseValue()); 3421 } else { 3422 // It must be a Phi node then. 3423 PHINode *CurrentPhi = cast<PHINode>(Current); 3424 unsigned PredCount = CurrentPhi->getNumIncomingValues(); 3425 PHINode *PHI = 3426 PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); 3427 Map[Current] = PHI; 3428 ST.insertNewPhi(PHI); 3429 for (Value *P : CurrentPhi->incoming_values()) 3430 Worklist.push_back(P); 3431 } 3432 } 3433 } 3434 3435 bool addrModeCombiningAllowed() { 3436 if (DisableComplexAddrModes) 3437 return false; 3438 switch (DifferentField) { 3439 default: 3440 return false; 3441 case ExtAddrMode::BaseRegField: 3442 return AddrSinkCombineBaseReg; 3443 case ExtAddrMode::BaseGVField: 3444 return AddrSinkCombineBaseGV; 3445 case ExtAddrMode::BaseOffsField: 3446 return AddrSinkCombineBaseOffs; 3447 case ExtAddrMode::ScaledRegField: 3448 return AddrSinkCombineScaledReg; 3449 } 3450 } 3451 }; 3452 } // end anonymous namespace 3453 3454 /// Try adding ScaleReg*Scale to the current addressing mode. 3455 /// Return true and update AddrMode if this addr mode is legal for the target, 3456 /// false if not. 3457 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 3458 unsigned Depth) { 3459 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 3460 // mode. Just process that directly. 3461 if (Scale == 1) 3462 return matchAddr(ScaleReg, Depth); 3463 3464 // If the scale is 0, it takes nothing to add this. 3465 if (Scale == 0) 3466 return true; 3467 3468 // If we already have a scale of this value, we can add to it, otherwise, we 3469 // need an available scale field. 3470 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 3471 return false; 3472 3473 ExtAddrMode TestAddrMode = AddrMode; 3474 3475 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 3476 // [A+B + A*7] -> [B+A*8]. 3477 TestAddrMode.Scale += Scale; 3478 TestAddrMode.ScaledReg = ScaleReg; 3479 3480 // If the new address isn't legal, bail out. 3481 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) 3482 return false; 3483 3484 // It was legal, so commit it. 3485 AddrMode = TestAddrMode; 3486 3487 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 3488 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 3489 // X*Scale + C*Scale to addr mode. 3490 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 3491 if (isa<Instruction>(ScaleReg) && // not a constant expr. 3492 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { 3493 TestAddrMode.ScaledReg = AddLHS; 3494 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; 3495 3496 // If this addressing mode is legal, commit it and remember that we folded 3497 // this instruction. 3498 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { 3499 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 3500 AddrMode = TestAddrMode; 3501 return true; 3502 } 3503 } 3504 3505 // Otherwise, not (x+c)*scale, just return what we have. 3506 return true; 3507 } 3508 3509 /// This is a little filter, which returns true if an addressing computation 3510 /// involving I might be folded into a load/store accessing it. 3511 /// This doesn't need to be perfect, but needs to accept at least 3512 /// the set of instructions that MatchOperationAddr can. 3513 static bool MightBeFoldableInst(Instruction *I) { 3514 switch (I->getOpcode()) { 3515 case Instruction::BitCast: 3516 case Instruction::AddrSpaceCast: 3517 // Don't touch identity bitcasts. 3518 if (I->getType() == I->getOperand(0)->getType()) 3519 return false; 3520 return I->getType()->isIntOrPtrTy(); 3521 case Instruction::PtrToInt: 3522 // PtrToInt is always a noop, as we know that the int type is pointer sized. 3523 return true; 3524 case Instruction::IntToPtr: 3525 // We know the input is intptr_t, so this is foldable. 3526 return true; 3527 case Instruction::Add: 3528 return true; 3529 case Instruction::Mul: 3530 case Instruction::Shl: 3531 // Can only handle X*C and X << C. 3532 return isa<ConstantInt>(I->getOperand(1)); 3533 case Instruction::GetElementPtr: 3534 return true; 3535 default: 3536 return false; 3537 } 3538 } 3539 3540 /// Check whether or not \p Val is a legal instruction for \p TLI. 3541 /// \note \p Val is assumed to be the product of some type promotion. 3542 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed 3543 /// to be legal, as the non-promoted value would have had the same state. 3544 static bool isPromotedInstructionLegal(const TargetLowering &TLI, 3545 const DataLayout &DL, Value *Val) { 3546 Instruction *PromotedInst = dyn_cast<Instruction>(Val); 3547 if (!PromotedInst) 3548 return false; 3549 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); 3550 // If the ISDOpcode is undefined, it was undefined before the promotion. 3551 if (!ISDOpcode) 3552 return true; 3553 // Otherwise, check if the promoted instruction is legal or not. 3554 return TLI.isOperationLegalOrCustom( 3555 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); 3556 } 3557 3558 namespace { 3559 3560 /// Hepler class to perform type promotion. 3561 class TypePromotionHelper { 3562 /// Utility function to add a promoted instruction \p ExtOpnd to 3563 /// \p PromotedInsts and record the type of extension we have seen. 3564 static void addPromotedInst(InstrToOrigTy &PromotedInsts, 3565 Instruction *ExtOpnd, 3566 bool IsSExt) { 3567 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; 3568 InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); 3569 if (It != PromotedInsts.end()) { 3570 // If the new extension is same as original, the information in 3571 // PromotedInsts[ExtOpnd] is still correct. 3572 if (It->second.getInt() == ExtTy) 3573 return; 3574 3575 // Now the new extension is different from old extension, we make 3576 // the type information invalid by setting extension type to 3577 // BothExtension. 3578 ExtTy = BothExtension; 3579 } 3580 PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); 3581 } 3582 3583 /// Utility function to query the original type of instruction \p Opnd 3584 /// with a matched extension type. If the extension doesn't match, we 3585 /// cannot use the information we had on the original type. 3586 /// BothExtension doesn't match any extension type. 3587 static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, 3588 Instruction *Opnd, 3589 bool IsSExt) { 3590 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; 3591 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 3592 if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) 3593 return It->second.getPointer(); 3594 return nullptr; 3595 } 3596 3597 /// Utility function to check whether or not a sign or zero extension 3598 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 3599 /// either using the operands of \p Inst or promoting \p Inst. 3600 /// The type of the extension is defined by \p IsSExt. 3601 /// In other words, check if: 3602 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 3603 /// #1 Promotion applies: 3604 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 3605 /// #2 Operand reuses: 3606 /// ext opnd1 to ConsideredExtType. 3607 /// \p PromotedInsts maps the instructions to their type before promotion. 3608 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 3609 const InstrToOrigTy &PromotedInsts, bool IsSExt); 3610 3611 /// Utility function to determine if \p OpIdx should be promoted when 3612 /// promoting \p Inst. 3613 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 3614 return !(isa<SelectInst>(Inst) && OpIdx == 0); 3615 } 3616 3617 /// Utility function to promote the operand of \p Ext when this 3618 /// operand is a promotable trunc or sext or zext. 3619 /// \p PromotedInsts maps the instructions to their type before promotion. 3620 /// \p CreatedInstsCost[out] contains the cost of all instructions 3621 /// created to promote the operand of Ext. 3622 /// Newly added extensions are inserted in \p Exts. 3623 /// Newly added truncates are inserted in \p Truncs. 3624 /// Should never be called directly. 3625 /// \return The promoted value which is used instead of Ext. 3626 static Value *promoteOperandForTruncAndAnyExt( 3627 Instruction *Ext, TypePromotionTransaction &TPT, 3628 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3629 SmallVectorImpl<Instruction *> *Exts, 3630 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 3631 3632 /// Utility function to promote the operand of \p Ext when this 3633 /// operand is promotable and is not a supported trunc or sext. 3634 /// \p PromotedInsts maps the instructions to their type before promotion. 3635 /// \p CreatedInstsCost[out] contains the cost of all the instructions 3636 /// created to promote the operand of Ext. 3637 /// Newly added extensions are inserted in \p Exts. 3638 /// Newly added truncates are inserted in \p Truncs. 3639 /// Should never be called directly. 3640 /// \return The promoted value which is used instead of Ext. 3641 static Value *promoteOperandForOther(Instruction *Ext, 3642 TypePromotionTransaction &TPT, 3643 InstrToOrigTy &PromotedInsts, 3644 unsigned &CreatedInstsCost, 3645 SmallVectorImpl<Instruction *> *Exts, 3646 SmallVectorImpl<Instruction *> *Truncs, 3647 const TargetLowering &TLI, bool IsSExt); 3648 3649 /// \see promoteOperandForOther. 3650 static Value *signExtendOperandForOther( 3651 Instruction *Ext, TypePromotionTransaction &TPT, 3652 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3653 SmallVectorImpl<Instruction *> *Exts, 3654 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3655 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3656 Exts, Truncs, TLI, true); 3657 } 3658 3659 /// \see promoteOperandForOther. 3660 static Value *zeroExtendOperandForOther( 3661 Instruction *Ext, TypePromotionTransaction &TPT, 3662 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3663 SmallVectorImpl<Instruction *> *Exts, 3664 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3665 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3666 Exts, Truncs, TLI, false); 3667 } 3668 3669 public: 3670 /// Type for the utility function that promotes the operand of Ext. 3671 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, 3672 InstrToOrigTy &PromotedInsts, 3673 unsigned &CreatedInstsCost, 3674 SmallVectorImpl<Instruction *> *Exts, 3675 SmallVectorImpl<Instruction *> *Truncs, 3676 const TargetLowering &TLI); 3677 3678 /// Given a sign/zero extend instruction \p Ext, return the appropriate 3679 /// action to promote the operand of \p Ext instead of using Ext. 3680 /// \return NULL if no promotable action is possible with the current 3681 /// sign extension. 3682 /// \p InsertedInsts keeps track of all the instructions inserted by the 3683 /// other CodeGenPrepare optimizations. This information is important 3684 /// because we do not want to promote these instructions as CodeGenPrepare 3685 /// will reinsert them later. Thus creating an infinite loop: create/remove. 3686 /// \p PromotedInsts maps the instructions to their type before promotion. 3687 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, 3688 const TargetLowering &TLI, 3689 const InstrToOrigTy &PromotedInsts); 3690 }; 3691 3692 } // end anonymous namespace 3693 3694 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 3695 Type *ConsideredExtType, 3696 const InstrToOrigTy &PromotedInsts, 3697 bool IsSExt) { 3698 // The promotion helper does not know how to deal with vector types yet. 3699 // To be able to fix that, we would need to fix the places where we 3700 // statically extend, e.g., constants and such. 3701 if (Inst->getType()->isVectorTy()) 3702 return false; 3703 3704 // We can always get through zext. 3705 if (isa<ZExtInst>(Inst)) 3706 return true; 3707 3708 // sext(sext) is ok too. 3709 if (IsSExt && isa<SExtInst>(Inst)) 3710 return true; 3711 3712 // We can get through binary operator, if it is legal. In other words, the 3713 // binary operator must have a nuw or nsw flag. 3714 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 3715 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && 3716 ((!IsSExt && BinOp->hasNoUnsignedWrap()) || 3717 (IsSExt && BinOp->hasNoSignedWrap()))) 3718 return true; 3719 3720 // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) 3721 if ((Inst->getOpcode() == Instruction::And || 3722 Inst->getOpcode() == Instruction::Or)) 3723 return true; 3724 3725 // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) 3726 if (Inst->getOpcode() == Instruction::Xor) { 3727 const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)); 3728 // Make sure it is not a NOT. 3729 if (Cst && !Cst->getValue().isAllOnesValue()) 3730 return true; 3731 } 3732 3733 // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst)) 3734 // It may change a poisoned value into a regular value, like 3735 // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12 3736 // poisoned value regular value 3737 // It should be OK since undef covers valid value. 3738 if (Inst->getOpcode() == Instruction::LShr && !IsSExt) 3739 return true; 3740 3741 // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) 3742 // It may change a poisoned value into a regular value, like 3743 // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 3744 // poisoned value regular value 3745 // It should be OK since undef covers valid value. 3746 if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { 3747 const Instruction *ExtInst = 3748 dyn_cast<const Instruction>(*Inst->user_begin()); 3749 if (ExtInst->hasOneUse()) { 3750 const Instruction *AndInst = 3751 dyn_cast<const Instruction>(*ExtInst->user_begin()); 3752 if (AndInst && AndInst->getOpcode() == Instruction::And) { 3753 const ConstantInt *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); 3754 if (Cst && 3755 Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) 3756 return true; 3757 } 3758 } 3759 } 3760 3761 // Check if we can do the following simplification. 3762 // ext(trunc(opnd)) --> ext(opnd) 3763 if (!isa<TruncInst>(Inst)) 3764 return false; 3765 3766 Value *OpndVal = Inst->getOperand(0); 3767 // Check if we can use this operand in the extension. 3768 // If the type is larger than the result type of the extension, we cannot. 3769 if (!OpndVal->getType()->isIntegerTy() || 3770 OpndVal->getType()->getIntegerBitWidth() > 3771 ConsideredExtType->getIntegerBitWidth()) 3772 return false; 3773 3774 // If the operand of the truncate is not an instruction, we will not have 3775 // any information on the dropped bits. 3776 // (Actually we could for constant but it is not worth the extra logic). 3777 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 3778 if (!Opnd) 3779 return false; 3780 3781 // Check if the source of the type is narrow enough. 3782 // I.e., check that trunc just drops extended bits of the same kind of 3783 // the extension. 3784 // #1 get the type of the operand and check the kind of the extended bits. 3785 const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); 3786 if (OpndType) 3787 ; 3788 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) 3789 OpndType = Opnd->getOperand(0)->getType(); 3790 else 3791 return false; 3792 3793 // #2 check that the truncate just drops extended bits. 3794 return Inst->getType()->getIntegerBitWidth() >= 3795 OpndType->getIntegerBitWidth(); 3796 } 3797 3798 TypePromotionHelper::Action TypePromotionHelper::getAction( 3799 Instruction *Ext, const SetOfInstrs &InsertedInsts, 3800 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 3801 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 3802 "Unexpected instruction type"); 3803 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); 3804 Type *ExtTy = Ext->getType(); 3805 bool IsSExt = isa<SExtInst>(Ext); 3806 // If the operand of the extension is not an instruction, we cannot 3807 // get through. 3808 // If it, check we can get through. 3809 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) 3810 return nullptr; 3811 3812 // Do not promote if the operand has been added by codegenprepare. 3813 // Otherwise, it means we are undoing an optimization that is likely to be 3814 // redone, thus causing potential infinite loop. 3815 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) 3816 return nullptr; 3817 3818 // SExt or Trunc instructions. 3819 // Return the related handler. 3820 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || 3821 isa<ZExtInst>(ExtOpnd)) 3822 return promoteOperandForTruncAndAnyExt; 3823 3824 // Regular instruction. 3825 // Abort early if we will have to insert non-free instructions. 3826 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) 3827 return nullptr; 3828 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; 3829 } 3830 3831 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 3832 Instruction *SExt, TypePromotionTransaction &TPT, 3833 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3834 SmallVectorImpl<Instruction *> *Exts, 3835 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3836 // By construction, the operand of SExt is an instruction. Otherwise we cannot 3837 // get through it and this method should not be called. 3838 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 3839 Value *ExtVal = SExt; 3840 bool HasMergedNonFreeExt = false; 3841 if (isa<ZExtInst>(SExtOpnd)) { 3842 // Replace s|zext(zext(opnd)) 3843 // => zext(opnd). 3844 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); 3845 Value *ZExt = 3846 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 3847 TPT.replaceAllUsesWith(SExt, ZExt); 3848 TPT.eraseInstruction(SExt); 3849 ExtVal = ZExt; 3850 } else { 3851 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) 3852 // => z|sext(opnd). 3853 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 3854 } 3855 CreatedInstsCost = 0; 3856 3857 // Remove dead code. 3858 if (SExtOpnd->use_empty()) 3859 TPT.eraseInstruction(SExtOpnd); 3860 3861 // Check if the extension is still needed. 3862 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 3863 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 3864 if (ExtInst) { 3865 if (Exts) 3866 Exts->push_back(ExtInst); 3867 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 3868 } 3869 return ExtVal; 3870 } 3871 3872 // At this point we have: ext ty opnd to ty. 3873 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 3874 Value *NextVal = ExtInst->getOperand(0); 3875 TPT.eraseInstruction(ExtInst, NextVal); 3876 return NextVal; 3877 } 3878 3879 Value *TypePromotionHelper::promoteOperandForOther( 3880 Instruction *Ext, TypePromotionTransaction &TPT, 3881 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3882 SmallVectorImpl<Instruction *> *Exts, 3883 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 3884 bool IsSExt) { 3885 // By construction, the operand of Ext is an instruction. Otherwise we cannot 3886 // get through it and this method should not be called. 3887 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 3888 CreatedInstsCost = 0; 3889 if (!ExtOpnd->hasOneUse()) { 3890 // ExtOpnd will be promoted. 3891 // All its uses, but Ext, will need to use a truncated value of the 3892 // promoted version. 3893 // Create the truncate now. 3894 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 3895 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 3896 // Insert it just after the definition. 3897 ITrunc->moveAfter(ExtOpnd); 3898 if (Truncs) 3899 Truncs->push_back(ITrunc); 3900 } 3901 3902 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 3903 // Restore the operand of Ext (which has been replaced by the previous call 3904 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 3905 TPT.setOperand(Ext, 0, ExtOpnd); 3906 } 3907 3908 // Get through the Instruction: 3909 // 1. Update its type. 3910 // 2. Replace the uses of Ext by Inst. 3911 // 3. Extend each operand that needs to be extended. 3912 3913 // Remember the original type of the instruction before promotion. 3914 // This is useful to know that the high bits are sign extended bits. 3915 addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); 3916 // Step #1. 3917 TPT.mutateType(ExtOpnd, Ext->getType()); 3918 // Step #2. 3919 TPT.replaceAllUsesWith(Ext, ExtOpnd); 3920 // Step #3. 3921 Instruction *ExtForOpnd = Ext; 3922 3923 LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); 3924 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 3925 ++OpIdx) { 3926 LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 3927 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 3928 !shouldExtOperand(ExtOpnd, OpIdx)) { 3929 LLVM_DEBUG(dbgs() << "No need to propagate\n"); 3930 continue; 3931 } 3932 // Check if we can statically extend the operand. 3933 Value *Opnd = ExtOpnd->getOperand(OpIdx); 3934 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 3935 LLVM_DEBUG(dbgs() << "Statically extend\n"); 3936 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 3937 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 3938 : Cst->getValue().zext(BitWidth); 3939 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 3940 continue; 3941 } 3942 // UndefValue are typed, so we have to statically sign extend them. 3943 if (isa<UndefValue>(Opnd)) { 3944 LLVM_DEBUG(dbgs() << "Statically extend\n"); 3945 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); 3946 continue; 3947 } 3948 3949 // Otherwise we have to explicitly sign extend the operand. 3950 // Check if Ext was reused to extend an operand. 3951 if (!ExtForOpnd) { 3952 // If yes, create a new one. 3953 LLVM_DEBUG(dbgs() << "More operands to ext\n"); 3954 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) 3955 : TPT.createZExt(Ext, Opnd, Ext->getType()); 3956 if (!isa<Instruction>(ValForExtOpnd)) { 3957 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); 3958 continue; 3959 } 3960 ExtForOpnd = cast<Instruction>(ValForExtOpnd); 3961 } 3962 if (Exts) 3963 Exts->push_back(ExtForOpnd); 3964 TPT.setOperand(ExtForOpnd, 0, Opnd); 3965 3966 // Move the sign extension before the insertion point. 3967 TPT.moveBefore(ExtForOpnd, ExtOpnd); 3968 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); 3969 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); 3970 // If more sext are required, new instructions will have to be created. 3971 ExtForOpnd = nullptr; 3972 } 3973 if (ExtForOpnd == Ext) { 3974 LLVM_DEBUG(dbgs() << "Extension is useless now\n"); 3975 TPT.eraseInstruction(Ext); 3976 } 3977 return ExtOpnd; 3978 } 3979 3980 /// Check whether or not promoting an instruction to a wider type is profitable. 3981 /// \p NewCost gives the cost of extension instructions created by the 3982 /// promotion. 3983 /// \p OldCost gives the cost of extension instructions before the promotion 3984 /// plus the number of instructions that have been 3985 /// matched in the addressing mode the promotion. 3986 /// \p PromotedOperand is the value that has been promoted. 3987 /// \return True if the promotion is profitable, false otherwise. 3988 bool AddressingModeMatcher::isPromotionProfitable( 3989 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { 3990 LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost 3991 << '\n'); 3992 // The cost of the new extensions is greater than the cost of the 3993 // old extension plus what we folded. 3994 // This is not profitable. 3995 if (NewCost > OldCost) 3996 return false; 3997 if (NewCost < OldCost) 3998 return true; 3999 // The promotion is neutral but it may help folding the sign extension in 4000 // loads for instance. 4001 // Check that we did not create an illegal instruction. 4002 return isPromotedInstructionLegal(TLI, DL, PromotedOperand); 4003 } 4004 4005 /// Given an instruction or constant expr, see if we can fold the operation 4006 /// into the addressing mode. If so, update the addressing mode and return 4007 /// true, otherwise return false without modifying AddrMode. 4008 /// If \p MovedAway is not NULL, it contains the information of whether or 4009 /// not AddrInst has to be folded into the addressing mode on success. 4010 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing 4011 /// because it has been moved away. 4012 /// Thus AddrInst must not be added in the matched instructions. 4013 /// This state can happen when AddrInst is a sext, since it may be moved away. 4014 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 4015 /// not be referenced anymore. 4016 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, 4017 unsigned Depth, 4018 bool *MovedAway) { 4019 // Avoid exponential behavior on extremely deep expression trees. 4020 if (Depth >= 5) return false; 4021 4022 // By default, all matched instructions stay in place. 4023 if (MovedAway) 4024 *MovedAway = false; 4025 4026 switch (Opcode) { 4027 case Instruction::PtrToInt: 4028 // PtrToInt is always a noop, as we know that the int type is pointer sized. 4029 return matchAddr(AddrInst->getOperand(0), Depth); 4030 case Instruction::IntToPtr: { 4031 auto AS = AddrInst->getType()->getPointerAddressSpace(); 4032 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 4033 // This inttoptr is a no-op if the integer type is pointer sized. 4034 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 4035 return matchAddr(AddrInst->getOperand(0), Depth); 4036 return false; 4037 } 4038 case Instruction::BitCast: 4039 // BitCast is always a noop, and we can handle it as long as it is 4040 // int->int or pointer->pointer (we don't want int<->fp or something). 4041 if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && 4042 // Don't touch identity bitcasts. These were probably put here by LSR, 4043 // and we don't want to mess around with them. Assume it knows what it 4044 // is doing. 4045 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 4046 return matchAddr(AddrInst->getOperand(0), Depth); 4047 return false; 4048 case Instruction::AddrSpaceCast: { 4049 unsigned SrcAS 4050 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 4051 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 4052 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 4053 return matchAddr(AddrInst->getOperand(0), Depth); 4054 return false; 4055 } 4056 case Instruction::Add: { 4057 // Check to see if we can merge in the RHS then the LHS. If so, we win. 4058 ExtAddrMode BackupAddrMode = AddrMode; 4059 unsigned OldSize = AddrModeInsts.size(); 4060 // Start a transaction at this point. 4061 // The LHS may match but not the RHS. 4062 // Therefore, we need a higher level restoration point to undo partially 4063 // matched operation. 4064 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4065 TPT.getRestorationPoint(); 4066 4067 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 4068 matchAddr(AddrInst->getOperand(0), Depth+1)) 4069 return true; 4070 4071 // Restore the old addr mode info. 4072 AddrMode = BackupAddrMode; 4073 AddrModeInsts.resize(OldSize); 4074 TPT.rollback(LastKnownGood); 4075 4076 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 4077 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 4078 matchAddr(AddrInst->getOperand(1), Depth+1)) 4079 return true; 4080 4081 // Otherwise we definitely can't merge the ADD in. 4082 AddrMode = BackupAddrMode; 4083 AddrModeInsts.resize(OldSize); 4084 TPT.rollback(LastKnownGood); 4085 break; 4086 } 4087 //case Instruction::Or: 4088 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 4089 //break; 4090 case Instruction::Mul: 4091 case Instruction::Shl: { 4092 // Can only handle X*C and X << C. 4093 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 4094 if (!RHS || RHS->getBitWidth() > 64) 4095 return false; 4096 int64_t Scale = RHS->getSExtValue(); 4097 if (Opcode == Instruction::Shl) 4098 Scale = 1LL << Scale; 4099 4100 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 4101 } 4102 case Instruction::GetElementPtr: { 4103 // Scan the GEP. We check it if it contains constant offsets and at most 4104 // one variable offset. 4105 int VariableOperand = -1; 4106 unsigned VariableScale = 0; 4107 4108 int64_t ConstantOffset = 0; 4109 gep_type_iterator GTI = gep_type_begin(AddrInst); 4110 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 4111 if (StructType *STy = GTI.getStructTypeOrNull()) { 4112 const StructLayout *SL = DL.getStructLayout(STy); 4113 unsigned Idx = 4114 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 4115 ConstantOffset += SL->getElementOffset(Idx); 4116 } else { 4117 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); 4118 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 4119 const APInt &CVal = CI->getValue(); 4120 if (CVal.getMinSignedBits() <= 64) { 4121 ConstantOffset += CVal.getSExtValue() * TypeSize; 4122 continue; 4123 } 4124 } 4125 if (TypeSize) { // Scales of zero don't do anything. 4126 // We only allow one variable index at the moment. 4127 if (VariableOperand != -1) 4128 return false; 4129 4130 // Remember the variable index. 4131 VariableOperand = i; 4132 VariableScale = TypeSize; 4133 } 4134 } 4135 } 4136 4137 // A common case is for the GEP to only do a constant offset. In this case, 4138 // just add it to the disp field and check validity. 4139 if (VariableOperand == -1) { 4140 AddrMode.BaseOffs += ConstantOffset; 4141 if (ConstantOffset == 0 || 4142 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 4143 // Check to see if we can fold the base pointer in too. 4144 if (matchAddr(AddrInst->getOperand(0), Depth+1)) 4145 return true; 4146 } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && 4147 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && 4148 ConstantOffset > 0) { 4149 // Record GEPs with non-zero offsets as candidates for splitting in the 4150 // event that the offset cannot fit into the r+i addressing mode. 4151 // Simple and common case that only one GEP is used in calculating the 4152 // address for the memory access. 4153 Value *Base = AddrInst->getOperand(0); 4154 auto *BaseI = dyn_cast<Instruction>(Base); 4155 auto *GEP = cast<GetElementPtrInst>(AddrInst); 4156 if (isa<Argument>(Base) || isa<GlobalValue>(Base) || 4157 (BaseI && !isa<CastInst>(BaseI) && 4158 !isa<GetElementPtrInst>(BaseI))) { 4159 // If the base is an instruction, make sure the GEP is not in the same 4160 // basic block as the base. If the base is an argument or global 4161 // value, make sure the GEP is not in the entry block. Otherwise, 4162 // instruction selection can undo the split. Also make sure the 4163 // parent block allows inserting non-PHI instructions before the 4164 // terminator. 4165 BasicBlock *Parent = 4166 BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); 4167 if (GEP->getParent() != Parent && !Parent->getTerminator()->isEHPad()) 4168 LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); 4169 } 4170 } 4171 AddrMode.BaseOffs -= ConstantOffset; 4172 return false; 4173 } 4174 4175 // Save the valid addressing mode in case we can't match. 4176 ExtAddrMode BackupAddrMode = AddrMode; 4177 unsigned OldSize = AddrModeInsts.size(); 4178 4179 // See if the scale and offset amount is valid for this target. 4180 AddrMode.BaseOffs += ConstantOffset; 4181 4182 // Match the base operand of the GEP. 4183 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 4184 // If it couldn't be matched, just stuff the value in a register. 4185 if (AddrMode.HasBaseReg) { 4186 AddrMode = BackupAddrMode; 4187 AddrModeInsts.resize(OldSize); 4188 return false; 4189 } 4190 AddrMode.HasBaseReg = true; 4191 AddrMode.BaseReg = AddrInst->getOperand(0); 4192 } 4193 4194 // Match the remaining variable portion of the GEP. 4195 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 4196 Depth)) { 4197 // If it couldn't be matched, try stuffing the base into a register 4198 // instead of matching it, and retrying the match of the scale. 4199 AddrMode = BackupAddrMode; 4200 AddrModeInsts.resize(OldSize); 4201 if (AddrMode.HasBaseReg) 4202 return false; 4203 AddrMode.HasBaseReg = true; 4204 AddrMode.BaseReg = AddrInst->getOperand(0); 4205 AddrMode.BaseOffs += ConstantOffset; 4206 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 4207 VariableScale, Depth)) { 4208 // If even that didn't work, bail. 4209 AddrMode = BackupAddrMode; 4210 AddrModeInsts.resize(OldSize); 4211 return false; 4212 } 4213 } 4214 4215 return true; 4216 } 4217 case Instruction::SExt: 4218 case Instruction::ZExt: { 4219 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 4220 if (!Ext) 4221 return false; 4222 4223 // Try to move this ext out of the way of the addressing mode. 4224 // Ask for a method for doing so. 4225 TypePromotionHelper::Action TPH = 4226 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 4227 if (!TPH) 4228 return false; 4229 4230 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4231 TPT.getRestorationPoint(); 4232 unsigned CreatedInstsCost = 0; 4233 unsigned ExtCost = !TLI.isExtFree(Ext); 4234 Value *PromotedOperand = 4235 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 4236 // SExt has been moved away. 4237 // Thus either it will be rematched later in the recursive calls or it is 4238 // gone. Anyway, we must not fold it into the addressing mode at this point. 4239 // E.g., 4240 // op = add opnd, 1 4241 // idx = ext op 4242 // addr = gep base, idx 4243 // is now: 4244 // promotedOpnd = ext opnd <- no match here 4245 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 4246 // addr = gep base, op <- match 4247 if (MovedAway) 4248 *MovedAway = true; 4249 4250 assert(PromotedOperand && 4251 "TypePromotionHelper should have filtered out those cases"); 4252 4253 ExtAddrMode BackupAddrMode = AddrMode; 4254 unsigned OldSize = AddrModeInsts.size(); 4255 4256 if (!matchAddr(PromotedOperand, Depth) || 4257 // The total of the new cost is equal to the cost of the created 4258 // instructions. 4259 // The total of the old cost is equal to the cost of the extension plus 4260 // what we have saved in the addressing mode. 4261 !isPromotionProfitable(CreatedInstsCost, 4262 ExtCost + (AddrModeInsts.size() - OldSize), 4263 PromotedOperand)) { 4264 AddrMode = BackupAddrMode; 4265 AddrModeInsts.resize(OldSize); 4266 LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 4267 TPT.rollback(LastKnownGood); 4268 return false; 4269 } 4270 return true; 4271 } 4272 } 4273 return false; 4274 } 4275 4276 /// If we can, try to add the value of 'Addr' into the current addressing mode. 4277 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 4278 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 4279 /// for the target. 4280 /// 4281 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 4282 // Start a transaction at this point that we will rollback if the matching 4283 // fails. 4284 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4285 TPT.getRestorationPoint(); 4286 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 4287 // Fold in immediates if legal for the target. 4288 AddrMode.BaseOffs += CI->getSExtValue(); 4289 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4290 return true; 4291 AddrMode.BaseOffs -= CI->getSExtValue(); 4292 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 4293 // If this is a global variable, try to fold it into the addressing mode. 4294 if (!AddrMode.BaseGV) { 4295 AddrMode.BaseGV = GV; 4296 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4297 return true; 4298 AddrMode.BaseGV = nullptr; 4299 } 4300 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 4301 ExtAddrMode BackupAddrMode = AddrMode; 4302 unsigned OldSize = AddrModeInsts.size(); 4303 4304 // Check to see if it is possible to fold this operation. 4305 bool MovedAway = false; 4306 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 4307 // This instruction may have been moved away. If so, there is nothing 4308 // to check here. 4309 if (MovedAway) 4310 return true; 4311 // Okay, it's possible to fold this. Check to see if it is actually 4312 // *profitable* to do so. We use a simple cost model to avoid increasing 4313 // register pressure too much. 4314 if (I->hasOneUse() || 4315 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 4316 AddrModeInsts.push_back(I); 4317 return true; 4318 } 4319 4320 // It isn't profitable to do this, roll back. 4321 //cerr << "NOT FOLDING: " << *I; 4322 AddrMode = BackupAddrMode; 4323 AddrModeInsts.resize(OldSize); 4324 TPT.rollback(LastKnownGood); 4325 } 4326 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 4327 if (matchOperationAddr(CE, CE->getOpcode(), Depth)) 4328 return true; 4329 TPT.rollback(LastKnownGood); 4330 } else if (isa<ConstantPointerNull>(Addr)) { 4331 // Null pointer gets folded without affecting the addressing mode. 4332 return true; 4333 } 4334 4335 // Worse case, the target should support [reg] addressing modes. :) 4336 if (!AddrMode.HasBaseReg) { 4337 AddrMode.HasBaseReg = true; 4338 AddrMode.BaseReg = Addr; 4339 // Still check for legality in case the target supports [imm] but not [i+r]. 4340 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4341 return true; 4342 AddrMode.HasBaseReg = false; 4343 AddrMode.BaseReg = nullptr; 4344 } 4345 4346 // If the base register is already taken, see if we can do [r+r]. 4347 if (AddrMode.Scale == 0) { 4348 AddrMode.Scale = 1; 4349 AddrMode.ScaledReg = Addr; 4350 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4351 return true; 4352 AddrMode.Scale = 0; 4353 AddrMode.ScaledReg = nullptr; 4354 } 4355 // Couldn't match. 4356 TPT.rollback(LastKnownGood); 4357 return false; 4358 } 4359 4360 /// Check to see if all uses of OpVal by the specified inline asm call are due 4361 /// to memory operands. If so, return true, otherwise return false. 4362 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 4363 const TargetLowering &TLI, 4364 const TargetRegisterInfo &TRI) { 4365 const Function *F = CI->getFunction(); 4366 TargetLowering::AsmOperandInfoVector TargetConstraints = 4367 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, 4368 ImmutableCallSite(CI)); 4369 4370 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4371 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4372 4373 // Compute the constraint code and ConstraintType to use. 4374 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 4375 4376 // If this asm operand is our Value*, and if it isn't an indirect memory 4377 // operand, we can't fold it! 4378 if (OpInfo.CallOperandVal == OpVal && 4379 (OpInfo.ConstraintType != TargetLowering::C_Memory || 4380 !OpInfo.isIndirect)) 4381 return false; 4382 } 4383 4384 return true; 4385 } 4386 4387 // Max number of memory uses to look at before aborting the search to conserve 4388 // compile time. 4389 static constexpr int MaxMemoryUsesToScan = 20; 4390 4391 /// Recursively walk all the uses of I until we find a memory use. 4392 /// If we find an obviously non-foldable instruction, return true. 4393 /// Add the ultimately found memory instructions to MemoryUses. 4394 static bool FindAllMemoryUses( 4395 Instruction *I, 4396 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 4397 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, 4398 const TargetRegisterInfo &TRI, int SeenInsts = 0) { 4399 // If we already considered this instruction, we're done. 4400 if (!ConsideredInsts.insert(I).second) 4401 return false; 4402 4403 // If this is an obviously unfoldable instruction, bail out. 4404 if (!MightBeFoldableInst(I)) 4405 return true; 4406 4407 const bool OptSize = I->getFunction()->optForSize(); 4408 4409 // Loop over all the uses, recursively processing them. 4410 for (Use &U : I->uses()) { 4411 // Conservatively return true if we're seeing a large number or a deep chain 4412 // of users. This avoids excessive compilation times in pathological cases. 4413 if (SeenInsts++ >= MaxMemoryUsesToScan) 4414 return true; 4415 4416 Instruction *UserI = cast<Instruction>(U.getUser()); 4417 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 4418 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 4419 continue; 4420 } 4421 4422 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 4423 unsigned opNo = U.getOperandNo(); 4424 if (opNo != StoreInst::getPointerOperandIndex()) 4425 return true; // Storing addr, not into addr. 4426 MemoryUses.push_back(std::make_pair(SI, opNo)); 4427 continue; 4428 } 4429 4430 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 4431 unsigned opNo = U.getOperandNo(); 4432 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 4433 return true; // Storing addr, not into addr. 4434 MemoryUses.push_back(std::make_pair(RMW, opNo)); 4435 continue; 4436 } 4437 4438 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 4439 unsigned opNo = U.getOperandNo(); 4440 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 4441 return true; // Storing addr, not into addr. 4442 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 4443 continue; 4444 } 4445 4446 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 4447 // If this is a cold call, we can sink the addressing calculation into 4448 // the cold path. See optimizeCallInst 4449 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 4450 continue; 4451 4452 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 4453 if (!IA) return true; 4454 4455 // If this is a memory operand, we're cool, otherwise bail out. 4456 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 4457 return true; 4458 continue; 4459 } 4460 4461 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, 4462 SeenInsts)) 4463 return true; 4464 } 4465 4466 return false; 4467 } 4468 4469 /// Return true if Val is already known to be live at the use site that we're 4470 /// folding it into. If so, there is no cost to include it in the addressing 4471 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 4472 /// instruction already. 4473 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 4474 Value *KnownLive2) { 4475 // If Val is either of the known-live values, we know it is live! 4476 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 4477 return true; 4478 4479 // All values other than instructions and arguments (e.g. constants) are live. 4480 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 4481 4482 // If Val is a constant sized alloca in the entry block, it is live, this is 4483 // true because it is just a reference to the stack/frame pointer, which is 4484 // live for the whole function. 4485 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 4486 if (AI->isStaticAlloca()) 4487 return true; 4488 4489 // Check to see if this value is already used in the memory instruction's 4490 // block. If so, it's already live into the block at the very least, so we 4491 // can reasonably fold it. 4492 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 4493 } 4494 4495 /// It is possible for the addressing mode of the machine to fold the specified 4496 /// instruction into a load or store that ultimately uses it. 4497 /// However, the specified instruction has multiple uses. 4498 /// Given this, it may actually increase register pressure to fold it 4499 /// into the load. For example, consider this code: 4500 /// 4501 /// X = ... 4502 /// Y = X+1 4503 /// use(Y) -> nonload/store 4504 /// Z = Y+1 4505 /// load Z 4506 /// 4507 /// In this case, Y has multiple uses, and can be folded into the load of Z 4508 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 4509 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 4510 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 4511 /// number of computations either. 4512 /// 4513 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 4514 /// X was live across 'load Z' for other reasons, we actually *would* want to 4515 /// fold the addressing mode in the Z case. This would make Y die earlier. 4516 bool AddressingModeMatcher:: 4517 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 4518 ExtAddrMode &AMAfter) { 4519 if (IgnoreProfitability) return true; 4520 4521 // AMBefore is the addressing mode before this instruction was folded into it, 4522 // and AMAfter is the addressing mode after the instruction was folded. Get 4523 // the set of registers referenced by AMAfter and subtract out those 4524 // referenced by AMBefore: this is the set of values which folding in this 4525 // address extends the lifetime of. 4526 // 4527 // Note that there are only two potential values being referenced here, 4528 // BaseReg and ScaleReg (global addresses are always available, as are any 4529 // folded immediates). 4530 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 4531 4532 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 4533 // lifetime wasn't extended by adding this instruction. 4534 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4535 BaseReg = nullptr; 4536 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4537 ScaledReg = nullptr; 4538 4539 // If folding this instruction (and it's subexprs) didn't extend any live 4540 // ranges, we're ok with it. 4541 if (!BaseReg && !ScaledReg) 4542 return true; 4543 4544 // If all uses of this instruction can have the address mode sunk into them, 4545 // we can remove the addressing mode and effectively trade one live register 4546 // for another (at worst.) In this context, folding an addressing mode into 4547 // the use is just a particularly nice way of sinking it. 4548 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 4549 SmallPtrSet<Instruction*, 16> ConsideredInsts; 4550 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI)) 4551 return false; // Has a non-memory, non-foldable use! 4552 4553 // Now that we know that all uses of this instruction are part of a chain of 4554 // computation involving only operations that could theoretically be folded 4555 // into a memory use, loop over each of these memory operation uses and see 4556 // if they could *actually* fold the instruction. The assumption is that 4557 // addressing modes are cheap and that duplicating the computation involved 4558 // many times is worthwhile, even on a fastpath. For sinking candidates 4559 // (i.e. cold call sites), this serves as a way to prevent excessive code 4560 // growth since most architectures have some reasonable small and fast way to 4561 // compute an effective address. (i.e LEA on x86) 4562 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 4563 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 4564 Instruction *User = MemoryUses[i].first; 4565 unsigned OpNo = MemoryUses[i].second; 4566 4567 // Get the access type of this use. If the use isn't a pointer, we don't 4568 // know what it accesses. 4569 Value *Address = User->getOperand(OpNo); 4570 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); 4571 if (!AddrTy) 4572 return false; 4573 Type *AddressAccessTy = AddrTy->getElementType(); 4574 unsigned AS = AddrTy->getAddressSpace(); 4575 4576 // Do a match against the root of this address, ignoring profitability. This 4577 // will tell us if the addressing mode for the memory operation will 4578 // *actually* cover the shared instruction. 4579 ExtAddrMode Result; 4580 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, 4581 0); 4582 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4583 TPT.getRestorationPoint(); 4584 AddressingModeMatcher Matcher( 4585 MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result, 4586 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP); 4587 Matcher.IgnoreProfitability = true; 4588 bool Success = Matcher.matchAddr(Address, 0); 4589 (void)Success; assert(Success && "Couldn't select *anything*?"); 4590 4591 // The match was to check the profitability, the changes made are not 4592 // part of the original matcher. Therefore, they should be dropped 4593 // otherwise the original matcher will not present the right state. 4594 TPT.rollback(LastKnownGood); 4595 4596 // If the match didn't cover I, then it won't be shared by it. 4597 if (!is_contained(MatchedAddrModeInsts, I)) 4598 return false; 4599 4600 MatchedAddrModeInsts.clear(); 4601 } 4602 4603 return true; 4604 } 4605 4606 /// Return true if the specified values are defined in a 4607 /// different basic block than BB. 4608 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 4609 if (Instruction *I = dyn_cast<Instruction>(V)) 4610 return I->getParent() != BB; 4611 return false; 4612 } 4613 4614 /// Sink addressing mode computation immediate before MemoryInst if doing so 4615 /// can be done without increasing register pressure. The need for the 4616 /// register pressure constraint means this can end up being an all or nothing 4617 /// decision for all uses of the same addressing computation. 4618 /// 4619 /// Load and Store Instructions often have addressing modes that can do 4620 /// significant amounts of computation. As such, instruction selection will try 4621 /// to get the load or store to do as much computation as possible for the 4622 /// program. The problem is that isel can only see within a single block. As 4623 /// such, we sink as much legal addressing mode work into the block as possible. 4624 /// 4625 /// This method is used to optimize both load/store and inline asms with memory 4626 /// operands. It's also used to sink addressing computations feeding into cold 4627 /// call sites into their (cold) basic block. 4628 /// 4629 /// The motivation for handling sinking into cold blocks is that doing so can 4630 /// both enable other address mode sinking (by satisfying the register pressure 4631 /// constraint above), and reduce register pressure globally (by removing the 4632 /// addressing mode computation from the fast path entirely.). 4633 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 4634 Type *AccessTy, unsigned AddrSpace) { 4635 Value *Repl = Addr; 4636 4637 // Try to collapse single-value PHI nodes. This is necessary to undo 4638 // unprofitable PRE transformations. 4639 SmallVector<Value*, 8> worklist; 4640 SmallPtrSet<Value*, 16> Visited; 4641 worklist.push_back(Addr); 4642 4643 // Use a worklist to iteratively look through PHI and select nodes, and 4644 // ensure that the addressing mode obtained from the non-PHI/select roots of 4645 // the graph are compatible. 4646 bool PhiOrSelectSeen = false; 4647 SmallVector<Instruction*, 16> AddrModeInsts; 4648 const SimplifyQuery SQ(*DL, TLInfo); 4649 AddressingModeCombiner AddrModes(SQ, Addr); 4650 TypePromotionTransaction TPT(RemovedInsts); 4651 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4652 TPT.getRestorationPoint(); 4653 while (!worklist.empty()) { 4654 Value *V = worklist.back(); 4655 worklist.pop_back(); 4656 4657 // We allow traversing cyclic Phi nodes. 4658 // In case of success after this loop we ensure that traversing through 4659 // Phi nodes ends up with all cases to compute address of the form 4660 // BaseGV + Base + Scale * Index + Offset 4661 // where Scale and Offset are constans and BaseGV, Base and Index 4662 // are exactly the same Values in all cases. 4663 // It means that BaseGV, Scale and Offset dominate our memory instruction 4664 // and have the same value as they had in address computation represented 4665 // as Phi. So we can safely sink address computation to memory instruction. 4666 if (!Visited.insert(V).second) 4667 continue; 4668 4669 // For a PHI node, push all of its incoming values. 4670 if (PHINode *P = dyn_cast<PHINode>(V)) { 4671 for (Value *IncValue : P->incoming_values()) 4672 worklist.push_back(IncValue); 4673 PhiOrSelectSeen = true; 4674 continue; 4675 } 4676 // Similar for select. 4677 if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 4678 worklist.push_back(SI->getFalseValue()); 4679 worklist.push_back(SI->getTrueValue()); 4680 PhiOrSelectSeen = true; 4681 continue; 4682 } 4683 4684 // For non-PHIs, determine the addressing mode being computed. Note that 4685 // the result may differ depending on what other uses our candidate 4686 // addressing instructions might have. 4687 AddrModeInsts.clear(); 4688 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, 4689 0); 4690 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 4691 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI, 4692 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP); 4693 4694 GetElementPtrInst *GEP = LargeOffsetGEP.first; 4695 if (GEP && GEP->getParent() != MemoryInst->getParent() && 4696 !NewGEPBases.count(GEP)) { 4697 // If splitting the underlying data structure can reduce the offset of a 4698 // GEP, collect the GEP. Skip the GEPs that are the new bases of 4699 // previously split data structures. 4700 LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP); 4701 if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end()) 4702 LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size(); 4703 } 4704 4705 NewAddrMode.OriginalValue = V; 4706 if (!AddrModes.addNewAddrMode(NewAddrMode)) 4707 break; 4708 } 4709 4710 // Try to combine the AddrModes we've collected. If we couldn't collect any, 4711 // or we have multiple but either couldn't combine them or combining them 4712 // wouldn't do anything useful, bail out now. 4713 if (!AddrModes.combineAddrModes()) { 4714 TPT.rollback(LastKnownGood); 4715 return false; 4716 } 4717 TPT.commit(); 4718 4719 // Get the combined AddrMode (or the only AddrMode, if we only had one). 4720 ExtAddrMode AddrMode = AddrModes.getAddrMode(); 4721 4722 // If all the instructions matched are already in this BB, don't do anything. 4723 // If we saw a Phi node then it is not local definitely, and if we saw a select 4724 // then we want to push the address calculation past it even if it's already 4725 // in this BB. 4726 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { 4727 return IsNonLocalValue(V, MemoryInst->getParent()); 4728 })) { 4729 LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode 4730 << "\n"); 4731 return false; 4732 } 4733 4734 // Insert this computation right after this user. Since our caller is 4735 // scanning from the top of the BB to the bottom, reuse of the expr are 4736 // guaranteed to happen later. 4737 IRBuilder<> Builder(MemoryInst); 4738 4739 // Now that we determined the addressing expression we want to use and know 4740 // that we have to sink it into this block. Check to see if we have already 4741 // done this for some other load/store instr in this block. If so, reuse 4742 // the computation. Before attempting reuse, check if the address is valid 4743 // as it may have been erased. 4744 4745 WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; 4746 4747 Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; 4748 if (SunkAddr) { 4749 LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode 4750 << " for " << *MemoryInst << "\n"); 4751 if (SunkAddr->getType() != Addr->getType()) 4752 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4753 } else if (AddrSinkUsingGEPs || 4754 (!AddrSinkUsingGEPs.getNumOccurrences() && TM && TTI->useAA())) { 4755 // By default, we use the GEP-based method when AA is used later. This 4756 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 4757 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 4758 << " for " << *MemoryInst << "\n"); 4759 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4760 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 4761 4762 // First, find the pointer. 4763 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 4764 ResultPtr = AddrMode.BaseReg; 4765 AddrMode.BaseReg = nullptr; 4766 } 4767 4768 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 4769 // We can't add more than one pointer together, nor can we scale a 4770 // pointer (both of which seem meaningless). 4771 if (ResultPtr || AddrMode.Scale != 1) 4772 return false; 4773 4774 ResultPtr = AddrMode.ScaledReg; 4775 AddrMode.Scale = 0; 4776 } 4777 4778 // It is only safe to sign extend the BaseReg if we know that the math 4779 // required to create it did not overflow before we extend it. Since 4780 // the original IR value was tossed in favor of a constant back when 4781 // the AddrMode was created we need to bail out gracefully if widths 4782 // do not match instead of extending it. 4783 // 4784 // (See below for code to add the scale.) 4785 if (AddrMode.Scale) { 4786 Type *ScaledRegTy = AddrMode.ScaledReg->getType(); 4787 if (cast<IntegerType>(IntPtrTy)->getBitWidth() > 4788 cast<IntegerType>(ScaledRegTy)->getBitWidth()) 4789 return false; 4790 } 4791 4792 if (AddrMode.BaseGV) { 4793 if (ResultPtr) 4794 return false; 4795 4796 ResultPtr = AddrMode.BaseGV; 4797 } 4798 4799 // If the real base value actually came from an inttoptr, then the matcher 4800 // will look through it and provide only the integer value. In that case, 4801 // use it here. 4802 if (!DL->isNonIntegralPointerType(Addr->getType())) { 4803 if (!ResultPtr && AddrMode.BaseReg) { 4804 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), 4805 "sunkaddr"); 4806 AddrMode.BaseReg = nullptr; 4807 } else if (!ResultPtr && AddrMode.Scale == 1) { 4808 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), 4809 "sunkaddr"); 4810 AddrMode.Scale = 0; 4811 } 4812 } 4813 4814 if (!ResultPtr && 4815 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 4816 SunkAddr = Constant::getNullValue(Addr->getType()); 4817 } else if (!ResultPtr) { 4818 return false; 4819 } else { 4820 Type *I8PtrTy = 4821 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 4822 Type *I8Ty = Builder.getInt8Ty(); 4823 4824 // Start with the base register. Do this first so that subsequent address 4825 // matching finds it last, which will prevent it from trying to match it 4826 // as the scaled value in case it happens to be a mul. That would be 4827 // problematic if we've sunk a different mul for the scale, because then 4828 // we'd end up sinking both muls. 4829 if (AddrMode.BaseReg) { 4830 Value *V = AddrMode.BaseReg; 4831 if (V->getType() != IntPtrTy) 4832 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4833 4834 ResultIndex = V; 4835 } 4836 4837 // Add the scale value. 4838 if (AddrMode.Scale) { 4839 Value *V = AddrMode.ScaledReg; 4840 if (V->getType() == IntPtrTy) { 4841 // done. 4842 } else { 4843 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < 4844 cast<IntegerType>(V->getType())->getBitWidth() && 4845 "We can't transform if ScaledReg is too narrow"); 4846 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4847 } 4848 4849 if (AddrMode.Scale != 1) 4850 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4851 "sunkaddr"); 4852 if (ResultIndex) 4853 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 4854 else 4855 ResultIndex = V; 4856 } 4857 4858 // Add in the Base Offset if present. 4859 if (AddrMode.BaseOffs) { 4860 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4861 if (ResultIndex) { 4862 // We need to add this separately from the scale above to help with 4863 // SDAG consecutive load/store merging. 4864 if (ResultPtr->getType() != I8PtrTy) 4865 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4866 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4867 } 4868 4869 ResultIndex = V; 4870 } 4871 4872 if (!ResultIndex) { 4873 SunkAddr = ResultPtr; 4874 } else { 4875 if (ResultPtr->getType() != I8PtrTy) 4876 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 4877 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 4878 } 4879 4880 if (SunkAddr->getType() != Addr->getType()) 4881 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 4882 } 4883 } else { 4884 // We'd require a ptrtoint/inttoptr down the line, which we can't do for 4885 // non-integral pointers, so in that case bail out now. 4886 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; 4887 Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; 4888 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); 4889 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); 4890 if (DL->isNonIntegralPointerType(Addr->getType()) || 4891 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || 4892 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || 4893 (AddrMode.BaseGV && 4894 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) 4895 return false; 4896 4897 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 4898 << " for " << *MemoryInst << "\n"); 4899 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 4900 Value *Result = nullptr; 4901 4902 // Start with the base register. Do this first so that subsequent address 4903 // matching finds it last, which will prevent it from trying to match it 4904 // as the scaled value in case it happens to be a mul. That would be 4905 // problematic if we've sunk a different mul for the scale, because then 4906 // we'd end up sinking both muls. 4907 if (AddrMode.BaseReg) { 4908 Value *V = AddrMode.BaseReg; 4909 if (V->getType()->isPointerTy()) 4910 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4911 if (V->getType() != IntPtrTy) 4912 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 4913 Result = V; 4914 } 4915 4916 // Add the scale value. 4917 if (AddrMode.Scale) { 4918 Value *V = AddrMode.ScaledReg; 4919 if (V->getType() == IntPtrTy) { 4920 // done. 4921 } else if (V->getType()->isPointerTy()) { 4922 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 4923 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 4924 cast<IntegerType>(V->getType())->getBitWidth()) { 4925 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 4926 } else { 4927 // It is only safe to sign extend the BaseReg if we know that the math 4928 // required to create it did not overflow before we extend it. Since 4929 // the original IR value was tossed in favor of a constant back when 4930 // the AddrMode was created we need to bail out gracefully if widths 4931 // do not match instead of extending it. 4932 Instruction *I = dyn_cast_or_null<Instruction>(Result); 4933 if (I && (Result != AddrMode.BaseReg)) 4934 I->eraseFromParent(); 4935 return false; 4936 } 4937 if (AddrMode.Scale != 1) 4938 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 4939 "sunkaddr"); 4940 if (Result) 4941 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4942 else 4943 Result = V; 4944 } 4945 4946 // Add in the BaseGV if present. 4947 if (AddrMode.BaseGV) { 4948 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 4949 if (Result) 4950 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4951 else 4952 Result = V; 4953 } 4954 4955 // Add in the Base Offset if present. 4956 if (AddrMode.BaseOffs) { 4957 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 4958 if (Result) 4959 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 4960 else 4961 Result = V; 4962 } 4963 4964 if (!Result) 4965 SunkAddr = Constant::getNullValue(Addr->getType()); 4966 else 4967 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 4968 } 4969 4970 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 4971 // Store the newly computed address into the cache. In the case we reused a 4972 // value, this should be idempotent. 4973 SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); 4974 4975 // If we have no uses, recursively delete the value and all dead instructions 4976 // using it. 4977 if (Repl->use_empty()) { 4978 // This can cause recursive deletion, which can invalidate our iterator. 4979 // Use a WeakTrackingVH to hold onto it in case this happens. 4980 Value *CurValue = &*CurInstIterator; 4981 WeakTrackingVH IterHandle(CurValue); 4982 BasicBlock *BB = CurInstIterator->getParent(); 4983 4984 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 4985 4986 if (IterHandle != CurValue) { 4987 // If the iterator instruction was recursively deleted, start over at the 4988 // start of the block. 4989 CurInstIterator = BB->begin(); 4990 SunkAddrs.clear(); 4991 } 4992 } 4993 ++NumMemoryInsts; 4994 return true; 4995 } 4996 4997 /// If there are any memory operands, use OptimizeMemoryInst to sink their 4998 /// address computing into the block when possible / profitable. 4999 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 5000 bool MadeChange = false; 5001 5002 const TargetRegisterInfo *TRI = 5003 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); 5004 TargetLowering::AsmOperandInfoVector TargetConstraints = 5005 TLI->ParseConstraints(*DL, TRI, CS); 5006 unsigned ArgNo = 0; 5007 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 5008 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 5009 5010 // Compute the constraint code and ConstraintType to use. 5011 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 5012 5013 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 5014 OpInfo.isIndirect) { 5015 Value *OpVal = CS->getArgOperand(ArgNo++); 5016 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 5017 } else if (OpInfo.Type == InlineAsm::isInput) 5018 ArgNo++; 5019 } 5020 5021 return MadeChange; 5022 } 5023 5024 /// Check if all the uses of \p Val are equivalent (or free) zero or 5025 /// sign extensions. 5026 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 5027 assert(!Val->use_empty() && "Input must have at least one use"); 5028 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 5029 bool IsSExt = isa<SExtInst>(FirstUser); 5030 Type *ExtTy = FirstUser->getType(); 5031 for (const User *U : Val->users()) { 5032 const Instruction *UI = cast<Instruction>(U); 5033 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 5034 return false; 5035 Type *CurTy = UI->getType(); 5036 // Same input and output types: Same instruction after CSE. 5037 if (CurTy == ExtTy) 5038 continue; 5039 5040 // If IsSExt is true, we are in this situation: 5041 // a = Val 5042 // b = sext ty1 a to ty2 5043 // c = sext ty1 a to ty3 5044 // Assuming ty2 is shorter than ty3, this could be turned into: 5045 // a = Val 5046 // b = sext ty1 a to ty2 5047 // c = sext ty2 b to ty3 5048 // However, the last sext is not free. 5049 if (IsSExt) 5050 return false; 5051 5052 // This is a ZExt, maybe this is free to extend from one type to another. 5053 // In that case, we would not account for a different use. 5054 Type *NarrowTy; 5055 Type *LargeTy; 5056 if (ExtTy->getScalarType()->getIntegerBitWidth() > 5057 CurTy->getScalarType()->getIntegerBitWidth()) { 5058 NarrowTy = CurTy; 5059 LargeTy = ExtTy; 5060 } else { 5061 NarrowTy = ExtTy; 5062 LargeTy = CurTy; 5063 } 5064 5065 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 5066 return false; 5067 } 5068 // All uses are the same or can be derived from one another for free. 5069 return true; 5070 } 5071 5072 /// Try to speculatively promote extensions in \p Exts and continue 5073 /// promoting through newly promoted operands recursively as far as doing so is 5074 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 5075 /// When some promotion happened, \p TPT contains the proper state to revert 5076 /// them. 5077 /// 5078 /// \return true if some promotion happened, false otherwise. 5079 bool CodeGenPrepare::tryToPromoteExts( 5080 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 5081 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 5082 unsigned CreatedInstsCost) { 5083 bool Promoted = false; 5084 5085 // Iterate over all the extensions to try to promote them. 5086 for (auto I : Exts) { 5087 // Early check if we directly have ext(load). 5088 if (isa<LoadInst>(I->getOperand(0))) { 5089 ProfitablyMovedExts.push_back(I); 5090 continue; 5091 } 5092 5093 // Check whether or not we want to do any promotion. The reason we have 5094 // this check inside the for loop is to catch the case where an extension 5095 // is directly fed by a load because in such case the extension can be moved 5096 // up without any promotion on its operands. 5097 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) 5098 return false; 5099 5100 // Get the action to perform the promotion. 5101 TypePromotionHelper::Action TPH = 5102 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 5103 // Check if we can promote. 5104 if (!TPH) { 5105 // Save the current extension as we cannot move up through its operand. 5106 ProfitablyMovedExts.push_back(I); 5107 continue; 5108 } 5109 5110 // Save the current state. 5111 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5112 TPT.getRestorationPoint(); 5113 SmallVector<Instruction *, 4> NewExts; 5114 unsigned NewCreatedInstsCost = 0; 5115 unsigned ExtCost = !TLI->isExtFree(I); 5116 // Promote. 5117 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, 5118 &NewExts, nullptr, *TLI); 5119 assert(PromotedVal && 5120 "TypePromotionHelper should have filtered out those cases"); 5121 5122 // We would be able to merge only one extension in a load. 5123 // Therefore, if we have more than 1 new extension we heuristically 5124 // cut this search path, because it means we degrade the code quality. 5125 // With exactly 2, the transformation is neutral, because we will merge 5126 // one extension but leave one. However, we optimistically keep going, 5127 // because the new extension may be removed too. 5128 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; 5129 // FIXME: It would be possible to propagate a negative value instead of 5130 // conservatively ceiling it to 0. 5131 TotalCreatedInstsCost = 5132 std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); 5133 if (!StressExtLdPromotion && 5134 (TotalCreatedInstsCost > 1 || 5135 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { 5136 // This promotion is not profitable, rollback to the previous state, and 5137 // save the current extension in ProfitablyMovedExts as the latest 5138 // speculative promotion turned out to be unprofitable. 5139 TPT.rollback(LastKnownGood); 5140 ProfitablyMovedExts.push_back(I); 5141 continue; 5142 } 5143 // Continue promoting NewExts as far as doing so is profitable. 5144 SmallVector<Instruction *, 2> NewlyMovedExts; 5145 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); 5146 bool NewPromoted = false; 5147 for (auto ExtInst : NewlyMovedExts) { 5148 Instruction *MovedExt = cast<Instruction>(ExtInst); 5149 Value *ExtOperand = MovedExt->getOperand(0); 5150 // If we have reached to a load, we need this extra profitability check 5151 // as it could potentially be merged into an ext(load). 5152 if (isa<LoadInst>(ExtOperand) && 5153 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || 5154 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) 5155 continue; 5156 5157 ProfitablyMovedExts.push_back(MovedExt); 5158 NewPromoted = true; 5159 } 5160 5161 // If none of speculative promotions for NewExts is profitable, rollback 5162 // and save the current extension (I) as the last profitable extension. 5163 if (!NewPromoted) { 5164 TPT.rollback(LastKnownGood); 5165 ProfitablyMovedExts.push_back(I); 5166 continue; 5167 } 5168 // The promotion is profitable. 5169 Promoted = true; 5170 } 5171 return Promoted; 5172 } 5173 5174 /// Merging redundant sexts when one is dominating the other. 5175 bool CodeGenPrepare::mergeSExts(Function &F, DominatorTree &DT) { 5176 bool Changed = false; 5177 for (auto &Entry : ValToSExtendedUses) { 5178 SExts &Insts = Entry.second; 5179 SExts CurPts; 5180 for (Instruction *Inst : Insts) { 5181 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || 5182 Inst->getOperand(0) != Entry.first) 5183 continue; 5184 bool inserted = false; 5185 for (auto &Pt : CurPts) { 5186 if (DT.dominates(Inst, Pt)) { 5187 Pt->replaceAllUsesWith(Inst); 5188 RemovedInsts.insert(Pt); 5189 Pt->removeFromParent(); 5190 Pt = Inst; 5191 inserted = true; 5192 Changed = true; 5193 break; 5194 } 5195 if (!DT.dominates(Pt, Inst)) 5196 // Give up if we need to merge in a common dominator as the 5197 // experiments show it is not profitable. 5198 continue; 5199 Inst->replaceAllUsesWith(Pt); 5200 RemovedInsts.insert(Inst); 5201 Inst->removeFromParent(); 5202 inserted = true; 5203 Changed = true; 5204 break; 5205 } 5206 if (!inserted) 5207 CurPts.push_back(Inst); 5208 } 5209 } 5210 return Changed; 5211 } 5212 5213 // Spliting large data structures so that the GEPs accessing them can have 5214 // smaller offsets so that they can be sunk to the same blocks as their users. 5215 // For example, a large struct starting from %base is splitted into two parts 5216 // where the second part starts from %new_base. 5217 // 5218 // Before: 5219 // BB0: 5220 // %base = 5221 // 5222 // BB1: 5223 // %gep0 = gep %base, off0 5224 // %gep1 = gep %base, off1 5225 // %gep2 = gep %base, off2 5226 // 5227 // BB2: 5228 // %load1 = load %gep0 5229 // %load2 = load %gep1 5230 // %load3 = load %gep2 5231 // 5232 // After: 5233 // BB0: 5234 // %base = 5235 // %new_base = gep %base, off0 5236 // 5237 // BB1: 5238 // %new_gep0 = %new_base 5239 // %new_gep1 = gep %new_base, off1 - off0 5240 // %new_gep2 = gep %new_base, off2 - off0 5241 // 5242 // BB2: 5243 // %load1 = load i32, i32* %new_gep0 5244 // %load2 = load i32, i32* %new_gep1 5245 // %load3 = load i32, i32* %new_gep2 5246 // 5247 // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because 5248 // their offsets are smaller enough to fit into the addressing mode. 5249 bool CodeGenPrepare::splitLargeGEPOffsets() { 5250 bool Changed = false; 5251 for (auto &Entry : LargeOffsetGEPMap) { 5252 Value *OldBase = Entry.first; 5253 SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> 5254 &LargeOffsetGEPs = Entry.second; 5255 auto compareGEPOffset = 5256 [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, 5257 const std::pair<GetElementPtrInst *, int64_t> &RHS) { 5258 if (LHS.first == RHS.first) 5259 return false; 5260 if (LHS.second != RHS.second) 5261 return LHS.second < RHS.second; 5262 return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; 5263 }; 5264 // Sorting all the GEPs of the same data structures based on the offsets. 5265 llvm::sort(LargeOffsetGEPs, compareGEPOffset); 5266 LargeOffsetGEPs.erase( 5267 std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()), 5268 LargeOffsetGEPs.end()); 5269 // Skip if all the GEPs have the same offsets. 5270 if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second) 5271 continue; 5272 GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first; 5273 int64_t BaseOffset = LargeOffsetGEPs.begin()->second; 5274 Value *NewBaseGEP = nullptr; 5275 5276 auto LargeOffsetGEP = LargeOffsetGEPs.begin(); 5277 while (LargeOffsetGEP != LargeOffsetGEPs.end()) { 5278 GetElementPtrInst *GEP = LargeOffsetGEP->first; 5279 int64_t Offset = LargeOffsetGEP->second; 5280 if (Offset != BaseOffset) { 5281 TargetLowering::AddrMode AddrMode; 5282 AddrMode.BaseOffs = Offset - BaseOffset; 5283 // The result type of the GEP might not be the type of the memory 5284 // access. 5285 if (!TLI->isLegalAddressingMode(*DL, AddrMode, 5286 GEP->getResultElementType(), 5287 GEP->getAddressSpace())) { 5288 // We need to create a new base if the offset to the current base is 5289 // too large to fit into the addressing mode. So, a very large struct 5290 // may be splitted into several parts. 5291 BaseGEP = GEP; 5292 BaseOffset = Offset; 5293 NewBaseGEP = nullptr; 5294 } 5295 } 5296 5297 // Generate a new GEP to replace the current one. 5298 LLVMContext &Ctx = GEP->getContext(); 5299 Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); 5300 Type *I8PtrTy = 5301 Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace()); 5302 Type *I8Ty = Type::getInt8Ty(Ctx); 5303 5304 if (!NewBaseGEP) { 5305 // Create a new base if we don't have one yet. Find the insertion 5306 // pointer for the new base first. 5307 BasicBlock::iterator NewBaseInsertPt; 5308 BasicBlock *NewBaseInsertBB; 5309 if (auto *BaseI = dyn_cast<Instruction>(OldBase)) { 5310 // If the base of the struct is an instruction, the new base will be 5311 // inserted close to it. 5312 NewBaseInsertBB = BaseI->getParent(); 5313 if (isa<PHINode>(BaseI)) 5314 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5315 else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) { 5316 NewBaseInsertBB = 5317 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest()); 5318 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5319 } else 5320 NewBaseInsertPt = std::next(BaseI->getIterator()); 5321 } else { 5322 // If the current base is an argument or global value, the new base 5323 // will be inserted to the entry block. 5324 NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock(); 5325 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5326 } 5327 IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt); 5328 // Create a new base. 5329 Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset); 5330 NewBaseGEP = OldBase; 5331 if (NewBaseGEP->getType() != I8PtrTy) 5332 NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy); 5333 NewBaseGEP = 5334 NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep"); 5335 NewGEPBases.insert(NewBaseGEP); 5336 } 5337 5338 IRBuilder<> Builder(GEP); 5339 Value *NewGEP = NewBaseGEP; 5340 if (Offset == BaseOffset) { 5341 if (GEP->getType() != I8PtrTy) 5342 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5343 } else { 5344 // Calculate the new offset for the new GEP. 5345 Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset); 5346 NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index); 5347 5348 if (GEP->getType() != I8PtrTy) 5349 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5350 } 5351 GEP->replaceAllUsesWith(NewGEP); 5352 LargeOffsetGEPID.erase(GEP); 5353 LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP); 5354 GEP->eraseFromParent(); 5355 Changed = true; 5356 } 5357 } 5358 return Changed; 5359 } 5360 5361 /// Return true, if an ext(load) can be formed from an extension in 5362 /// \p MovedExts. 5363 bool CodeGenPrepare::canFormExtLd( 5364 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, 5365 Instruction *&Inst, bool HasPromoted) { 5366 for (auto *MovedExtInst : MovedExts) { 5367 if (isa<LoadInst>(MovedExtInst->getOperand(0))) { 5368 LI = cast<LoadInst>(MovedExtInst->getOperand(0)); 5369 Inst = MovedExtInst; 5370 break; 5371 } 5372 } 5373 if (!LI) 5374 return false; 5375 5376 // If they're already in the same block, there's nothing to do. 5377 // Make the cheap checks first if we did not promote. 5378 // If we promoted, we need to check if it is indeed profitable. 5379 if (!HasPromoted && LI->getParent() == Inst->getParent()) 5380 return false; 5381 5382 return TLI->isExtLoad(LI, Inst, *DL); 5383 } 5384 5385 /// Move a zext or sext fed by a load into the same basic block as the load, 5386 /// unless conditions are unfavorable. This allows SelectionDAG to fold the 5387 /// extend into the load. 5388 /// 5389 /// E.g., 5390 /// \code 5391 /// %ld = load i32* %addr 5392 /// %add = add nuw i32 %ld, 4 5393 /// %zext = zext i32 %add to i64 5394 // \endcode 5395 /// => 5396 /// \code 5397 /// %ld = load i32* %addr 5398 /// %zext = zext i32 %ld to i64 5399 /// %add = add nuw i64 %zext, 4 5400 /// \encode 5401 /// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which 5402 /// allow us to match zext(load i32*) to i64. 5403 /// 5404 /// Also, try to promote the computations used to obtain a sign extended 5405 /// value used into memory accesses. 5406 /// E.g., 5407 /// \code 5408 /// a = add nsw i32 b, 3 5409 /// d = sext i32 a to i64 5410 /// e = getelementptr ..., i64 d 5411 /// \endcode 5412 /// => 5413 /// \code 5414 /// f = sext i32 b to i64 5415 /// a = add nsw i64 f, 3 5416 /// e = getelementptr ..., i64 a 5417 /// \endcode 5418 /// 5419 /// \p Inst[in/out] the extension may be modified during the process if some 5420 /// promotions apply. 5421 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { 5422 // ExtLoad formation and address type promotion infrastructure requires TLI to 5423 // be effective. 5424 if (!TLI) 5425 return false; 5426 5427 bool AllowPromotionWithoutCommonHeader = false; 5428 /// See if it is an interesting sext operations for the address type 5429 /// promotion before trying to promote it, e.g., the ones with the right 5430 /// type and used in memory accesses. 5431 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( 5432 *Inst, AllowPromotionWithoutCommonHeader); 5433 TypePromotionTransaction TPT(RemovedInsts); 5434 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5435 TPT.getRestorationPoint(); 5436 SmallVector<Instruction *, 1> Exts; 5437 SmallVector<Instruction *, 2> SpeculativelyMovedExts; 5438 Exts.push_back(Inst); 5439 5440 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); 5441 5442 // Look for a load being extended. 5443 LoadInst *LI = nullptr; 5444 Instruction *ExtFedByLoad; 5445 5446 // Try to promote a chain of computation if it allows to form an extended 5447 // load. 5448 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { 5449 assert(LI && ExtFedByLoad && "Expect a valid load and extension"); 5450 TPT.commit(); 5451 // Move the extend into the same block as the load 5452 ExtFedByLoad->moveAfter(LI); 5453 // CGP does not check if the zext would be speculatively executed when moved 5454 // to the same basic block as the load. Preserving its original location 5455 // would pessimize the debugging experience, as well as negatively impact 5456 // the quality of sample pgo. We don't want to use "line 0" as that has a 5457 // size cost in the line-table section and logically the zext can be seen as 5458 // part of the load. Therefore we conservatively reuse the same debug 5459 // location for the load and the zext. 5460 ExtFedByLoad->setDebugLoc(LI->getDebugLoc()); 5461 ++NumExtsMoved; 5462 Inst = ExtFedByLoad; 5463 return true; 5464 } 5465 5466 // Continue promoting SExts if known as considerable depending on targets. 5467 if (ATPConsiderable && 5468 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, 5469 HasPromoted, TPT, SpeculativelyMovedExts)) 5470 return true; 5471 5472 TPT.rollback(LastKnownGood); 5473 return false; 5474 } 5475 5476 // Perform address type promotion if doing so is profitable. 5477 // If AllowPromotionWithoutCommonHeader == false, we should find other sext 5478 // instructions that sign extended the same initial value. However, if 5479 // AllowPromotionWithoutCommonHeader == true, we expect promoting the 5480 // extension is just profitable. 5481 bool CodeGenPrepare::performAddressTypePromotion( 5482 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 5483 bool HasPromoted, TypePromotionTransaction &TPT, 5484 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 5485 bool Promoted = false; 5486 SmallPtrSet<Instruction *, 1> UnhandledExts; 5487 bool AllSeenFirst = true; 5488 for (auto I : SpeculativelyMovedExts) { 5489 Value *HeadOfChain = I->getOperand(0); 5490 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 5491 SeenChainsForSExt.find(HeadOfChain); 5492 // If there is an unhandled SExt which has the same header, try to promote 5493 // it as well. 5494 if (AlreadySeen != SeenChainsForSExt.end()) { 5495 if (AlreadySeen->second != nullptr) 5496 UnhandledExts.insert(AlreadySeen->second); 5497 AllSeenFirst = false; 5498 } 5499 } 5500 5501 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 5502 SpeculativelyMovedExts.size() == 1)) { 5503 TPT.commit(); 5504 if (HasPromoted) 5505 Promoted = true; 5506 for (auto I : SpeculativelyMovedExts) { 5507 Value *HeadOfChain = I->getOperand(0); 5508 SeenChainsForSExt[HeadOfChain] = nullptr; 5509 ValToSExtendedUses[HeadOfChain].push_back(I); 5510 } 5511 // Update Inst as promotion happen. 5512 Inst = SpeculativelyMovedExts.pop_back_val(); 5513 } else { 5514 // This is the first chain visited from the header, keep the current chain 5515 // as unhandled. Defer to promote this until we encounter another SExt 5516 // chain derived from the same header. 5517 for (auto I : SpeculativelyMovedExts) { 5518 Value *HeadOfChain = I->getOperand(0); 5519 SeenChainsForSExt[HeadOfChain] = Inst; 5520 } 5521 return false; 5522 } 5523 5524 if (!AllSeenFirst && !UnhandledExts.empty()) 5525 for (auto VisitedSExt : UnhandledExts) { 5526 if (RemovedInsts.count(VisitedSExt)) 5527 continue; 5528 TypePromotionTransaction TPT(RemovedInsts); 5529 SmallVector<Instruction *, 1> Exts; 5530 SmallVector<Instruction *, 2> Chains; 5531 Exts.push_back(VisitedSExt); 5532 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 5533 TPT.commit(); 5534 if (HasPromoted) 5535 Promoted = true; 5536 for (auto I : Chains) { 5537 Value *HeadOfChain = I->getOperand(0); 5538 // Mark this as handled. 5539 SeenChainsForSExt[HeadOfChain] = nullptr; 5540 ValToSExtendedUses[HeadOfChain].push_back(I); 5541 } 5542 } 5543 return Promoted; 5544 } 5545 5546 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 5547 BasicBlock *DefBB = I->getParent(); 5548 5549 // If the result of a {s|z}ext and its source are both live out, rewrite all 5550 // other uses of the source with result of extension. 5551 Value *Src = I->getOperand(0); 5552 if (Src->hasOneUse()) 5553 return false; 5554 5555 // Only do this xform if truncating is free. 5556 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 5557 return false; 5558 5559 // Only safe to perform the optimization if the source is also defined in 5560 // this block. 5561 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 5562 return false; 5563 5564 bool DefIsLiveOut = false; 5565 for (User *U : I->users()) { 5566 Instruction *UI = cast<Instruction>(U); 5567 5568 // Figure out which BB this ext is used in. 5569 BasicBlock *UserBB = UI->getParent(); 5570 if (UserBB == DefBB) continue; 5571 DefIsLiveOut = true; 5572 break; 5573 } 5574 if (!DefIsLiveOut) 5575 return false; 5576 5577 // Make sure none of the uses are PHI nodes. 5578 for (User *U : Src->users()) { 5579 Instruction *UI = cast<Instruction>(U); 5580 BasicBlock *UserBB = UI->getParent(); 5581 if (UserBB == DefBB) continue; 5582 // Be conservative. We don't want this xform to end up introducing 5583 // reloads just before load / store instructions. 5584 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 5585 return false; 5586 } 5587 5588 // InsertedTruncs - Only insert one trunc in each block once. 5589 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 5590 5591 bool MadeChange = false; 5592 for (Use &U : Src->uses()) { 5593 Instruction *User = cast<Instruction>(U.getUser()); 5594 5595 // Figure out which BB this ext is used in. 5596 BasicBlock *UserBB = User->getParent(); 5597 if (UserBB == DefBB) continue; 5598 5599 // Both src and def are live in this block. Rewrite the use. 5600 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 5601 5602 if (!InsertedTrunc) { 5603 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 5604 assert(InsertPt != UserBB->end()); 5605 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 5606 InsertedInsts.insert(InsertedTrunc); 5607 } 5608 5609 // Replace a use of the {s|z}ext source with a use of the result. 5610 U = InsertedTrunc; 5611 ++NumExtUses; 5612 MadeChange = true; 5613 } 5614 5615 return MadeChange; 5616 } 5617 5618 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 5619 // just after the load if the target can fold this into one extload instruction, 5620 // with the hope of eliminating some of the other later "and" instructions using 5621 // the loaded value. "and"s that are made trivially redundant by the insertion 5622 // of the new "and" are removed by this function, while others (e.g. those whose 5623 // path from the load goes through a phi) are left for isel to potentially 5624 // remove. 5625 // 5626 // For example: 5627 // 5628 // b0: 5629 // x = load i32 5630 // ... 5631 // b1: 5632 // y = and x, 0xff 5633 // z = use y 5634 // 5635 // becomes: 5636 // 5637 // b0: 5638 // x = load i32 5639 // x' = and x, 0xff 5640 // ... 5641 // b1: 5642 // z = use x' 5643 // 5644 // whereas: 5645 // 5646 // b0: 5647 // x1 = load i32 5648 // ... 5649 // b1: 5650 // x2 = load i32 5651 // ... 5652 // b2: 5653 // x = phi x1, x2 5654 // y = and x, 0xff 5655 // 5656 // becomes (after a call to optimizeLoadExt for each load): 5657 // 5658 // b0: 5659 // x1 = load i32 5660 // x1' = and x1, 0xff 5661 // ... 5662 // b1: 5663 // x2 = load i32 5664 // x2' = and x2, 0xff 5665 // ... 5666 // b2: 5667 // x = phi x1', x2' 5668 // y = and x, 0xff 5669 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 5670 if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) 5671 return false; 5672 5673 // Skip loads we've already transformed. 5674 if (Load->hasOneUse() && 5675 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 5676 return false; 5677 5678 // Look at all uses of Load, looking through phis, to determine how many bits 5679 // of the loaded value are needed. 5680 SmallVector<Instruction *, 8> WorkList; 5681 SmallPtrSet<Instruction *, 16> Visited; 5682 SmallVector<Instruction *, 8> AndsToMaybeRemove; 5683 for (auto *U : Load->users()) 5684 WorkList.push_back(cast<Instruction>(U)); 5685 5686 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 5687 unsigned BitWidth = LoadResultVT.getSizeInBits(); 5688 APInt DemandBits(BitWidth, 0); 5689 APInt WidestAndBits(BitWidth, 0); 5690 5691 while (!WorkList.empty()) { 5692 Instruction *I = WorkList.back(); 5693 WorkList.pop_back(); 5694 5695 // Break use-def graph loops. 5696 if (!Visited.insert(I).second) 5697 continue; 5698 5699 // For a PHI node, push all of its users. 5700 if (auto *Phi = dyn_cast<PHINode>(I)) { 5701 for (auto *U : Phi->users()) 5702 WorkList.push_back(cast<Instruction>(U)); 5703 continue; 5704 } 5705 5706 switch (I->getOpcode()) { 5707 case Instruction::And: { 5708 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 5709 if (!AndC) 5710 return false; 5711 APInt AndBits = AndC->getValue(); 5712 DemandBits |= AndBits; 5713 // Keep track of the widest and mask we see. 5714 if (AndBits.ugt(WidestAndBits)) 5715 WidestAndBits = AndBits; 5716 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 5717 AndsToMaybeRemove.push_back(I); 5718 break; 5719 } 5720 5721 case Instruction::Shl: { 5722 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 5723 if (!ShlC) 5724 return false; 5725 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 5726 DemandBits.setLowBits(BitWidth - ShiftAmt); 5727 break; 5728 } 5729 5730 case Instruction::Trunc: { 5731 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 5732 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 5733 DemandBits.setLowBits(TruncBitWidth); 5734 break; 5735 } 5736 5737 default: 5738 return false; 5739 } 5740 } 5741 5742 uint32_t ActiveBits = DemandBits.getActiveBits(); 5743 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 5744 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 5745 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 5746 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 5747 // followed by an AND. 5748 // TODO: Look into removing this restriction by fixing backends to either 5749 // return false for isLoadExtLegal for i1 or have them select this pattern to 5750 // a single instruction. 5751 // 5752 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 5753 // mask, since these are the only ands that will be removed by isel. 5754 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 5755 WidestAndBits != DemandBits) 5756 return false; 5757 5758 LLVMContext &Ctx = Load->getType()->getContext(); 5759 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 5760 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 5761 5762 // Reject cases that won't be matched as extloads. 5763 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 5764 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 5765 return false; 5766 5767 IRBuilder<> Builder(Load->getNextNode()); 5768 auto *NewAnd = dyn_cast<Instruction>( 5769 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 5770 // Mark this instruction as "inserted by CGP", so that other 5771 // optimizations don't touch it. 5772 InsertedInsts.insert(NewAnd); 5773 5774 // Replace all uses of load with new and (except for the use of load in the 5775 // new and itself). 5776 Load->replaceAllUsesWith(NewAnd); 5777 NewAnd->setOperand(0, Load); 5778 5779 // Remove any and instructions that are now redundant. 5780 for (auto *And : AndsToMaybeRemove) 5781 // Check that the and mask is the same as the one we decided to put on the 5782 // new and. 5783 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 5784 And->replaceAllUsesWith(NewAnd); 5785 if (&*CurInstIterator == And) 5786 CurInstIterator = std::next(And->getIterator()); 5787 And->eraseFromParent(); 5788 ++NumAndUses; 5789 } 5790 5791 ++NumAndsAdded; 5792 return true; 5793 } 5794 5795 /// Check if V (an operand of a select instruction) is an expensive instruction 5796 /// that is only used once. 5797 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 5798 auto *I = dyn_cast<Instruction>(V); 5799 // If it's safe to speculatively execute, then it should not have side 5800 // effects; therefore, it's safe to sink and possibly *not* execute. 5801 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 5802 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; 5803 } 5804 5805 /// Returns true if a SelectInst should be turned into an explicit branch. 5806 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 5807 const TargetLowering *TLI, 5808 SelectInst *SI) { 5809 // If even a predictable select is cheap, then a branch can't be cheaper. 5810 if (!TLI->isPredictableSelectExpensive()) 5811 return false; 5812 5813 // FIXME: This should use the same heuristics as IfConversion to determine 5814 // whether a select is better represented as a branch. 5815 5816 // If metadata tells us that the select condition is obviously predictable, 5817 // then we want to replace the select with a branch. 5818 uint64_t TrueWeight, FalseWeight; 5819 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 5820 uint64_t Max = std::max(TrueWeight, FalseWeight); 5821 uint64_t Sum = TrueWeight + FalseWeight; 5822 if (Sum != 0) { 5823 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 5824 if (Probability > TLI->getPredictableBranchThreshold()) 5825 return true; 5826 } 5827 } 5828 5829 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 5830 5831 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 5832 // comparison condition. If the compare has more than one use, there's 5833 // probably another cmov or setcc around, so it's not worth emitting a branch. 5834 if (!Cmp || !Cmp->hasOneUse()) 5835 return false; 5836 5837 // If either operand of the select is expensive and only needed on one side 5838 // of the select, we should form a branch. 5839 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 5840 sinkSelectOperand(TTI, SI->getFalseValue())) 5841 return true; 5842 5843 return false; 5844 } 5845 5846 /// If \p isTrue is true, return the true value of \p SI, otherwise return 5847 /// false value of \p SI. If the true/false value of \p SI is defined by any 5848 /// select instructions in \p Selects, look through the defining select 5849 /// instruction until the true/false value is not defined in \p Selects. 5850 static Value *getTrueOrFalseValue( 5851 SelectInst *SI, bool isTrue, 5852 const SmallPtrSet<const Instruction *, 2> &Selects) { 5853 Value *V; 5854 5855 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 5856 DefSI = dyn_cast<SelectInst>(V)) { 5857 assert(DefSI->getCondition() == SI->getCondition() && 5858 "The condition of DefSI does not match with SI"); 5859 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); 5860 } 5861 return V; 5862 } 5863 5864 /// If we have a SelectInst that will likely profit from branch prediction, 5865 /// turn it into a branch. 5866 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 5867 // If branch conversion isn't desirable, exit early. 5868 if (DisableSelectToBranch || OptSize || !TLI) 5869 return false; 5870 5871 // Find all consecutive select instructions that share the same condition. 5872 SmallVector<SelectInst *, 2> ASI; 5873 ASI.push_back(SI); 5874 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 5875 It != SI->getParent()->end(); ++It) { 5876 SelectInst *I = dyn_cast<SelectInst>(&*It); 5877 if (I && SI->getCondition() == I->getCondition()) { 5878 ASI.push_back(I); 5879 } else { 5880 break; 5881 } 5882 } 5883 5884 SelectInst *LastSI = ASI.back(); 5885 // Increment the current iterator to skip all the rest of select instructions 5886 // because they will be either "not lowered" or "all lowered" to branch. 5887 CurInstIterator = std::next(LastSI->getIterator()); 5888 5889 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 5890 5891 // Can we convert the 'select' to CF ? 5892 if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) 5893 return false; 5894 5895 TargetLowering::SelectSupportKind SelectKind; 5896 if (VectorCond) 5897 SelectKind = TargetLowering::VectorMaskSelect; 5898 else if (SI->getType()->isVectorTy()) 5899 SelectKind = TargetLowering::ScalarCondVectorVal; 5900 else 5901 SelectKind = TargetLowering::ScalarValSelect; 5902 5903 if (TLI->isSelectSupported(SelectKind) && 5904 !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) 5905 return false; 5906 5907 ModifiedDT = true; 5908 5909 // Transform a sequence like this: 5910 // start: 5911 // %cmp = cmp uge i32 %a, %b 5912 // %sel = select i1 %cmp, i32 %c, i32 %d 5913 // 5914 // Into: 5915 // start: 5916 // %cmp = cmp uge i32 %a, %b 5917 // br i1 %cmp, label %select.true, label %select.false 5918 // select.true: 5919 // br label %select.end 5920 // select.false: 5921 // br label %select.end 5922 // select.end: 5923 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 5924 // 5925 // In addition, we may sink instructions that produce %c or %d from 5926 // the entry block into the destination(s) of the new branch. 5927 // If the true or false blocks do not contain a sunken instruction, that 5928 // block and its branch may be optimized away. In that case, one side of the 5929 // first branch will point directly to select.end, and the corresponding PHI 5930 // predecessor block will be the start block. 5931 5932 // First, we split the block containing the select into 2 blocks. 5933 BasicBlock *StartBlock = SI->getParent(); 5934 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 5935 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 5936 5937 // Delete the unconditional branch that was just created by the split. 5938 StartBlock->getTerminator()->eraseFromParent(); 5939 5940 // These are the new basic blocks for the conditional branch. 5941 // At least one will become an actual new basic block. 5942 BasicBlock *TrueBlock = nullptr; 5943 BasicBlock *FalseBlock = nullptr; 5944 BranchInst *TrueBranch = nullptr; 5945 BranchInst *FalseBranch = nullptr; 5946 5947 // Sink expensive instructions into the conditional blocks to avoid executing 5948 // them speculatively. 5949 for (SelectInst *SI : ASI) { 5950 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 5951 if (TrueBlock == nullptr) { 5952 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 5953 EndBlock->getParent(), EndBlock); 5954 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 5955 TrueBranch->setDebugLoc(SI->getDebugLoc()); 5956 } 5957 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 5958 TrueInst->moveBefore(TrueBranch); 5959 } 5960 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 5961 if (FalseBlock == nullptr) { 5962 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 5963 EndBlock->getParent(), EndBlock); 5964 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5965 FalseBranch->setDebugLoc(SI->getDebugLoc()); 5966 } 5967 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 5968 FalseInst->moveBefore(FalseBranch); 5969 } 5970 } 5971 5972 // If there was nothing to sink, then arbitrarily choose the 'false' side 5973 // for a new input value to the PHI. 5974 if (TrueBlock == FalseBlock) { 5975 assert(TrueBlock == nullptr && 5976 "Unexpected basic block transform while optimizing select"); 5977 5978 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 5979 EndBlock->getParent(), EndBlock); 5980 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 5981 FalseBranch->setDebugLoc(SI->getDebugLoc()); 5982 } 5983 5984 // Insert the real conditional branch based on the original condition. 5985 // If we did not create a new block for one of the 'true' or 'false' paths 5986 // of the condition, it means that side of the branch goes to the end block 5987 // directly and the path originates from the start block from the point of 5988 // view of the new PHI. 5989 BasicBlock *TT, *FT; 5990 if (TrueBlock == nullptr) { 5991 TT = EndBlock; 5992 FT = FalseBlock; 5993 TrueBlock = StartBlock; 5994 } else if (FalseBlock == nullptr) { 5995 TT = TrueBlock; 5996 FT = EndBlock; 5997 FalseBlock = StartBlock; 5998 } else { 5999 TT = TrueBlock; 6000 FT = FalseBlock; 6001 } 6002 IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); 6003 6004 SmallPtrSet<const Instruction *, 2> INS; 6005 INS.insert(ASI.begin(), ASI.end()); 6006 // Use reverse iterator because later select may use the value of the 6007 // earlier select, and we need to propagate value through earlier select 6008 // to get the PHI operand. 6009 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 6010 SelectInst *SI = *It; 6011 // The select itself is replaced with a PHI Node. 6012 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 6013 PN->takeName(SI); 6014 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 6015 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 6016 PN->setDebugLoc(SI->getDebugLoc()); 6017 6018 SI->replaceAllUsesWith(PN); 6019 SI->eraseFromParent(); 6020 INS.erase(SI); 6021 ++NumSelectsExpanded; 6022 } 6023 6024 // Instruct OptimizeBlock to skip to the next block. 6025 CurInstIterator = StartBlock->end(); 6026 return true; 6027 } 6028 6029 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 6030 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 6031 int SplatElem = -1; 6032 for (unsigned i = 0; i < Mask.size(); ++i) { 6033 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 6034 return false; 6035 SplatElem = Mask[i]; 6036 } 6037 6038 return true; 6039 } 6040 6041 /// Some targets have expensive vector shifts if the lanes aren't all the same 6042 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 6043 /// it's often worth sinking a shufflevector splat down to its use so that 6044 /// codegen can spot all lanes are identical. 6045 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 6046 BasicBlock *DefBB = SVI->getParent(); 6047 6048 // Only do this xform if variable vector shifts are particularly expensive. 6049 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 6050 return false; 6051 6052 // We only expect better codegen by sinking a shuffle if we can recognise a 6053 // constant splat. 6054 if (!isBroadcastShuffle(SVI)) 6055 return false; 6056 6057 // InsertedShuffles - Only insert a shuffle in each block once. 6058 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 6059 6060 bool MadeChange = false; 6061 for (User *U : SVI->users()) { 6062 Instruction *UI = cast<Instruction>(U); 6063 6064 // Figure out which BB this ext is used in. 6065 BasicBlock *UserBB = UI->getParent(); 6066 if (UserBB == DefBB) continue; 6067 6068 // For now only apply this when the splat is used by a shift instruction. 6069 if (!UI->isShift()) continue; 6070 6071 // Everything checks out, sink the shuffle if the user's block doesn't 6072 // already have a copy. 6073 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 6074 6075 if (!InsertedShuffle) { 6076 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 6077 assert(InsertPt != UserBB->end()); 6078 InsertedShuffle = 6079 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 6080 SVI->getOperand(2), "", &*InsertPt); 6081 } 6082 6083 UI->replaceUsesOfWith(SVI, InsertedShuffle); 6084 MadeChange = true; 6085 } 6086 6087 // If we removed all uses, nuke the shuffle. 6088 if (SVI->use_empty()) { 6089 SVI->eraseFromParent(); 6090 MadeChange = true; 6091 } 6092 6093 return MadeChange; 6094 } 6095 6096 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) { 6097 // If the operands of I can be folded into a target instruction together with 6098 // I, duplicate and sink them. 6099 SmallVector<Use *, 4> OpsToSink; 6100 if (!TLI || !TLI->shouldSinkOperands(I, OpsToSink)) 6101 return false; 6102 6103 // OpsToSink can contain multiple uses in a use chain (e.g. 6104 // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating 6105 // uses must come first, which means they are sunk first, temporarily creating 6106 // invalid IR. This will be fixed once their dominated users are sunk and 6107 // updated. 6108 BasicBlock *TargetBB = I->getParent(); 6109 bool Changed = false; 6110 SmallVector<Use *, 4> ToReplace; 6111 for (Use *U : OpsToSink) { 6112 auto *UI = cast<Instruction>(U->get()); 6113 if (UI->getParent() == TargetBB || isa<PHINode>(UI)) 6114 continue; 6115 ToReplace.push_back(U); 6116 } 6117 6118 SmallPtrSet<Instruction *, 4> MaybeDead; 6119 for (Use *U : ToReplace) { 6120 auto *UI = cast<Instruction>(U->get()); 6121 Instruction *NI = UI->clone(); 6122 MaybeDead.insert(UI); 6123 LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n"); 6124 NI->insertBefore(I); 6125 InsertedInsts.insert(NI); 6126 U->set(NI); 6127 Changed = true; 6128 } 6129 6130 // Remove instructions that are dead after sinking. 6131 for (auto *I : MaybeDead) 6132 if (!I->hasNUsesOrMore(1)) 6133 I->eraseFromParent(); 6134 6135 return Changed; 6136 } 6137 6138 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 6139 if (!TLI || !DL) 6140 return false; 6141 6142 Value *Cond = SI->getCondition(); 6143 Type *OldType = Cond->getType(); 6144 LLVMContext &Context = Cond->getContext(); 6145 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 6146 unsigned RegWidth = RegType.getSizeInBits(); 6147 6148 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 6149 return false; 6150 6151 // If the register width is greater than the type width, expand the condition 6152 // of the switch instruction and each case constant to the width of the 6153 // register. By widening the type of the switch condition, subsequent 6154 // comparisons (for case comparisons) will not need to be extended to the 6155 // preferred register width, so we will potentially eliminate N-1 extends, 6156 // where N is the number of cases in the switch. 6157 auto *NewType = Type::getIntNTy(Context, RegWidth); 6158 6159 // Zero-extend the switch condition and case constants unless the switch 6160 // condition is a function argument that is already being sign-extended. 6161 // In that case, we can avoid an unnecessary mask/extension by sign-extending 6162 // everything instead. 6163 Instruction::CastOps ExtType = Instruction::ZExt; 6164 if (auto *Arg = dyn_cast<Argument>(Cond)) 6165 if (Arg->hasSExtAttr()) 6166 ExtType = Instruction::SExt; 6167 6168 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 6169 ExtInst->insertBefore(SI); 6170 ExtInst->setDebugLoc(SI->getDebugLoc()); 6171 SI->setCondition(ExtInst); 6172 for (auto Case : SI->cases()) { 6173 APInt NarrowConst = Case.getCaseValue()->getValue(); 6174 APInt WideConst = (ExtType == Instruction::ZExt) ? 6175 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); 6176 Case.setValue(ConstantInt::get(Context, WideConst)); 6177 } 6178 6179 return true; 6180 } 6181 6182 6183 namespace { 6184 6185 /// Helper class to promote a scalar operation to a vector one. 6186 /// This class is used to move downward extractelement transition. 6187 /// E.g., 6188 /// a = vector_op <2 x i32> 6189 /// b = extractelement <2 x i32> a, i32 0 6190 /// c = scalar_op b 6191 /// store c 6192 /// 6193 /// => 6194 /// a = vector_op <2 x i32> 6195 /// c = vector_op a (equivalent to scalar_op on the related lane) 6196 /// * d = extractelement <2 x i32> c, i32 0 6197 /// * store d 6198 /// Assuming both extractelement and store can be combine, we get rid of the 6199 /// transition. 6200 class VectorPromoteHelper { 6201 /// DataLayout associated with the current module. 6202 const DataLayout &DL; 6203 6204 /// Used to perform some checks on the legality of vector operations. 6205 const TargetLowering &TLI; 6206 6207 /// Used to estimated the cost of the promoted chain. 6208 const TargetTransformInfo &TTI; 6209 6210 /// The transition being moved downwards. 6211 Instruction *Transition; 6212 6213 /// The sequence of instructions to be promoted. 6214 SmallVector<Instruction *, 4> InstsToBePromoted; 6215 6216 /// Cost of combining a store and an extract. 6217 unsigned StoreExtractCombineCost; 6218 6219 /// Instruction that will be combined with the transition. 6220 Instruction *CombineInst = nullptr; 6221 6222 /// The instruction that represents the current end of the transition. 6223 /// Since we are faking the promotion until we reach the end of the chain 6224 /// of computation, we need a way to get the current end of the transition. 6225 Instruction *getEndOfTransition() const { 6226 if (InstsToBePromoted.empty()) 6227 return Transition; 6228 return InstsToBePromoted.back(); 6229 } 6230 6231 /// Return the index of the original value in the transition. 6232 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 6233 /// c, is at index 0. 6234 unsigned getTransitionOriginalValueIdx() const { 6235 assert(isa<ExtractElementInst>(Transition) && 6236 "Other kind of transitions are not supported yet"); 6237 return 0; 6238 } 6239 6240 /// Return the index of the index in the transition. 6241 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 6242 /// is at index 1. 6243 unsigned getTransitionIdx() const { 6244 assert(isa<ExtractElementInst>(Transition) && 6245 "Other kind of transitions are not supported yet"); 6246 return 1; 6247 } 6248 6249 /// Get the type of the transition. 6250 /// This is the type of the original value. 6251 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 6252 /// transition is <2 x i32>. 6253 Type *getTransitionType() const { 6254 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 6255 } 6256 6257 /// Promote \p ToBePromoted by moving \p Def downward through. 6258 /// I.e., we have the following sequence: 6259 /// Def = Transition <ty1> a to <ty2> 6260 /// b = ToBePromoted <ty2> Def, ... 6261 /// => 6262 /// b = ToBePromoted <ty1> a, ... 6263 /// Def = Transition <ty1> ToBePromoted to <ty2> 6264 void promoteImpl(Instruction *ToBePromoted); 6265 6266 /// Check whether or not it is profitable to promote all the 6267 /// instructions enqueued to be promoted. 6268 bool isProfitableToPromote() { 6269 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 6270 unsigned Index = isa<ConstantInt>(ValIdx) 6271 ? cast<ConstantInt>(ValIdx)->getZExtValue() 6272 : -1; 6273 Type *PromotedType = getTransitionType(); 6274 6275 StoreInst *ST = cast<StoreInst>(CombineInst); 6276 unsigned AS = ST->getPointerAddressSpace(); 6277 unsigned Align = ST->getAlignment(); 6278 // Check if this store is supported. 6279 if (!TLI.allowsMisalignedMemoryAccesses( 6280 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 6281 Align)) { 6282 // If this is not supported, there is no way we can combine 6283 // the extract with the store. 6284 return false; 6285 } 6286 6287 // The scalar chain of computation has to pay for the transition 6288 // scalar to vector. 6289 // The vector chain has to account for the combining cost. 6290 uint64_t ScalarCost = 6291 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 6292 uint64_t VectorCost = StoreExtractCombineCost; 6293 for (const auto &Inst : InstsToBePromoted) { 6294 // Compute the cost. 6295 // By construction, all instructions being promoted are arithmetic ones. 6296 // Moreover, one argument is a constant that can be viewed as a splat 6297 // constant. 6298 Value *Arg0 = Inst->getOperand(0); 6299 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 6300 isa<ConstantFP>(Arg0); 6301 TargetTransformInfo::OperandValueKind Arg0OVK = 6302 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 6303 : TargetTransformInfo::OK_AnyValue; 6304 TargetTransformInfo::OperandValueKind Arg1OVK = 6305 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 6306 : TargetTransformInfo::OK_AnyValue; 6307 ScalarCost += TTI.getArithmeticInstrCost( 6308 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); 6309 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 6310 Arg0OVK, Arg1OVK); 6311 } 6312 LLVM_DEBUG( 6313 dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 6314 << ScalarCost << "\nVector: " << VectorCost << '\n'); 6315 return ScalarCost > VectorCost; 6316 } 6317 6318 /// Generate a constant vector with \p Val with the same 6319 /// number of elements as the transition. 6320 /// \p UseSplat defines whether or not \p Val should be replicated 6321 /// across the whole vector. 6322 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 6323 /// otherwise we generate a vector with as many undef as possible: 6324 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 6325 /// used at the index of the extract. 6326 Value *getConstantVector(Constant *Val, bool UseSplat) const { 6327 unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); 6328 if (!UseSplat) { 6329 // If we cannot determine where the constant must be, we have to 6330 // use a splat constant. 6331 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 6332 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 6333 ExtractIdx = CstVal->getSExtValue(); 6334 else 6335 UseSplat = true; 6336 } 6337 6338 unsigned End = getTransitionType()->getVectorNumElements(); 6339 if (UseSplat) 6340 return ConstantVector::getSplat(End, Val); 6341 6342 SmallVector<Constant *, 4> ConstVec; 6343 UndefValue *UndefVal = UndefValue::get(Val->getType()); 6344 for (unsigned Idx = 0; Idx != End; ++Idx) { 6345 if (Idx == ExtractIdx) 6346 ConstVec.push_back(Val); 6347 else 6348 ConstVec.push_back(UndefVal); 6349 } 6350 return ConstantVector::get(ConstVec); 6351 } 6352 6353 /// Check if promoting to a vector type an operand at \p OperandIdx 6354 /// in \p Use can trigger undefined behavior. 6355 static bool canCauseUndefinedBehavior(const Instruction *Use, 6356 unsigned OperandIdx) { 6357 // This is not safe to introduce undef when the operand is on 6358 // the right hand side of a division-like instruction. 6359 if (OperandIdx != 1) 6360 return false; 6361 switch (Use->getOpcode()) { 6362 default: 6363 return false; 6364 case Instruction::SDiv: 6365 case Instruction::UDiv: 6366 case Instruction::SRem: 6367 case Instruction::URem: 6368 return true; 6369 case Instruction::FDiv: 6370 case Instruction::FRem: 6371 return !Use->hasNoNaNs(); 6372 } 6373 llvm_unreachable(nullptr); 6374 } 6375 6376 public: 6377 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 6378 const TargetTransformInfo &TTI, Instruction *Transition, 6379 unsigned CombineCost) 6380 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 6381 StoreExtractCombineCost(CombineCost) { 6382 assert(Transition && "Do not know how to promote null"); 6383 } 6384 6385 /// Check if we can promote \p ToBePromoted to \p Type. 6386 bool canPromote(const Instruction *ToBePromoted) const { 6387 // We could support CastInst too. 6388 return isa<BinaryOperator>(ToBePromoted); 6389 } 6390 6391 /// Check if it is profitable to promote \p ToBePromoted 6392 /// by moving downward the transition through. 6393 bool shouldPromote(const Instruction *ToBePromoted) const { 6394 // Promote only if all the operands can be statically expanded. 6395 // Indeed, we do not want to introduce any new kind of transitions. 6396 for (const Use &U : ToBePromoted->operands()) { 6397 const Value *Val = U.get(); 6398 if (Val == getEndOfTransition()) { 6399 // If the use is a division and the transition is on the rhs, 6400 // we cannot promote the operation, otherwise we may create a 6401 // division by zero. 6402 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 6403 return false; 6404 continue; 6405 } 6406 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 6407 !isa<ConstantFP>(Val)) 6408 return false; 6409 } 6410 // Check that the resulting operation is legal. 6411 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 6412 if (!ISDOpcode) 6413 return false; 6414 return StressStoreExtract || 6415 TLI.isOperationLegalOrCustom( 6416 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); 6417 } 6418 6419 /// Check whether or not \p Use can be combined 6420 /// with the transition. 6421 /// I.e., is it possible to do Use(Transition) => AnotherUse? 6422 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } 6423 6424 /// Record \p ToBePromoted as part of the chain to be promoted. 6425 void enqueueForPromotion(Instruction *ToBePromoted) { 6426 InstsToBePromoted.push_back(ToBePromoted); 6427 } 6428 6429 /// Set the instruction that will be combined with the transition. 6430 void recordCombineInstruction(Instruction *ToBeCombined) { 6431 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); 6432 CombineInst = ToBeCombined; 6433 } 6434 6435 /// Promote all the instructions enqueued for promotion if it is 6436 /// is profitable. 6437 /// \return True if the promotion happened, false otherwise. 6438 bool promote() { 6439 // Check if there is something to promote. 6440 // Right now, if we do not have anything to combine with, 6441 // we assume the promotion is not profitable. 6442 if (InstsToBePromoted.empty() || !CombineInst) 6443 return false; 6444 6445 // Check cost. 6446 if (!StressStoreExtract && !isProfitableToPromote()) 6447 return false; 6448 6449 // Promote. 6450 for (auto &ToBePromoted : InstsToBePromoted) 6451 promoteImpl(ToBePromoted); 6452 InstsToBePromoted.clear(); 6453 return true; 6454 } 6455 }; 6456 6457 } // end anonymous namespace 6458 6459 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { 6460 // At this point, we know that all the operands of ToBePromoted but Def 6461 // can be statically promoted. 6462 // For Def, we need to use its parameter in ToBePromoted: 6463 // b = ToBePromoted ty1 a 6464 // Def = Transition ty1 b to ty2 6465 // Move the transition down. 6466 // 1. Replace all uses of the promoted operation by the transition. 6467 // = ... b => = ... Def. 6468 assert(ToBePromoted->getType() == Transition->getType() && 6469 "The type of the result of the transition does not match " 6470 "the final type"); 6471 ToBePromoted->replaceAllUsesWith(Transition); 6472 // 2. Update the type of the uses. 6473 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. 6474 Type *TransitionTy = getTransitionType(); 6475 ToBePromoted->mutateType(TransitionTy); 6476 // 3. Update all the operands of the promoted operation with promoted 6477 // operands. 6478 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. 6479 for (Use &U : ToBePromoted->operands()) { 6480 Value *Val = U.get(); 6481 Value *NewVal = nullptr; 6482 if (Val == Transition) 6483 NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); 6484 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || 6485 isa<ConstantFP>(Val)) { 6486 // Use a splat constant if it is not safe to use undef. 6487 NewVal = getConstantVector( 6488 cast<Constant>(Val), 6489 isa<UndefValue>(Val) || 6490 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); 6491 } else 6492 llvm_unreachable("Did you modified shouldPromote and forgot to update " 6493 "this?"); 6494 ToBePromoted->setOperand(U.getOperandNo(), NewVal); 6495 } 6496 Transition->moveAfter(ToBePromoted); 6497 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); 6498 } 6499 6500 /// Some targets can do store(extractelement) with one instruction. 6501 /// Try to push the extractelement towards the stores when the target 6502 /// has this feature and this is profitable. 6503 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { 6504 unsigned CombineCost = std::numeric_limits<unsigned>::max(); 6505 if (DisableStoreExtract || !TLI || 6506 (!StressStoreExtract && 6507 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 6508 Inst->getOperand(1), CombineCost))) 6509 return false; 6510 6511 // At this point we know that Inst is a vector to scalar transition. 6512 // Try to move it down the def-use chain, until: 6513 // - We can combine the transition with its single use 6514 // => we got rid of the transition. 6515 // - We escape the current basic block 6516 // => we would need to check that we are moving it at a cheaper place and 6517 // we do not do that for now. 6518 BasicBlock *Parent = Inst->getParent(); 6519 LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 6520 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); 6521 // If the transition has more than one use, assume this is not going to be 6522 // beneficial. 6523 while (Inst->hasOneUse()) { 6524 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 6525 LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 6526 6527 if (ToBePromoted->getParent() != Parent) { 6528 LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" 6529 << ToBePromoted->getParent()->getName() 6530 << ") than the transition (" << Parent->getName() 6531 << ").\n"); 6532 return false; 6533 } 6534 6535 if (VPH.canCombine(ToBePromoted)) { 6536 LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' 6537 << "will be combined with: " << *ToBePromoted << '\n'); 6538 VPH.recordCombineInstruction(ToBePromoted); 6539 bool Changed = VPH.promote(); 6540 NumStoreExtractExposed += Changed; 6541 return Changed; 6542 } 6543 6544 LLVM_DEBUG(dbgs() << "Try promoting.\n"); 6545 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 6546 return false; 6547 6548 LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 6549 6550 VPH.enqueueForPromotion(ToBePromoted); 6551 Inst = ToBePromoted; 6552 } 6553 return false; 6554 } 6555 6556 /// For the instruction sequence of store below, F and I values 6557 /// are bundled together as an i64 value before being stored into memory. 6558 /// Sometimes it is more efficient to generate separate stores for F and I, 6559 /// which can remove the bitwise instructions or sink them to colder places. 6560 /// 6561 /// (store (or (zext (bitcast F to i32) to i64), 6562 /// (shl (zext I to i64), 32)), addr) --> 6563 /// (store F, addr) and (store I, addr+4) 6564 /// 6565 /// Similarly, splitting for other merged store can also be beneficial, like: 6566 /// For pair of {i32, i32}, i64 store --> two i32 stores. 6567 /// For pair of {i32, i16}, i64 store --> two i32 stores. 6568 /// For pair of {i16, i16}, i32 store --> two i16 stores. 6569 /// For pair of {i16, i8}, i32 store --> two i16 stores. 6570 /// For pair of {i8, i8}, i16 store --> two i8 stores. 6571 /// 6572 /// We allow each target to determine specifically which kind of splitting is 6573 /// supported. 6574 /// 6575 /// The store patterns are commonly seen from the simple code snippet below 6576 /// if only std::make_pair(...) is sroa transformed before inlined into hoo. 6577 /// void goo(const std::pair<int, float> &); 6578 /// hoo() { 6579 /// ... 6580 /// goo(std::make_pair(tmp, ftmp)); 6581 /// ... 6582 /// } 6583 /// 6584 /// Although we already have similar splitting in DAG Combine, we duplicate 6585 /// it in CodeGenPrepare to catch the case in which pattern is across 6586 /// multiple BBs. The logic in DAG Combine is kept to catch case generated 6587 /// during code expansion. 6588 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, 6589 const TargetLowering &TLI) { 6590 // Handle simple but common cases only. 6591 Type *StoreType = SI.getValueOperand()->getType(); 6592 if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) || 6593 DL.getTypeSizeInBits(StoreType) == 0) 6594 return false; 6595 6596 unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2; 6597 Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize); 6598 if (DL.getTypeStoreSizeInBits(SplitStoreType) != 6599 DL.getTypeSizeInBits(SplitStoreType)) 6600 return false; 6601 6602 // Match the following patterns: 6603 // (store (or (zext LValue to i64), 6604 // (shl (zext HValue to i64), 32)), HalfValBitSize) 6605 // or 6606 // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize) 6607 // (zext LValue to i64), 6608 // Expect both operands of OR and the first operand of SHL have only 6609 // one use. 6610 Value *LValue, *HValue; 6611 if (!match(SI.getValueOperand(), 6612 m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), 6613 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), 6614 m_SpecificInt(HalfValBitSize)))))) 6615 return false; 6616 6617 // Check LValue and HValue are int with size less or equal than 32. 6618 if (!LValue->getType()->isIntegerTy() || 6619 DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || 6620 !HValue->getType()->isIntegerTy() || 6621 DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) 6622 return false; 6623 6624 // If LValue/HValue is a bitcast instruction, use the EVT before bitcast 6625 // as the input of target query. 6626 auto *LBC = dyn_cast<BitCastInst>(LValue); 6627 auto *HBC = dyn_cast<BitCastInst>(HValue); 6628 EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) 6629 : EVT::getEVT(LValue->getType()); 6630 EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) 6631 : EVT::getEVT(HValue->getType()); 6632 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) 6633 return false; 6634 6635 // Start to split store. 6636 IRBuilder<> Builder(SI.getContext()); 6637 Builder.SetInsertPoint(&SI); 6638 6639 // If LValue/HValue is a bitcast in another BB, create a new one in current 6640 // BB so it may be merged with the splitted stores by dag combiner. 6641 if (LBC && LBC->getParent() != SI.getParent()) 6642 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); 6643 if (HBC && HBC->getParent() != SI.getParent()) 6644 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); 6645 6646 bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); 6647 auto CreateSplitStore = [&](Value *V, bool Upper) { 6648 V = Builder.CreateZExtOrBitCast(V, SplitStoreType); 6649 Value *Addr = Builder.CreateBitCast( 6650 SI.getOperand(1), 6651 SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); 6652 if ((IsLE && Upper) || (!IsLE && !Upper)) 6653 Addr = Builder.CreateGEP( 6654 SplitStoreType, Addr, 6655 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); 6656 Builder.CreateAlignedStore( 6657 V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment()); 6658 }; 6659 6660 CreateSplitStore(LValue, false); 6661 CreateSplitStore(HValue, true); 6662 6663 // Delete the old store. 6664 SI.eraseFromParent(); 6665 return true; 6666 } 6667 6668 // Return true if the GEP has two operands, the first operand is of a sequential 6669 // type, and the second operand is a constant. 6670 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { 6671 gep_type_iterator I = gep_type_begin(*GEP); 6672 return GEP->getNumOperands() == 2 && 6673 I.isSequential() && 6674 isa<ConstantInt>(GEP->getOperand(1)); 6675 } 6676 6677 // Try unmerging GEPs to reduce liveness interference (register pressure) across 6678 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, 6679 // reducing liveness interference across those edges benefits global register 6680 // allocation. Currently handles only certain cases. 6681 // 6682 // For example, unmerge %GEPI and %UGEPI as below. 6683 // 6684 // ---------- BEFORE ---------- 6685 // SrcBlock: 6686 // ... 6687 // %GEPIOp = ... 6688 // ... 6689 // %GEPI = gep %GEPIOp, Idx 6690 // ... 6691 // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] 6692 // (* %GEPI is alive on the indirectbr edges due to other uses ahead) 6693 // (* %GEPIOp is alive on the indirectbr edges only because of it's used by 6694 // %UGEPI) 6695 // 6696 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) 6697 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) 6698 // ... 6699 // 6700 // DstBi: 6701 // ... 6702 // %UGEPI = gep %GEPIOp, UIdx 6703 // ... 6704 // --------------------------- 6705 // 6706 // ---------- AFTER ---------- 6707 // SrcBlock: 6708 // ... (same as above) 6709 // (* %GEPI is still alive on the indirectbr edges) 6710 // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the 6711 // unmerging) 6712 // ... 6713 // 6714 // DstBi: 6715 // ... 6716 // %UGEPI = gep %GEPI, (UIdx-Idx) 6717 // ... 6718 // --------------------------- 6719 // 6720 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is 6721 // no longer alive on them. 6722 // 6723 // We try to unmerge GEPs here in CodGenPrepare, as opposed to limiting merging 6724 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as 6725 // not to disable further simplications and optimizations as a result of GEP 6726 // merging. 6727 // 6728 // Note this unmerging may increase the length of the data flow critical path 6729 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff 6730 // between the register pressure and the length of data-flow critical 6731 // path. Restricting this to the uncommon IndirectBr case would minimize the 6732 // impact of potentially longer critical path, if any, and the impact on compile 6733 // time. 6734 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, 6735 const TargetTransformInfo *TTI) { 6736 BasicBlock *SrcBlock = GEPI->getParent(); 6737 // Check that SrcBlock ends with an IndirectBr. If not, give up. The common 6738 // (non-IndirectBr) cases exit early here. 6739 if (!isa<IndirectBrInst>(SrcBlock->getTerminator())) 6740 return false; 6741 // Check that GEPI is a simple gep with a single constant index. 6742 if (!GEPSequentialConstIndexed(GEPI)) 6743 return false; 6744 ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1)); 6745 // Check that GEPI is a cheap one. 6746 if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType()) 6747 > TargetTransformInfo::TCC_Basic) 6748 return false; 6749 Value *GEPIOp = GEPI->getOperand(0); 6750 // Check that GEPIOp is an instruction that's also defined in SrcBlock. 6751 if (!isa<Instruction>(GEPIOp)) 6752 return false; 6753 auto *GEPIOpI = cast<Instruction>(GEPIOp); 6754 if (GEPIOpI->getParent() != SrcBlock) 6755 return false; 6756 // Check that GEP is used outside the block, meaning it's alive on the 6757 // IndirectBr edge(s). 6758 if (find_if(GEPI->users(), [&](User *Usr) { 6759 if (auto *I = dyn_cast<Instruction>(Usr)) { 6760 if (I->getParent() != SrcBlock) { 6761 return true; 6762 } 6763 } 6764 return false; 6765 }) == GEPI->users().end()) 6766 return false; 6767 // The second elements of the GEP chains to be unmerged. 6768 std::vector<GetElementPtrInst *> UGEPIs; 6769 // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive 6770 // on IndirectBr edges. 6771 for (User *Usr : GEPIOp->users()) { 6772 if (Usr == GEPI) continue; 6773 // Check if Usr is an Instruction. If not, give up. 6774 if (!isa<Instruction>(Usr)) 6775 return false; 6776 auto *UI = cast<Instruction>(Usr); 6777 // Check if Usr in the same block as GEPIOp, which is fine, skip. 6778 if (UI->getParent() == SrcBlock) 6779 continue; 6780 // Check if Usr is a GEP. If not, give up. 6781 if (!isa<GetElementPtrInst>(Usr)) 6782 return false; 6783 auto *UGEPI = cast<GetElementPtrInst>(Usr); 6784 // Check if UGEPI is a simple gep with a single constant index and GEPIOp is 6785 // the pointer operand to it. If so, record it in the vector. If not, give 6786 // up. 6787 if (!GEPSequentialConstIndexed(UGEPI)) 6788 return false; 6789 if (UGEPI->getOperand(0) != GEPIOp) 6790 return false; 6791 if (GEPIIdx->getType() != 6792 cast<ConstantInt>(UGEPI->getOperand(1))->getType()) 6793 return false; 6794 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 6795 if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType()) 6796 > TargetTransformInfo::TCC_Basic) 6797 return false; 6798 UGEPIs.push_back(UGEPI); 6799 } 6800 if (UGEPIs.size() == 0) 6801 return false; 6802 // Check the materializing cost of (Uidx-Idx). 6803 for (GetElementPtrInst *UGEPI : UGEPIs) { 6804 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 6805 APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); 6806 unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType()); 6807 if (ImmCost > TargetTransformInfo::TCC_Basic) 6808 return false; 6809 } 6810 // Now unmerge between GEPI and UGEPIs. 6811 for (GetElementPtrInst *UGEPI : UGEPIs) { 6812 UGEPI->setOperand(0, GEPI); 6813 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 6814 Constant *NewUGEPIIdx = 6815 ConstantInt::get(GEPIIdx->getType(), 6816 UGEPIIdx->getValue() - GEPIIdx->getValue()); 6817 UGEPI->setOperand(1, NewUGEPIIdx); 6818 // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not 6819 // inbounds to avoid UB. 6820 if (!GEPI->isInBounds()) { 6821 UGEPI->setIsInBounds(false); 6822 } 6823 } 6824 // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not 6825 // alive on IndirectBr edges). 6826 assert(find_if(GEPIOp->users(), [&](User *Usr) { 6827 return cast<Instruction>(Usr)->getParent() != SrcBlock; 6828 }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock"); 6829 return true; 6830 } 6831 6832 bool CodeGenPrepare::optimizeInst(Instruction *I, DominatorTree &DT, 6833 bool &ModifiedDT) { 6834 // Bail out if we inserted the instruction to prevent optimizations from 6835 // stepping on each other's toes. 6836 if (InsertedInsts.count(I)) 6837 return false; 6838 6839 if (PHINode *P = dyn_cast<PHINode>(I)) { 6840 // It is possible for very late stage optimizations (such as SimplifyCFG) 6841 // to introduce PHI nodes too late to be cleaned up. If we detect such a 6842 // trivial PHI, go ahead and zap it here. 6843 if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) { 6844 P->replaceAllUsesWith(V); 6845 P->eraseFromParent(); 6846 ++NumPHIsElim; 6847 return true; 6848 } 6849 return false; 6850 } 6851 6852 if (CastInst *CI = dyn_cast<CastInst>(I)) { 6853 // If the source of the cast is a constant, then this should have 6854 // already been constant folded. The only reason NOT to constant fold 6855 // it is if something (e.g. LSR) was careful to place the constant 6856 // evaluation in a block other than then one that uses it (e.g. to hoist 6857 // the address of globals out of a loop). If this is the case, we don't 6858 // want to forward-subst the cast. 6859 if (isa<Constant>(CI->getOperand(0))) 6860 return false; 6861 6862 if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL)) 6863 return true; 6864 6865 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { 6866 /// Sink a zext or sext into its user blocks if the target type doesn't 6867 /// fit in one register 6868 if (TLI && 6869 TLI->getTypeAction(CI->getContext(), 6870 TLI->getValueType(*DL, CI->getType())) == 6871 TargetLowering::TypeExpandInteger) { 6872 return SinkCast(CI); 6873 } else { 6874 bool MadeChange = optimizeExt(I); 6875 return MadeChange | optimizeExtUses(I); 6876 } 6877 } 6878 return false; 6879 } 6880 6881 if (auto *Cmp = dyn_cast<CmpInst>(I)) 6882 if (TLI && optimizeCmp(Cmp, *TLI, *DL, DT, ModifiedDT)) 6883 return true; 6884 6885 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 6886 LI->setMetadata(LLVMContext::MD_invariant_group, nullptr); 6887 if (TLI) { 6888 bool Modified = optimizeLoadExt(LI); 6889 unsigned AS = LI->getPointerAddressSpace(); 6890 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); 6891 return Modified; 6892 } 6893 return false; 6894 } 6895 6896 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 6897 if (TLI && splitMergedValStore(*SI, *DL, *TLI)) 6898 return true; 6899 SI->setMetadata(LLVMContext::MD_invariant_group, nullptr); 6900 if (TLI) { 6901 unsigned AS = SI->getPointerAddressSpace(); 6902 return optimizeMemoryInst(I, SI->getOperand(1), 6903 SI->getOperand(0)->getType(), AS); 6904 } 6905 return false; 6906 } 6907 6908 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 6909 unsigned AS = RMW->getPointerAddressSpace(); 6910 return optimizeMemoryInst(I, RMW->getPointerOperand(), 6911 RMW->getType(), AS); 6912 } 6913 6914 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) { 6915 unsigned AS = CmpX->getPointerAddressSpace(); 6916 return optimizeMemoryInst(I, CmpX->getPointerOperand(), 6917 CmpX->getCompareOperand()->getType(), AS); 6918 } 6919 6920 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); 6921 6922 if (BinOp && (BinOp->getOpcode() == Instruction::And) && 6923 EnableAndCmpSinking && TLI) 6924 return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts); 6925 6926 if (BinOp && (BinOp->getOpcode() == Instruction::AShr || 6927 BinOp->getOpcode() == Instruction::LShr)) { 6928 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); 6929 if (TLI && CI && TLI->hasExtractBitsInsn()) 6930 return OptimizeExtractBits(BinOp, CI, *TLI, *DL); 6931 6932 return false; 6933 } 6934 6935 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 6936 if (GEPI->hasAllZeroIndices()) { 6937 /// The GEP operand must be a pointer, so must its result -> BitCast 6938 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), 6939 GEPI->getName(), GEPI); 6940 NC->setDebugLoc(GEPI->getDebugLoc()); 6941 GEPI->replaceAllUsesWith(NC); 6942 GEPI->eraseFromParent(); 6943 ++NumGEPsElim; 6944 optimizeInst(NC, DT, ModifiedDT); 6945 return true; 6946 } 6947 if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) { 6948 return true; 6949 } 6950 return false; 6951 } 6952 6953 if (tryToSinkFreeOperands(I)) 6954 return true; 6955 6956 if (CallInst *CI = dyn_cast<CallInst>(I)) 6957 return optimizeCallInst(CI, ModifiedDT); 6958 6959 if (SelectInst *SI = dyn_cast<SelectInst>(I)) 6960 return optimizeSelectInst(SI); 6961 6962 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) 6963 return optimizeShuffleVectorInst(SVI); 6964 6965 if (auto *Switch = dyn_cast<SwitchInst>(I)) 6966 return optimizeSwitchInst(Switch); 6967 6968 if (isa<ExtractElementInst>(I)) 6969 return optimizeExtractElementInst(I); 6970 6971 return false; 6972 } 6973 6974 /// Given an OR instruction, check to see if this is a bitreverse 6975 /// idiom. If so, insert the new intrinsic and return true. 6976 static bool makeBitReverse(Instruction &I, const DataLayout &DL, 6977 const TargetLowering &TLI) { 6978 if (!I.getType()->isIntegerTy() || 6979 !TLI.isOperationLegalOrCustom(ISD::BITREVERSE, 6980 TLI.getValueType(DL, I.getType(), true))) 6981 return false; 6982 6983 SmallVector<Instruction*, 4> Insts; 6984 if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts)) 6985 return false; 6986 Instruction *LastInst = Insts.back(); 6987 I.replaceAllUsesWith(LastInst); 6988 RecursivelyDeleteTriviallyDeadInstructions(&I); 6989 return true; 6990 } 6991 6992 // In this pass we look for GEP and cast instructions that are used 6993 // across basic blocks and rewrite them to improve basic-block-at-a-time 6994 // selection. 6995 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, DominatorTree &DT, 6996 bool &ModifiedDT) { 6997 SunkAddrs.clear(); 6998 bool MadeChange = false; 6999 7000 CurInstIterator = BB.begin(); 7001 while (CurInstIterator != BB.end()) { 7002 MadeChange |= optimizeInst(&*CurInstIterator++, DT, ModifiedDT); 7003 if (ModifiedDT) 7004 return true; 7005 } 7006 7007 bool MadeBitReverse = true; 7008 while (TLI && MadeBitReverse) { 7009 MadeBitReverse = false; 7010 for (auto &I : reverse(BB)) { 7011 if (makeBitReverse(I, *DL, *TLI)) { 7012 MadeBitReverse = MadeChange = true; 7013 ModifiedDT = true; 7014 break; 7015 } 7016 } 7017 } 7018 MadeChange |= dupRetToEnableTailCallOpts(&BB); 7019 7020 return MadeChange; 7021 } 7022 7023 // llvm.dbg.value is far away from the value then iSel may not be able 7024 // handle it properly. iSel will drop llvm.dbg.value if it can not 7025 // find a node corresponding to the value. 7026 bool CodeGenPrepare::placeDbgValues(Function &F) { 7027 bool MadeChange = false; 7028 for (BasicBlock &BB : F) { 7029 Instruction *PrevNonDbgInst = nullptr; 7030 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { 7031 Instruction *Insn = &*BI++; 7032 DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn); 7033 // Leave dbg.values that refer to an alloca alone. These 7034 // intrinsics describe the address of a variable (= the alloca) 7035 // being taken. They should not be moved next to the alloca 7036 // (and to the beginning of the scope), but rather stay close to 7037 // where said address is used. 7038 if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) { 7039 PrevNonDbgInst = Insn; 7040 continue; 7041 } 7042 7043 Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue()); 7044 if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) { 7045 // If VI is a phi in a block with an EHPad terminator, we can't insert 7046 // after it. 7047 if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad()) 7048 continue; 7049 LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n" 7050 << *DVI << ' ' << *VI); 7051 DVI->removeFromParent(); 7052 if (isa<PHINode>(VI)) 7053 DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt()); 7054 else 7055 DVI->insertAfter(VI); 7056 MadeChange = true; 7057 ++NumDbgValueMoved; 7058 } 7059 } 7060 } 7061 return MadeChange; 7062 } 7063 7064 /// Scale down both weights to fit into uint32_t. 7065 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { 7066 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; 7067 uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; 7068 NewTrue = NewTrue / Scale; 7069 NewFalse = NewFalse / Scale; 7070 } 7071 7072 /// Some targets prefer to split a conditional branch like: 7073 /// \code 7074 /// %0 = icmp ne i32 %a, 0 7075 /// %1 = icmp ne i32 %b, 0 7076 /// %or.cond = or i1 %0, %1 7077 /// br i1 %or.cond, label %TrueBB, label %FalseBB 7078 /// \endcode 7079 /// into multiple branch instructions like: 7080 /// \code 7081 /// bb1: 7082 /// %0 = icmp ne i32 %a, 0 7083 /// br i1 %0, label %TrueBB, label %bb2 7084 /// bb2: 7085 /// %1 = icmp ne i32 %b, 0 7086 /// br i1 %1, label %TrueBB, label %FalseBB 7087 /// \endcode 7088 /// This usually allows instruction selection to do even further optimizations 7089 /// and combine the compare with the branch instruction. Currently this is 7090 /// applied for targets which have "cheap" jump instructions. 7091 /// 7092 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. 7093 /// 7094 bool CodeGenPrepare::splitBranchCondition(Function &F) { 7095 if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive()) 7096 return false; 7097 7098 bool MadeChange = false; 7099 for (auto &BB : F) { 7100 // Does this BB end with the following? 7101 // %cond1 = icmp|fcmp|binary instruction ... 7102 // %cond2 = icmp|fcmp|binary instruction ... 7103 // %cond.or = or|and i1 %cond1, cond2 7104 // br i1 %cond.or label %dest1, label %dest2" 7105 BinaryOperator *LogicOp; 7106 BasicBlock *TBB, *FBB; 7107 if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB))) 7108 continue; 7109 7110 auto *Br1 = cast<BranchInst>(BB.getTerminator()); 7111 if (Br1->getMetadata(LLVMContext::MD_unpredictable)) 7112 continue; 7113 7114 unsigned Opc; 7115 Value *Cond1, *Cond2; 7116 if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)), 7117 m_OneUse(m_Value(Cond2))))) 7118 Opc = Instruction::And; 7119 else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)), 7120 m_OneUse(m_Value(Cond2))))) 7121 Opc = Instruction::Or; 7122 else 7123 continue; 7124 7125 if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) || 7126 !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) ) 7127 continue; 7128 7129 LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump()); 7130 7131 // Create a new BB. 7132 auto TmpBB = 7133 BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split", 7134 BB.getParent(), BB.getNextNode()); 7135 7136 // Update original basic block by using the first condition directly by the 7137 // branch instruction and removing the no longer needed and/or instruction. 7138 Br1->setCondition(Cond1); 7139 LogicOp->eraseFromParent(); 7140 7141 // Depending on the condition we have to either replace the true or the 7142 // false successor of the original branch instruction. 7143 if (Opc == Instruction::And) 7144 Br1->setSuccessor(0, TmpBB); 7145 else 7146 Br1->setSuccessor(1, TmpBB); 7147 7148 // Fill in the new basic block. 7149 auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB); 7150 if (auto *I = dyn_cast<Instruction>(Cond2)) { 7151 I->removeFromParent(); 7152 I->insertBefore(Br2); 7153 } 7154 7155 // Update PHI nodes in both successors. The original BB needs to be 7156 // replaced in one successor's PHI nodes, because the branch comes now from 7157 // the newly generated BB (NewBB). In the other successor we need to add one 7158 // incoming edge to the PHI nodes, because both branch instructions target 7159 // now the same successor. Depending on the original branch condition 7160 // (and/or) we have to swap the successors (TrueDest, FalseDest), so that 7161 // we perform the correct update for the PHI nodes. 7162 // This doesn't change the successor order of the just created branch 7163 // instruction (or any other instruction). 7164 if (Opc == Instruction::Or) 7165 std::swap(TBB, FBB); 7166 7167 // Replace the old BB with the new BB. 7168 for (PHINode &PN : TBB->phis()) { 7169 int i; 7170 while ((i = PN.getBasicBlockIndex(&BB)) >= 0) 7171 PN.setIncomingBlock(i, TmpBB); 7172 } 7173 7174 // Add another incoming edge form the new BB. 7175 for (PHINode &PN : FBB->phis()) { 7176 auto *Val = PN.getIncomingValueForBlock(&BB); 7177 PN.addIncoming(Val, TmpBB); 7178 } 7179 7180 // Update the branch weights (from SelectionDAGBuilder:: 7181 // FindMergedConditions). 7182 if (Opc == Instruction::Or) { 7183 // Codegen X | Y as: 7184 // BB1: 7185 // jmp_if_X TBB 7186 // jmp TmpBB 7187 // TmpBB: 7188 // jmp_if_Y TBB 7189 // jmp FBB 7190 // 7191 7192 // We have flexibility in setting Prob for BB1 and Prob for NewBB. 7193 // The requirement is that 7194 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) 7195 // = TrueProb for original BB. 7196 // Assuming the original weights are A and B, one choice is to set BB1's 7197 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice 7198 // assumes that 7199 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. 7200 // Another choice is to assume TrueProb for BB1 equals to TrueProb for 7201 // TmpBB, but the math is more complicated. 7202 uint64_t TrueWeight, FalseWeight; 7203 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) { 7204 uint64_t NewTrueWeight = TrueWeight; 7205 uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight; 7206 scaleWeights(NewTrueWeight, NewFalseWeight); 7207 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) 7208 .createBranchWeights(TrueWeight, FalseWeight)); 7209 7210 NewTrueWeight = TrueWeight; 7211 NewFalseWeight = 2 * FalseWeight; 7212 scaleWeights(NewTrueWeight, NewFalseWeight); 7213 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) 7214 .createBranchWeights(TrueWeight, FalseWeight)); 7215 } 7216 } else { 7217 // Codegen X & Y as: 7218 // BB1: 7219 // jmp_if_X TmpBB 7220 // jmp FBB 7221 // TmpBB: 7222 // jmp_if_Y TBB 7223 // jmp FBB 7224 // 7225 // This requires creation of TmpBB after CurBB. 7226 7227 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 7228 // The requirement is that 7229 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 7230 // = FalseProb for original BB. 7231 // Assuming the original weights are A and B, one choice is to set BB1's 7232 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice 7233 // assumes that 7234 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. 7235 uint64_t TrueWeight, FalseWeight; 7236 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) { 7237 uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight; 7238 uint64_t NewFalseWeight = FalseWeight; 7239 scaleWeights(NewTrueWeight, NewFalseWeight); 7240 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) 7241 .createBranchWeights(TrueWeight, FalseWeight)); 7242 7243 NewTrueWeight = 2 * TrueWeight; 7244 NewFalseWeight = FalseWeight; 7245 scaleWeights(NewTrueWeight, NewFalseWeight); 7246 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) 7247 .createBranchWeights(TrueWeight, FalseWeight)); 7248 } 7249 } 7250 7251 // Note: No point in getting fancy here, since the DT info is never 7252 // available to CodeGenPrepare. 7253 ModifiedDT = true; 7254 7255 MadeChange = true; 7256 7257 LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump(); 7258 TmpBB->dump()); 7259 } 7260 return MadeChange; 7261 } 7262