//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 169 "optimization in CodeGenPrepare")); 170 171 static cl::opt<bool> DisablePreheaderProtect( 172 "disable-preheader-prot", cl::Hidden, cl::init(false), 173 cl::desc("Disable protection against removing loop preheaders")); 174 175 static cl::opt<bool> ProfileGuidedSectionPrefix( 176 "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore, 177 cl::desc("Use profile info to add section prefix for hot/cold functions")); 178 179 static cl::opt<bool> ProfileUnknownInSpecialSection( 180 "profile-unknown-in-special-section", cl::Hidden, cl::init(false), 181 cl::ZeroOrMore, 182 cl::desc("In profiling mode like sampleFDO, if a function doesn't have " 183 "profile, we cannot tell the function is cold for sure because " 184 "it may be a function newly added without ever being sampled. " 185 "With the flag enabled, compiler can put such profile unknown " 186 "functions into a special section, so runtime system can choose " 187 "to handle it in a different way than .text section, to save " 188 "RAM for example. ")); 189 190 static cl::opt<unsigned> FreqRatioToSkipMerge( 191 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), 192 cl::desc("Skip merging empty blocks if (frequency of empty block) / " 193 "(frequency of destination block) is greater than this ratio")); 194 195 static cl::opt<bool> ForceSplitStore( 196 "force-split-store", cl::Hidden, cl::init(false), 197 cl::desc("Force store splitting no matter what the target query says.")); 198 199 static cl::opt<bool> 200 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden, 201 cl::desc("Enable merging of redundant sexts when one is dominating" 202 " the other."), cl::init(true)); 203 204 static cl::opt<bool> DisableComplexAddrModes( 205 "disable-complex-addr-modes", cl::Hidden, cl::init(false), 206 cl::desc("Disables combining addressing modes with different parts " 207 "in optimizeMemoryInst.")); 208 209 static cl::opt<bool> 210 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false), 211 cl::desc("Allow creation of Phis in Address sinking.")); 212 213 static cl::opt<bool> 214 AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true), 215 cl::desc("Allow creation of selects in Address sinking.")); 216 217 static cl::opt<bool> AddrSinkCombineBaseReg( 218 "addr-sink-combine-base-reg", cl::Hidden, cl::init(true), 219 cl::desc("Allow combining of BaseReg field in Address sinking.")); 220 221 static cl::opt<bool> AddrSinkCombineBaseGV( 222 "addr-sink-combine-base-gv", cl::Hidden, cl::init(true), 223 cl::desc("Allow combining of BaseGV field in Address sinking.")); 224 225 static cl::opt<bool> AddrSinkCombineBaseOffs( 226 "addr-sink-combine-base-offs", cl::Hidden, cl::init(true), 227 cl::desc("Allow combining of BaseOffs field in Address sinking.")); 228 229 static cl::opt<bool> AddrSinkCombineScaledReg( 230 "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), 231 cl::desc("Allow combining of ScaledReg field in Address sinking.")); 232 233 static cl::opt<bool> 234 EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden, 235 cl::init(true), 236 cl::desc("Enable splitting large offset of GEP.")); 237 238 static cl::opt<bool> EnableICMP_EQToICMP_ST( 239 "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false), 240 cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion.")); 241 242 static cl::opt<bool> 243 VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false), 244 cl::desc("Enable BFI update verification for " 245 
"CodeGenPrepare.")); 246 247 static cl::opt<bool> OptimizePhiTypes( 248 "cgp-optimize-phi-types", cl::Hidden, cl::init(false), 249 cl::desc("Enable converting phi types in CodeGenPrepare")); 250 251 namespace { 252 253 enum ExtType { 254 ZeroExtension, // Zero extension has been seen. 255 SignExtension, // Sign extension has been seen. 256 BothExtension // This extension type is used if we saw sext after 257 // ZeroExtension had been set, or if we saw zext after 258 // SignExtension had been set. It makes the type 259 // information of a promoted instruction invalid. 260 }; 261 262 using SetOfInstrs = SmallPtrSet<Instruction *, 16>; 263 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>; 264 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>; 265 using SExts = SmallVector<Instruction *, 16>; 266 using ValueToSExts = DenseMap<Value *, SExts>; 267 268 class TypePromotionTransaction; 269 270 class CodeGenPrepare : public FunctionPass { 271 const TargetMachine *TM = nullptr; 272 const TargetSubtargetInfo *SubtargetInfo; 273 const TargetLowering *TLI = nullptr; 274 const TargetRegisterInfo *TRI; 275 const TargetTransformInfo *TTI = nullptr; 276 const TargetLibraryInfo *TLInfo; 277 const LoopInfo *LI; 278 std::unique_ptr<BlockFrequencyInfo> BFI; 279 std::unique_ptr<BranchProbabilityInfo> BPI; 280 ProfileSummaryInfo *PSI; 281 282 /// As we scan instructions optimizing them, this is the next instruction 283 /// to optimize. Transforms that can invalidate this should update it. 284 BasicBlock::iterator CurInstIterator; 285 286 /// Keeps track of non-local addresses that have been sunk into a block. 287 /// This allows us to avoid inserting duplicate code for blocks with 288 /// multiple load/stores of the same address. The usage of WeakTrackingVH 289 /// enables SunkAddrs to be treated as a cache whose entries can be 290 /// invalidated if a sunken address computation has been erased. 291 ValueMap<Value*, WeakTrackingVH> SunkAddrs; 292 293 /// Keeps track of all instructions inserted for the current function. 294 SetOfInstrs InsertedInsts; 295 296 /// Keeps track of the type of the related instruction before their 297 /// promotion for the current function. 298 InstrToOrigTy PromotedInsts; 299 300 /// Keep track of instructions removed during promotion. 301 SetOfInstrs RemovedInsts; 302 303 /// Keep track of sext chains based on their initial value. 304 DenseMap<Value *, Instruction *> SeenChainsForSExt; 305 306 /// Keep track of GEPs accessing the same data structures such as structs or 307 /// arrays that are candidates to be split later because of their large 308 /// size. 309 MapVector< 310 AssertingVH<Value>, 311 SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>> 312 LargeOffsetGEPMap; 313 314 /// Keep track of new GEP base after splitting the GEPs having large offset. 315 SmallSet<AssertingVH<Value>, 2> NewGEPBases; 316 317 /// Map serial numbers to Large offset GEPs. 318 DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID; 319 320 /// Keep track of SExt promoted. 321 ValueToSExts ValToSExtendedUses; 322 323 /// True if the function has the OptSize attribute. 324 bool OptSize; 325 326 /// DataLayout for the Function being processed. 327 const DataLayout *DL = nullptr; 328 329 /// Building the dominator tree can be expensive, so we only build it 330 /// lazily and update it when required. 
331 std::unique_ptr<DominatorTree> DT; 332 333 public: 334 static char ID; // Pass identification, replacement for typeid 335 336 CodeGenPrepare() : FunctionPass(ID) { 337 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); 338 } 339 340 bool runOnFunction(Function &F) override; 341 342 StringRef getPassName() const override { return "CodeGen Prepare"; } 343 344 void getAnalysisUsage(AnalysisUsage &AU) const override { 345 // FIXME: When we can selectively preserve passes, preserve the domtree. 346 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 347 AU.addRequired<TargetLibraryInfoWrapperPass>(); 348 AU.addRequired<TargetPassConfig>(); 349 AU.addRequired<TargetTransformInfoWrapperPass>(); 350 AU.addRequired<LoopInfoWrapperPass>(); 351 } 352 353 private: 354 template <typename F> 355 void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) { 356 // Substituting can cause recursive simplifications, which can invalidate 357 // our iterator. Use a WeakTrackingVH to hold onto it in case this 358 // happens. 359 Value *CurValue = &*CurInstIterator; 360 WeakTrackingVH IterHandle(CurValue); 361 362 f(); 363 364 // If the iterator instruction was recursively deleted, start over at the 365 // start of the block. 366 if (IterHandle != CurValue) { 367 CurInstIterator = BB->begin(); 368 SunkAddrs.clear(); 369 } 370 } 371 372 // Get the DominatorTree, building if necessary. 373 DominatorTree &getDT(Function &F) { 374 if (!DT) 375 DT = std::make_unique<DominatorTree>(F); 376 return *DT; 377 } 378 379 void removeAllAssertingVHReferences(Value *V); 380 bool eliminateFallThrough(Function &F); 381 bool eliminateMostlyEmptyBlocks(Function &F); 382 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB); 383 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; 384 void eliminateMostlyEmptyBlock(BasicBlock *BB); 385 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB, 386 bool isPreheader); 387 bool makeBitReverse(Instruction &I); 388 bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT); 389 bool optimizeInst(Instruction *I, bool &ModifiedDT); 390 bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 391 Type *AccessTy, unsigned AddrSpace); 392 bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr); 393 bool optimizeInlineAsmInst(CallInst *CS); 394 bool optimizeCallInst(CallInst *CI, bool &ModifiedDT); 395 bool optimizeExt(Instruction *&I); 396 bool optimizeExtUses(Instruction *I); 397 bool optimizeLoadExt(LoadInst *Load); 398 bool optimizeShiftInst(BinaryOperator *BO); 399 bool optimizeFunnelShift(IntrinsicInst *Fsh); 400 bool optimizeSelectInst(SelectInst *SI); 401 bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI); 402 bool optimizeSwitchInst(SwitchInst *SI); 403 bool optimizeExtractElementInst(Instruction *Inst); 404 bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT); 405 bool fixupDbgValue(Instruction *I); 406 bool placeDbgValues(Function &F); 407 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts, 408 LoadInst *&LI, Instruction *&Inst, bool HasPromoted); 409 bool tryToPromoteExts(TypePromotionTransaction &TPT, 410 const SmallVectorImpl<Instruction *> &Exts, 411 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 412 unsigned CreatedInstsCost = 0); 413 bool mergeSExts(Function &F); 414 bool splitLargeGEPOffsets(); 415 bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited, 416 SmallPtrSetImpl<Instruction *> &DeletedInstrs); 417 bool optimizePhiTypes(Function &F); 418 
bool performAddressTypePromotion( 419 Instruction *&Inst, 420 bool AllowPromotionWithoutCommonHeader, 421 bool HasPromoted, TypePromotionTransaction &TPT, 422 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts); 423 bool splitBranchCondition(Function &F, bool &ModifiedDT); 424 bool simplifyOffsetableRelocate(GCStatepointInst &I); 425 426 bool tryToSinkFreeOperands(Instruction *I); 427 bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, 428 Value *Arg1, CmpInst *Cmp, 429 Intrinsic::ID IID); 430 bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT); 431 bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT); 432 bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT); 433 void verifyBFIUpdates(Function &F); 434 }; 435 436 } // end anonymous namespace 437 438 char CodeGenPrepare::ID = 0; 439 440 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE, 441 "Optimize for code generation", false, false) 442 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 443 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 444 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 445 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 446 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 447 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, 448 "Optimize for code generation", false, false) 449 450 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); } 451 452 bool CodeGenPrepare::runOnFunction(Function &F) { 453 if (skipFunction(F)) 454 return false; 455 456 DL = &F.getParent()->getDataLayout(); 457 458 bool EverMadeChange = false; 459 // Clear per function information. 460 InsertedInsts.clear(); 461 PromotedInsts.clear(); 462 463 TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>(); 464 SubtargetInfo = TM->getSubtargetImpl(F); 465 TLI = SubtargetInfo->getTargetLowering(); 466 TRI = SubtargetInfo->getRegisterInfo(); 467 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 468 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 469 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 470 BPI.reset(new BranchProbabilityInfo(F, *LI)); 471 BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI)); 472 PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 473 OptSize = F.hasOptSize(); 474 if (ProfileGuidedSectionPrefix) { 475 if (PSI->isFunctionHotInCallGraph(&F, *BFI)) 476 F.setSectionPrefix(".hot"); 477 else if (PSI->isFunctionColdInCallGraph(&F, *BFI)) 478 F.setSectionPrefix(".unlikely"); 479 else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() && 480 PSI->isFunctionHotnessUnknown(F)) 481 F.setSectionPrefix(".unknown"); 482 } 483 484 /// This optimization identifies DIV instructions that can be 485 /// profitably bypassed and carried out with a shorter, faster divide. 486 if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) { 487 const DenseMap<unsigned int, unsigned int> &BypassWidths = 488 TLI->getBypassSlowDivWidths(); 489 BasicBlock* BB = &*F.begin(); 490 while (BB != nullptr) { 491 // bypassSlowDivision may create new BBs, but we don't want to reapply the 492 // optimization to those blocks. 493 BasicBlock* Next = BB->getNextNode(); 494 // F.hasOptSize is already checked in the outer if statement. 495 if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get())) 496 EverMadeChange |= bypassSlowDivision(BB, BypassWidths); 497 BB = Next; 498 } 499 } 500 501 // Eliminate blocks that contain only PHI nodes and an 502 // unconditional branch. 
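  // As an illustrative sketch (hypothetical block/value names, not from this
  // file), a "mostly empty" block such as:
  //
  //   empty:
  //     %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  //     br label %dest
  //
  // can typically be folded into %dest, with %dest's PHIs updated to take %a
  // and %b directly from %bb1/%bb2, subject to the profitability checks in
  // isMergingEmptyBlockProfitable().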
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  bool ModifiedDT = false;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= SplitIndirectBrCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    DT.reset();
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was
      // changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);

#ifndef NDEBUG
  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);
#endif

  return EverMadeChange;
}

/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
612 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) { 613 LargeOffsetGEPMap.erase(V); 614 NewGEPBases.erase(V); 615 616 auto GEP = dyn_cast<GetElementPtrInst>(V); 617 if (!GEP) 618 return; 619 620 LargeOffsetGEPID.erase(GEP); 621 622 auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand()); 623 if (VecI == LargeOffsetGEPMap.end()) 624 return; 625 626 auto &GEPVector = VecI->second; 627 const auto &I = std::find_if(GEPVector.begin(), GEPVector.end(), 628 [=](auto &Elt) { return Elt.first == GEP; }); 629 if (I == GEPVector.end()) 630 return; 631 632 GEPVector.erase(I); 633 if (GEPVector.empty()) 634 LargeOffsetGEPMap.erase(VecI); 635 } 636 637 // Verify BFI has been updated correctly by recomputing BFI and comparing them. 638 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) { 639 DominatorTree NewDT(F); 640 LoopInfo NewLI(NewDT); 641 BranchProbabilityInfo NewBPI(F, NewLI, TLInfo); 642 BlockFrequencyInfo NewBFI(F, NewBPI, NewLI); 643 NewBFI.verifyMatch(*BFI); 644 } 645 646 /// Merge basic blocks which are connected by a single edge, where one of the 647 /// basic blocks has a single successor pointing to the other basic block, 648 /// which has a single predecessor. 649 bool CodeGenPrepare::eliminateFallThrough(Function &F) { 650 bool Changed = false; 651 // Scan all of the blocks in the function, except for the entry block. 652 // Use a temporary array to avoid iterator being invalidated when 653 // deleting blocks. 654 SmallVector<WeakTrackingVH, 16> Blocks; 655 for (auto &Block : llvm::make_range(std::next(F.begin()), F.end())) 656 Blocks.push_back(&Block); 657 658 for (auto &Block : Blocks) { 659 auto *BB = cast_or_null<BasicBlock>(Block); 660 if (!BB) 661 continue; 662 // If the destination block has a single pred, then this is a trivial 663 // edge, just collapse it. 664 BasicBlock *SinglePred = BB->getSinglePredecessor(); 665 666 // Don't merge if BB's address is taken. 667 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; 668 669 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); 670 if (Term && !Term->isConditional()) { 671 Changed = true; 672 LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n"); 673 674 // Merge BB into SinglePred and delete it. 675 MergeBlockIntoPredecessor(BB); 676 } 677 } 678 return Changed; 679 } 680 681 /// Find a destination block from BB if BB is mergeable empty block. 682 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) { 683 // If this block doesn't end with an uncond branch, ignore it. 684 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); 685 if (!BI || !BI->isUnconditional()) 686 return nullptr; 687 688 // If the instruction before the branch (skipping debug info) isn't a phi 689 // node, then other stuff is happening here. 690 BasicBlock::iterator BBI = BI->getIterator(); 691 if (BBI != BB->begin()) { 692 --BBI; 693 while (isa<DbgInfoIntrinsic>(BBI)) { 694 if (BBI == BB->begin()) 695 break; 696 --BBI; 697 } 698 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) 699 return nullptr; 700 } 701 702 // Do not break infinite loops. 703 BasicBlock *DestBB = BI->getSuccessor(0); 704 if (DestBB == BB) 705 return nullptr; 706 707 if (!canMergeBlocks(BB, DestBB)) 708 DestBB = nullptr; 709 710 return DestBB; 711 } 712 713 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an 714 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split 715 /// edges in ways that are non-optimal for isel. 
Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator()))
      for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
        if (DestBB == CBI->getSuccessor(i))
          return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping merging is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  //   Freq(Pred) / Freq(BB) > 2.
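  // As a worked example with hypothetical frequencies and the default ratio
  // of 2: Freq(Pred) = 300 and Freq(BB) = 100 gives 300 > 2 * 100, so the
  // merge is skipped; with Freq(Pred) = 150 the merge would be allowed.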
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In that
  // case, Cost(merging BB) will be the sum of their frequencies.

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In that case there is no reason to skip merging, because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
883 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { 884 BasicBlock *Pred = DestBBPN->getIncomingBlock(i); 885 if (BBPreds.count(Pred)) { // Common predecessor? 886 for (const PHINode &PN : DestBB->phis()) { 887 const Value *V1 = PN.getIncomingValueForBlock(Pred); 888 const Value *V2 = PN.getIncomingValueForBlock(BB); 889 890 // If V2 is a phi node in BB, look up what the mapped value will be. 891 if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) 892 if (V2PN->getParent() == BB) 893 V2 = V2PN->getIncomingValueForBlock(Pred); 894 895 // If there is a conflict, bail out. 896 if (V1 != V2) return false; 897 } 898 } 899 } 900 901 return true; 902 } 903 904 /// Eliminate a basic block that has only phi's and an unconditional branch in 905 /// it. 906 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) { 907 BranchInst *BI = cast<BranchInst>(BB->getTerminator()); 908 BasicBlock *DestBB = BI->getSuccessor(0); 909 910 LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" 911 << *BB << *DestBB); 912 913 // If the destination block has a single pred, then this is a trivial edge, 914 // just collapse it. 915 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { 916 if (SinglePred != DestBB) { 917 assert(SinglePred == BB && 918 "Single predecessor not the same as predecessor"); 919 // Merge DestBB into SinglePred/BB and delete it. 920 MergeBlockIntoPredecessor(DestBB); 921 // Note: BB(=SinglePred) will not be deleted on this path. 922 // DestBB(=its single successor) is the one that was deleted. 923 LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n"); 924 return; 925 } 926 } 927 928 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB 929 // to handle the new incoming edges it is about to have. 930 for (PHINode &PN : DestBB->phis()) { 931 // Remove the incoming value for BB, and remember it. 932 Value *InVal = PN.removeIncomingValue(BB, false); 933 934 // Two options: either the InVal is a phi node defined in BB or it is some 935 // value that dominates BB. 936 PHINode *InValPhi = dyn_cast<PHINode>(InVal); 937 if (InValPhi && InValPhi->getParent() == BB) { 938 // Add all of the input values of the input PHI as inputs of this phi. 939 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) 940 PN.addIncoming(InValPhi->getIncomingValue(i), 941 InValPhi->getIncomingBlock(i)); 942 } else { 943 // Otherwise, add one instance of the dominating value for each edge that 944 // we will be adding. 945 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 946 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 947 PN.addIncoming(InVal, BBPN->getIncomingBlock(i)); 948 } else { 949 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) 950 PN.addIncoming(InVal, *PI); 951 } 952 } 953 } 954 955 // The PHIs are now updated, change everything that refers to BB to use 956 // DestBB and remove BB. 
957 BB->replaceAllUsesWith(DestBB); 958 BB->eraseFromParent(); 959 ++NumBlocksElim; 960 961 LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 962 } 963 964 // Computes a map of base pointer relocation instructions to corresponding 965 // derived pointer relocation instructions given a vector of all relocate calls 966 static void computeBaseDerivedRelocateMap( 967 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, 968 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> 969 &RelocateInstMap) { 970 // Collect information in two maps: one primarily for locating the base object 971 // while filling the second map; the second map is the final structure holding 972 // a mapping between Base and corresponding Derived relocate calls 973 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; 974 for (auto *ThisRelocate : AllRelocateCalls) { 975 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), 976 ThisRelocate->getDerivedPtrIndex()); 977 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); 978 } 979 for (auto &Item : RelocateIdxMap) { 980 std::pair<unsigned, unsigned> Key = Item.first; 981 if (Key.first == Key.second) 982 // Base relocation: nothing to insert 983 continue; 984 985 GCRelocateInst *I = Item.second; 986 auto BaseKey = std::make_pair(Key.first, Key.first); 987 988 // We're iterating over RelocateIdxMap so we cannot modify it. 989 auto MaybeBase = RelocateIdxMap.find(BaseKey); 990 if (MaybeBase == RelocateIdxMap.end()) 991 // TODO: We might want to insert a new base object relocate and gep off 992 // that, if there are enough derived object relocates. 993 continue; 994 995 RelocateInstMap[MaybeBase->second].push_back(I); 996 } 997 } 998 999 // Accepts a GEP and extracts the operands into a vector provided they're all 1000 // small integer constants 1001 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, 1002 SmallVectorImpl<Value *> &OffsetV) { 1003 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 1004 // Only accept small constant integer operands 1005 auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); 1006 if (!Op || Op->getZExtValue() > 20) 1007 return false; 1008 } 1009 1010 for (unsigned i = 1; i < GEP->getNumOperands(); i++) 1011 OffsetV.push_back(GEP->getOperand(i)); 1012 return true; 1013 } 1014 1015 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to 1016 // replace, computes a replacement, and affects it. 1017 static bool 1018 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, 1019 const SmallVectorImpl<GCRelocateInst *> &Targets) { 1020 bool MadeChange = false; 1021 // We must ensure the relocation of derived pointer is defined after 1022 // relocation of base pointer. If we find a relocation corresponding to base 1023 // defined earlier than relocation of base then we move relocation of base 1024 // right before found relocation. We consider only relocation in the same 1025 // basic block as relocation of base. Relocations from other basic block will 1026 // be skipped by optimization and we do not care about them. 
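  // As an illustrative sketch (hypothetical value names), if the block
  // contains:
  //
  //   %derived = gc.relocate(%tok, i32 4, i32 5)   ; derived pointer
  //   %base    = gc.relocate(%tok, i32 4, i32 4)   ; base pointer
  //
  // the base relocation is moved immediately before %derived, so that the
  // replacement GEP built right after %base precedes the derived relocate it
  // replaces.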
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto *RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep.
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase.
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast whether or not one already exists. This way we handle all cases,
    // and the extra bitcast should be optimized away by later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
1100 Value *ActualReplacement = Replacement; 1101 if (Replacement->getType() != ToReplace->getType()) { 1102 ActualReplacement = 1103 Builder.CreateBitCast(Replacement, ToReplace->getType()); 1104 } 1105 ToReplace->replaceAllUsesWith(ActualReplacement); 1106 ToReplace->eraseFromParent(); 1107 1108 MadeChange = true; 1109 } 1110 return MadeChange; 1111 } 1112 1113 // Turns this: 1114 // 1115 // %base = ... 1116 // %ptr = gep %base + 15 1117 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1118 // %base' = relocate(%tok, i32 4, i32 4) 1119 // %ptr' = relocate(%tok, i32 4, i32 5) 1120 // %val = load %ptr' 1121 // 1122 // into this: 1123 // 1124 // %base = ... 1125 // %ptr = gep %base + 15 1126 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 1127 // %base' = gc.relocate(%tok, i32 4, i32 4) 1128 // %ptr' = gep %base' + 15 1129 // %val = load %ptr' 1130 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) { 1131 bool MadeChange = false; 1132 SmallVector<GCRelocateInst *, 2> AllRelocateCalls; 1133 for (auto *U : I.users()) 1134 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) 1135 // Collect all the relocate calls associated with a statepoint 1136 AllRelocateCalls.push_back(Relocate); 1137 1138 // We need at least one base pointer relocation + one derived pointer 1139 // relocation to mangle 1140 if (AllRelocateCalls.size() < 2) 1141 return false; 1142 1143 // RelocateInstMap is a mapping from the base relocate instruction to the 1144 // corresponding derived relocate instructions 1145 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; 1146 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); 1147 if (RelocateInstMap.empty()) 1148 return false; 1149 1150 for (auto &Item : RelocateInstMap) 1151 // Item.first is the RelocatedBase to offset against 1152 // Item.second is the vector of Targets to replace 1153 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second); 1154 return MadeChange; 1155 } 1156 1157 /// Sink the specified cast instruction into its user blocks. 1158 static bool SinkCast(CastInst *CI) { 1159 BasicBlock *DefBB = CI->getParent(); 1160 1161 /// InsertedCasts - Only insert a cast in each block once. 1162 DenseMap<BasicBlock*, CastInst*> InsertedCasts; 1163 1164 bool MadeChange = false; 1165 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 1166 UI != E; ) { 1167 Use &TheUse = UI.getUse(); 1168 Instruction *User = cast<Instruction>(*UI); 1169 1170 // Figure out which BB this cast is used in. For PHI's this is the 1171 // appropriate predecessor block. 1172 BasicBlock *UserBB = User->getParent(); 1173 if (PHINode *PN = dyn_cast<PHINode>(User)) { 1174 UserBB = PN->getIncomingBlock(TheUse); 1175 } 1176 1177 // Preincrement use iterator so we don't invalidate it. 1178 ++UI; 1179 1180 // The first insertion point of a block containing an EH pad is after the 1181 // pad. If the pad is the user, we cannot sink the cast past the pad. 1182 if (User->isEHPad()) 1183 continue; 1184 1185 // If the block selected to receive the cast is an EH pad that does not 1186 // allow non-PHI instructions before the terminator, we can't sink the 1187 // cast. 1188 if (UserBB->getTerminator()->isEHPad()) 1189 continue; 1190 1191 // If this user is in the same block as the cast, don't change the cast. 1192 if (UserBB == DefBB) continue; 1193 1194 // If we have already inserted a cast into this block, use it. 
1195 CastInst *&InsertedCast = InsertedCasts[UserBB]; 1196 1197 if (!InsertedCast) { 1198 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1199 assert(InsertPt != UserBB->end()); 1200 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 1201 CI->getType(), "", &*InsertPt); 1202 InsertedCast->setDebugLoc(CI->getDebugLoc()); 1203 } 1204 1205 // Replace a use of the cast with a use of the new cast. 1206 TheUse = InsertedCast; 1207 MadeChange = true; 1208 ++NumCastUses; 1209 } 1210 1211 // If we removed all uses, nuke the cast. 1212 if (CI->use_empty()) { 1213 salvageDebugInfo(*CI); 1214 CI->eraseFromParent(); 1215 MadeChange = true; 1216 } 1217 1218 return MadeChange; 1219 } 1220 1221 /// If the specified cast instruction is a noop copy (e.g. it's casting from 1222 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 1223 /// reduce the number of virtual registers that must be created and coalesced. 1224 /// 1225 /// Return true if any changes are made. 1226 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 1227 const DataLayout &DL) { 1228 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition 1229 // than sinking only nop casts, but is helpful on some platforms. 1230 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { 1231 if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(), 1232 ASC->getDestAddressSpace())) 1233 return false; 1234 } 1235 1236 // If this is a noop copy, 1237 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 1238 EVT DstVT = TLI.getValueType(DL, CI->getType()); 1239 1240 // This is an fp<->int conversion? 1241 if (SrcVT.isInteger() != DstVT.isInteger()) 1242 return false; 1243 1244 // If this is an extension, it will be a zero or sign extension, which 1245 // isn't a noop. 1246 if (SrcVT.bitsLT(DstVT)) return false; 1247 1248 // If these values will be promoted, find out what they will be promoted 1249 // to. This helps us consider truncates on PPC as noop copies when they 1250 // are. 1251 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 1252 TargetLowering::TypePromoteInteger) 1253 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 1254 if (TLI.getTypeAction(CI->getContext(), DstVT) == 1255 TargetLowering::TypePromoteInteger) 1256 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 1257 1258 // If, after promotion, these are the same types, this is a noop copy. 1259 if (SrcVT != DstVT) 1260 return false; 1261 1262 return SinkCast(CI); 1263 } 1264 1265 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO, 1266 Value *Arg0, Value *Arg1, 1267 CmpInst *Cmp, 1268 Intrinsic::ID IID) { 1269 if (BO->getParent() != Cmp->getParent()) { 1270 // We used to use a dominator tree here to allow multi-block optimization. 1271 // But that was problematic because: 1272 // 1. It could cause a perf regression by hoisting the math op into the 1273 // critical path. 1274 // 2. It could cause a perf regression by creating a value that was live 1275 // across multiple blocks and increasing register pressure. 1276 // 3. Use of a dominator tree could cause large compile-time regression. 1277 // This is because we recompute the DT on every change in the main CGP 1278 // run-loop. The recomputing is probably unnecessary in many cases, so if 1279 // that was fixed, using a DT here would be ok. 1280 return false; 1281 } 1282 1283 // We allow matching the canonical IR (add X, C) back to (usubo X, -C). 
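  // For example (illustrative IR, hypothetical names): the canonical pair
  //
  //   %a = add i32 %x, -42
  //   %c = icmp ult i32 %x, 42
  //
  // is rebuilt here as @llvm.usub.with.overflow.i32(i32 %x, i32 42), i.e.
  // Arg1 is negated back from -42 to 42 before the intrinsic is formed.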
1284 if (BO->getOpcode() == Instruction::Add && 1285 IID == Intrinsic::usub_with_overflow) { 1286 assert(isa<Constant>(Arg1) && "Unexpected input for usubo"); 1287 Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1)); 1288 } 1289 1290 // Insert at the first instruction of the pair. 1291 Instruction *InsertPt = nullptr; 1292 for (Instruction &Iter : *Cmp->getParent()) { 1293 // If BO is an XOR, it is not guaranteed that it comes after both inputs to 1294 // the overflow intrinsic are defined. 1295 if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) { 1296 InsertPt = &Iter; 1297 break; 1298 } 1299 } 1300 assert(InsertPt != nullptr && "Parent block did not contain cmp or binop"); 1301 1302 IRBuilder<> Builder(InsertPt); 1303 Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1); 1304 if (BO->getOpcode() != Instruction::Xor) { 1305 Value *Math = Builder.CreateExtractValue(MathOV, 0, "math"); 1306 BO->replaceAllUsesWith(Math); 1307 } else 1308 assert(BO->hasOneUse() && 1309 "Patterns with XOr should use the BO only in the compare"); 1310 Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov"); 1311 Cmp->replaceAllUsesWith(OV); 1312 Cmp->eraseFromParent(); 1313 BO->eraseFromParent(); 1314 return true; 1315 } 1316 1317 /// Match special-case patterns that check for unsigned add overflow. 1318 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp, 1319 BinaryOperator *&Add) { 1320 // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val) 1321 // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero) 1322 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1323 1324 // We are not expecting non-canonical/degenerate code. Just bail out. 1325 if (isa<Constant>(A)) 1326 return false; 1327 1328 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1329 if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes())) 1330 B = ConstantInt::get(B->getType(), 1); 1331 else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) 1332 B = ConstantInt::get(B->getType(), -1); 1333 else 1334 return false; 1335 1336 // Check the users of the variable operand of the compare looking for an add 1337 // with the adjusted constant. 1338 for (User *U : A->users()) { 1339 if (match(U, m_Add(m_Specific(A), m_Specific(B)))) { 1340 Add = cast<BinaryOperator>(U); 1341 return true; 1342 } 1343 } 1344 return false; 1345 } 1346 1347 /// Try to combine the compare into a call to the llvm.uadd.with.overflow 1348 /// intrinsic. Return true if any changes were made. 1349 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp, 1350 bool &ModifiedDT) { 1351 Value *A, *B; 1352 BinaryOperator *Add; 1353 if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) { 1354 if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add)) 1355 return false; 1356 // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases. 1357 A = Add->getOperand(0); 1358 B = Add->getOperand(1); 1359 } 1360 1361 if (!TLI->shouldFormOverflowOp(ISD::UADDO, 1362 TLI->getValueType(*DL, Add->getType()), 1363 Add->hasNUsesOrMore(2))) 1364 return false; 1365 1366 // We don't want to move around uses of condition values this late, so we 1367 // check if it is legal to create the call to the intrinsic in the basic 1368 // block containing the icmp. 
1369 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) 1370 return false; 1371 1372 if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp, 1373 Intrinsic::uadd_with_overflow)) 1374 return false; 1375 1376 // Reset callers - do not crash by iterating over a dead instruction. 1377 ModifiedDT = true; 1378 return true; 1379 } 1380 1381 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp, 1382 bool &ModifiedDT) { 1383 // We are not expecting non-canonical/degenerate code. Just bail out. 1384 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1385 if (isa<Constant>(A) && isa<Constant>(B)) 1386 return false; 1387 1388 // Convert (A u> B) to (A u< B) to simplify pattern matching. 1389 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1390 if (Pred == ICmpInst::ICMP_UGT) { 1391 std::swap(A, B); 1392 Pred = ICmpInst::ICMP_ULT; 1393 } 1394 // Convert special-case: (A == 0) is the same as (A u< 1). 1395 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { 1396 B = ConstantInt::get(B->getType(), 1); 1397 Pred = ICmpInst::ICMP_ULT; 1398 } 1399 // Convert special-case: (A != 0) is the same as (0 u< A). 1400 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { 1401 std::swap(A, B); 1402 Pred = ICmpInst::ICMP_ULT; 1403 } 1404 if (Pred != ICmpInst::ICMP_ULT) 1405 return false; 1406 1407 // Walk the users of a variable operand of a compare looking for a subtract or 1408 // add with that same operand. Also match the 2nd operand of the compare to 1409 // the add/sub, but that may be a negated constant operand of an add. 1410 Value *CmpVariableOperand = isa<Constant>(A) ? B : A; 1411 BinaryOperator *Sub = nullptr; 1412 for (User *U : CmpVariableOperand->users()) { 1413 // A - B, A u< B --> usubo(A, B) 1414 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { 1415 Sub = cast<BinaryOperator>(U); 1416 break; 1417 } 1418 1419 // A + (-C), A u< C (canonicalized form of (sub A, C)) 1420 const APInt *CmpC, *AddC; 1421 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && 1422 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { 1423 Sub = cast<BinaryOperator>(U); 1424 break; 1425 } 1426 } 1427 if (!Sub) 1428 return false; 1429 1430 if (!TLI->shouldFormOverflowOp(ISD::USUBO, 1431 TLI->getValueType(*DL, Sub->getType()), 1432 Sub->hasNUsesOrMore(2))) 1433 return false; 1434 1435 if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1), 1436 Cmp, Intrinsic::usub_with_overflow)) 1437 return false; 1438 1439 // Reset callers - do not crash by iterating over a dead instruction. 1440 ModifiedDT = true; 1441 return true; 1442 } 1443 1444 /// Sink the given CmpInst into user blocks to reduce the number of virtual 1445 /// registers that must be created and coalesced. This is a clear win except on 1446 /// targets with multiple condition code registers (PowerPC), where it might 1447 /// lose; some adjustment may be wanted there. 1448 /// 1449 /// Return true if any changes are made. 1450 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) { 1451 if (TLI.hasMultipleConditionRegisters()) 1452 return false; 1453 1454 // Avoid sinking soft-FP comparisons, since this can move them into a loop. 1455 if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp)) 1456 return false; 1457 1458 // Only insert a cmp in each block once. 
1459 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 1460 1461 bool MadeChange = false; 1462 for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end(); 1463 UI != E; ) { 1464 Use &TheUse = UI.getUse(); 1465 Instruction *User = cast<Instruction>(*UI); 1466 1467 // Preincrement use iterator so we don't invalidate it. 1468 ++UI; 1469 1470 // Don't bother for PHI nodes. 1471 if (isa<PHINode>(User)) 1472 continue; 1473 1474 // Figure out which BB this cmp is used in. 1475 BasicBlock *UserBB = User->getParent(); 1476 BasicBlock *DefBB = Cmp->getParent(); 1477 1478 // If this user is in the same block as the cmp, don't change the cmp. 1479 if (UserBB == DefBB) continue; 1480 1481 // If we have already inserted a cmp into this block, use it. 1482 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 1483 1484 if (!InsertedCmp) { 1485 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1486 assert(InsertPt != UserBB->end()); 1487 InsertedCmp = 1488 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), 1489 Cmp->getOperand(0), Cmp->getOperand(1), "", 1490 &*InsertPt); 1491 // Propagate the debug info. 1492 InsertedCmp->setDebugLoc(Cmp->getDebugLoc()); 1493 } 1494 1495 // Replace a use of the cmp with a use of the new cmp. 1496 TheUse = InsertedCmp; 1497 MadeChange = true; 1498 ++NumCmpUses; 1499 } 1500 1501 // If we removed all uses, nuke the cmp. 1502 if (Cmp->use_empty()) { 1503 Cmp->eraseFromParent(); 1504 MadeChange = true; 1505 } 1506 1507 return MadeChange; 1508 } 1509 1510 /// For a pattern like: 1511 /// 1512 /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB) 1513 /// ... 1514 /// DomBB: 1515 /// ... 1516 /// br DomCond, TrueBB, CmpBB 1517 /// CmpBB: (with DomBB being the single predecessor) 1518 /// ... 1519 /// Cmp = icmp eq CmpOp0, CmpOp1 1520 /// ... 1521 /// 1522 /// This would use two comparisons on targets where the lowering of icmp sgt/slt 1523 /// differs from the lowering of icmp eq (PowerPC). This function tries to convert 1524 /// 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'. 1525 /// After that, DomCond and Cmp can share the same comparison, saving one 1526 /// comparison. 1527 /// 1528 /// Return true if any changes are made. 1529 static bool foldICmpWithDominatingICmp(CmpInst *Cmp, 1530 const TargetLowering &TLI) { 1531 if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp()) 1532 return false; 1533 1534 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1535 if (Pred != ICmpInst::ICMP_EQ) 1536 return false; 1537 1538 // If icmp eq has users other than BranchInst and SelectInst, converting it to 1539 // icmp slt/sgt would introduce more redundant LLVM IR. 1540 for (User *U : Cmp->users()) { 1541 if (isa<BranchInst>(U)) 1542 continue; 1543 if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp) 1544 continue; 1545 return false; 1546 } 1547 1548 // This is a cheap/incomplete check for dominance - just match a single 1549 // predecessor with a conditional branch. 1550 BasicBlock *CmpBB = Cmp->getParent(); 1551 BasicBlock *DomBB = CmpBB->getSinglePredecessor(); 1552 if (!DomBB) 1553 return false; 1554 1555 // We want to ensure that the only way control gets to the comparison of 1556 // interest is that a less/greater than comparison on the same operands is 1557 // false.
1558 Value *DomCond; 1559 BasicBlock *TrueBB, *FalseBB; 1560 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB))) 1561 return false; 1562 if (CmpBB != FalseBB) 1563 return false; 1564 1565 Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1); 1566 ICmpInst::Predicate DomPred; 1567 if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1)))) 1568 return false; 1569 if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT) 1570 return false; 1571 1572 // Convert the equality comparison to the opposite of the dominating 1573 // comparison and swap the direction for all branch/select users. 1574 // We have conceptually converted: 1575 // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>; 1576 // to 1577 // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>; 1578 // And similarly for branches. 1579 for (User *U : Cmp->users()) { 1580 if (auto *BI = dyn_cast<BranchInst>(U)) { 1581 assert(BI->isConditional() && "Must be conditional"); 1582 BI->swapSuccessors(); 1583 continue; 1584 } 1585 if (auto *SI = dyn_cast<SelectInst>(U)) { 1586 // Swap operands 1587 SI->swapValues(); 1588 SI->swapProfMetadata(); 1589 continue; 1590 } 1591 llvm_unreachable("Must be a branch or a select"); 1592 } 1593 Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred)); 1594 return true; 1595 } 1596 1597 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) { 1598 if (sinkCmpExpression(Cmp, *TLI)) 1599 return true; 1600 1601 if (combineToUAddWithOverflow(Cmp, ModifiedDT)) 1602 return true; 1603 1604 if (combineToUSubWithOverflow(Cmp, ModifiedDT)) 1605 return true; 1606 1607 if (foldICmpWithDominatingICmp(Cmp, *TLI)) 1608 return true; 1609 1610 return false; 1611 } 1612 1613 /// Duplicate and sink the given 'and' instruction into user blocks where it is 1614 /// used in a compare to allow isel to generate better code for targets where 1615 /// this operation can be combined. 1616 /// 1617 /// Return true if any changes are made. 1618 static bool sinkAndCmp0Expression(Instruction *AndI, 1619 const TargetLowering &TLI, 1620 SetOfInstrs &InsertedInsts) { 1621 // Double-check that we're not trying to optimize an instruction that was 1622 // already optimized by some other part of this pass. 1623 assert(!InsertedInsts.count(AndI) && 1624 "Attempting to optimize already optimized and instruction"); 1625 (void) InsertedInsts; 1626 1627 // Nothing to do for single use in same basic block. 1628 if (AndI->hasOneUse() && 1629 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) 1630 return false; 1631 1632 // Try to avoid cases where sinking/duplicating is likely to increase register 1633 // pressure. 1634 if (!isa<ConstantInt>(AndI->getOperand(0)) && 1635 !isa<ConstantInt>(AndI->getOperand(1)) && 1636 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) 1637 return false; 1638 1639 for (auto *U : AndI->users()) { 1640 Instruction *User = cast<Instruction>(U); 1641 1642 // Only sink 'and' feeding icmp with 0. 1643 if (!isa<ICmpInst>(User)) 1644 return false; 1645 1646 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); 1647 if (!CmpC || !CmpC->isZero()) 1648 return false; 1649 } 1650 1651 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) 1652 return false; 1653 1654 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); 1655 LLVM_DEBUG(AndI->getParent()->dump()); 1656 1657 // Push the 'and' into the same block as the icmp 0. 
There should only be 1658 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any 1659 // others, so we don't need to keep track of which BBs we insert into. 1660 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); 1661 UI != E; ) { 1662 Use &TheUse = UI.getUse(); 1663 Instruction *User = cast<Instruction>(*UI); 1664 1665 // Preincrement use iterator so we don't invalidate it. 1666 ++UI; 1667 1668 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); 1669 1670 // Keep the 'and' in the same place if the use is already in the same block. 1671 Instruction *InsertPt = 1672 User->getParent() == AndI->getParent() ? AndI : User; 1673 Instruction *InsertedAnd = 1674 BinaryOperator::Create(Instruction::And, AndI->getOperand(0), 1675 AndI->getOperand(1), "", InsertPt); 1676 // Propagate the debug info. 1677 InsertedAnd->setDebugLoc(AndI->getDebugLoc()); 1678 1679 // Replace a use of the 'and' with a use of the new 'and'. 1680 TheUse = InsertedAnd; 1681 ++NumAndUses; 1682 LLVM_DEBUG(User->getParent()->dump()); 1683 } 1684 1685 // We removed all uses, nuke the and. 1686 AndI->eraseFromParent(); 1687 return true; 1688 } 1689 1690 /// Check if the candidates could be combined with a shift instruction, which 1691 /// includes: 1692 /// 1. Truncate instruction 1693 /// 2. And instruction and the imm is a mask of the low bits: 1694 /// imm & (imm+1) == 0 1695 static bool isExtractBitsCandidateUse(Instruction *User) { 1696 if (!isa<TruncInst>(User)) { 1697 if (User->getOpcode() != Instruction::And || 1698 !isa<ConstantInt>(User->getOperand(1))) 1699 return false; 1700 1701 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 1702 1703 if ((Cimm & (Cimm + 1)).getBoolValue()) 1704 return false; 1705 } 1706 return true; 1707 } 1708 1709 /// Sink both shift and truncate instruction to the use of truncate's BB. 1710 static bool 1711 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 1712 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 1713 const TargetLowering &TLI, const DataLayout &DL) { 1714 BasicBlock *UserBB = User->getParent(); 1715 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 1716 auto *TruncI = cast<TruncInst>(User); 1717 bool MadeChange = false; 1718 1719 for (Value::user_iterator TruncUI = TruncI->user_begin(), 1720 TruncE = TruncI->user_end(); 1721 TruncUI != TruncE;) { 1722 1723 Use &TruncTheUse = TruncUI.getUse(); 1724 Instruction *TruncUser = cast<Instruction>(*TruncUI); 1725 // Preincrement use iterator so we don't invalidate it. 1726 1727 ++TruncUI; 1728 1729 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1730 if (!ISDOpcode) 1731 continue; 1732 1733 // If the use is actually a legal node, there will not be an 1734 // implicit truncate. 1735 // FIXME: always querying the result type is just an 1736 // approximation; some nodes' legality is determined by the 1737 // operand or other means. There's no good way to find out though. 1738 if (TLI.isOperationLegalOrCustom( 1739 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1740 continue; 1741 1742 // Don't bother for PHI nodes. 
1743 if (isa<PHINode>(TruncUser)) 1744 continue; 1745 1746 BasicBlock *TruncUserBB = TruncUser->getParent(); 1747 1748 if (UserBB == TruncUserBB) 1749 continue; 1750 1751 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1752 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1753 1754 if (!InsertedShift && !InsertedTrunc) { 1755 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1756 assert(InsertPt != TruncUserBB->end()); 1757 // Sink the shift 1758 if (ShiftI->getOpcode() == Instruction::AShr) 1759 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1760 "", &*InsertPt); 1761 else 1762 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1763 "", &*InsertPt); 1764 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1765 1766 // Sink the trunc 1767 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1768 TruncInsertPt++; 1769 assert(TruncInsertPt != TruncUserBB->end()); 1770 1771 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1772 TruncI->getType(), "", &*TruncInsertPt); 1773 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); 1774 1775 MadeChange = true; 1776 1777 TruncTheUse = InsertedTrunc; 1778 } 1779 } 1780 return MadeChange; 1781 } 1782 1783 /// Sink the shift *right* instruction into user blocks if the uses could 1784 /// potentially be combined with this shift instruction to generate a BitExtract 1785 /// instruction. It is only applied if the target supports a BitExtract 1786 /// instruction. Here is an example: 1787 /// BB1: 1788 /// %x.extract.shift = lshr i64 %arg1, 32 1789 /// BB2: 1790 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1791 /// ==> 1792 /// 1793 /// BB2: 1794 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1795 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1796 /// 1797 /// CodeGen will recognize the pattern in BB2 and generate a BitExtract 1798 /// instruction. 1799 /// Return true if any changes are made. 1800 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1801 const TargetLowering &TLI, 1802 const DataLayout &DL) { 1803 BasicBlock *DefBB = ShiftI->getParent(); 1804 1805 /// Only insert instructions in each block once. 1806 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1807 1808 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1809 1810 bool MadeChange = false; 1811 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1812 UI != E;) { 1813 Use &TheUse = UI.getUse(); 1814 Instruction *User = cast<Instruction>(*UI); 1815 // Preincrement use iterator so we don't invalidate it. 1816 ++UI; 1817 1818 // Don't bother for PHI nodes. 1819 if (isa<PHINode>(User)) 1820 continue; 1821 1822 if (!isExtractBitsCandidateUse(User)) 1823 continue; 1824 1825 BasicBlock *UserBB = User->getParent(); 1826 1827 if (UserBB == DefBB) { 1828 // If the shift and truncate instructions are in the same BB, the use of 1829 // the truncate (TruncUse) may still introduce another truncate if it is not 1830 // legal. In this case, we would like to sink both the shift and the truncate 1831 // instruction to the BB of TruncUse. 1832 // For example: 1833 // BB1: 1834 // i64 shift.result = lshr i64 opnd, imm 1835 // trunc.result = trunc shift.result to i16 1836 // 1837 // BB2: 1838 // ----> We will have an implicit truncate here if the architecture does 1839 // not have i16 compare.
1840 // cmp i16 trunc.result, opnd2 1841 // 1842 if (isa<TruncInst>(User) && shiftIsLegal 1843 // If the type of the truncate is legal, no truncate will be 1844 // introduced in other basic blocks. 1845 && 1846 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1847 MadeChange = 1848 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1849 1850 continue; 1851 } 1852 // If we have already inserted a shift into this block, use it. 1853 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1854 1855 if (!InsertedShift) { 1856 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1857 assert(InsertPt != UserBB->end()); 1858 1859 if (ShiftI->getOpcode() == Instruction::AShr) 1860 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1861 "", &*InsertPt); 1862 else 1863 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1864 "", &*InsertPt); 1865 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1866 1867 MadeChange = true; 1868 } 1869 1870 // Replace a use of the shift with a use of the new shift. 1871 TheUse = InsertedShift; 1872 } 1873 1874 // If we removed all uses, or there are none, nuke the shift. 1875 if (ShiftI->use_empty()) { 1876 salvageDebugInfo(*ShiftI); 1877 ShiftI->eraseFromParent(); 1878 MadeChange = true; 1879 } 1880 1881 return MadeChange; 1882 } 1883 1884 /// If counting leading or trailing zeros is an expensive operation and a zero 1885 /// input is defined, add a check for zero to avoid calling the intrinsic. 1886 /// 1887 /// We want to transform: 1888 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1889 /// 1890 /// into: 1891 /// entry: 1892 /// %cmpz = icmp eq i64 %A, 0 1893 /// br i1 %cmpz, label %cond.end, label %cond.false 1894 /// cond.false: 1895 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1896 /// br label %cond.end 1897 /// cond.end: 1898 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1899 /// 1900 /// If the transform is performed, return true and set ModifiedDT to true. 1901 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1902 const TargetLowering *TLI, 1903 const DataLayout *DL, 1904 bool &ModifiedDT) { 1905 // If a zero input is undefined, it doesn't make sense to despeculate that. 1906 if (match(CountZeros->getOperand(1), m_One())) 1907 return false; 1908 1909 // If it's cheap to speculate, there's nothing to do. 1910 auto IntrinsicID = CountZeros->getIntrinsicID(); 1911 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1912 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1913 return false; 1914 1915 // Only handle legal scalar cases. Anything else requires too much work. 1916 Type *Ty = CountZeros->getType(); 1917 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1918 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 1919 return false; 1920 1921 // The intrinsic will be sunk behind a compare against zero and branch. 1922 BasicBlock *StartBlock = CountZeros->getParent(); 1923 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1924 1925 // Create another block after the count zero intrinsic. A PHI will be added 1926 // in this block to select the result of the intrinsic or the bit-width 1927 // constant if the input to the intrinsic is zero. 
1928 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 1929 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 1930 1931 // Set up a builder to create a compare, conditional branch, and PHI. 1932 IRBuilder<> Builder(CountZeros->getContext()); 1933 Builder.SetInsertPoint(StartBlock->getTerminator()); 1934 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 1935 1936 // Replace the unconditional branch that was created by the first split with 1937 // a compare against zero and a conditional branch. 1938 Value *Zero = Constant::getNullValue(Ty); 1939 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 1940 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 1941 StartBlock->getTerminator()->eraseFromParent(); 1942 1943 // Create a PHI in the end block to select either the output of the intrinsic 1944 // or the bit width of the operand. 1945 Builder.SetInsertPoint(&EndBlock->front()); 1946 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 1947 CountZeros->replaceAllUsesWith(PN); 1948 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 1949 PN->addIncoming(BitWidth, StartBlock); 1950 PN->addIncoming(CountZeros, CallBlock); 1951 1952 // We are explicitly handling the zero case, so we can set the intrinsic's 1953 // undefined zero argument to 'true'. This will also prevent reprocessing the 1954 // intrinsic; we only despeculate when a zero input is defined. 1955 CountZeros->setArgOperand(1, Builder.getTrue()); 1956 ModifiedDT = true; 1957 return true; 1958 } 1959 1960 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { 1961 BasicBlock *BB = CI->getParent(); 1962 1963 // Lower inline assembly if we can. 1964 // If we found an inline asm expression, and if the target knows how to 1965 // lower it to normal LLVM code, do so now. 1966 if (CI->isInlineAsm()) { 1967 if (TLI->ExpandInlineAsm(CI)) { 1968 // Avoid invalidating the iterator. 1969 CurInstIterator = BB->begin(); 1970 // Avoid processing instructions out of order, which could cause 1971 // reuse before a value is defined. 1972 SunkAddrs.clear(); 1973 return true; 1974 } 1975 // Sink address computing for memory operands into the block. 1976 if (optimizeInlineAsmInst(CI)) 1977 return true; 1978 } 1979 1980 // Align the pointer arguments to this call if the target thinks it's a good 1981 // idea. 1982 unsigned MinSize, PrefAlign; 1983 if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 1984 for (auto &Arg : CI->arg_operands()) { 1985 // We want to align both objects whose address is used directly and 1986 // objects whose address is used in casts and GEPs, though it only makes 1987 // sense for GEPs if the offset is a multiple of the desired alignment and 1988 // if size - offset meets the size threshold. 1989 if (!Arg->getType()->isPointerTy()) 1990 continue; 1991 APInt Offset(DL->getIndexSizeInBits( 1992 cast<PointerType>(Arg->getType())->getAddressSpace()), 1993 0); 1994 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 1995 uint64_t Offset2 = Offset.getLimitedValue(); 1996 if ((Offset2 & (PrefAlign-1)) != 0) 1997 continue; 1998 AllocaInst *AI; 1999 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 2000 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 2001 AI->setAlignment(Align(PrefAlign)); 2002 // Global variables can only be aligned if they are defined in this 2003 // object (i.e.
they are uniquely initialized in this object), and 2004 // over-aligning global variables that have an explicit section is 2005 // forbidden. 2006 GlobalVariable *GV; 2007 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 2008 GV->getPointerAlignment(*DL) < PrefAlign && 2009 DL->getTypeAllocSize(GV->getValueType()) >= 2010 MinSize + Offset2) 2011 GV->setAlignment(MaybeAlign(PrefAlign)); 2012 } 2013 // If this is a memcpy (or similar) then we may be able to improve the 2014 // alignment. 2015 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 2016 Align DestAlign = getKnownAlignment(MI->getDest(), *DL); 2017 MaybeAlign MIDestAlign = MI->getDestAlign(); 2018 if (!MIDestAlign || DestAlign > *MIDestAlign) 2019 MI->setDestAlignment(DestAlign); 2020 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { 2021 MaybeAlign MTISrcAlign = MTI->getSourceAlign(); 2022 Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL); 2023 if (!MTISrcAlign || SrcAlign > *MTISrcAlign) 2024 MTI->setSourceAlignment(SrcAlign); 2025 } 2026 } 2027 } 2028 2029 // If we have a cold call site, try to sink addressing computation into the 2030 // cold block. This interacts with our handling for loads and stores to 2031 // ensure that we can fold all uses of a potential addressing computation 2032 // into their uses. TODO: generalize this to work over profiling data 2033 if (CI->hasFnAttr(Attribute::Cold) && 2034 !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get())) 2035 for (auto &Arg : CI->arg_operands()) { 2036 if (!Arg->getType()->isPointerTy()) 2037 continue; 2038 unsigned AS = Arg->getType()->getPointerAddressSpace(); 2039 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 2040 } 2041 2042 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 2043 if (II) { 2044 switch (II->getIntrinsicID()) { 2045 default: break; 2046 case Intrinsic::assume: { 2047 II->eraseFromParent(); 2048 return true; 2049 } 2050 2051 case Intrinsic::experimental_widenable_condition: { 2052 // Give up on future widening opportunities so that we can fold away dead 2053 // paths and merge blocks before going into block-local instruction 2054 // selection. 2055 if (II->use_empty()) { 2056 II->eraseFromParent(); 2057 return true; 2058 } 2059 Constant *RetVal = ConstantInt::getTrue(II->getContext()); 2060 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 2061 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 2062 }); 2063 return true; 2064 } 2065 case Intrinsic::objectsize: 2066 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 2067 case Intrinsic::is_constant: 2068 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 2069 case Intrinsic::aarch64_stlxr: 2070 case Intrinsic::aarch64_stxr: { 2071 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 2072 if (!ExtVal || !ExtVal->hasOneUse() || 2073 ExtVal->getParent() == CI->getParent()) 2074 return false; 2075 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 2076 ExtVal->moveBefore(CI); 2077 // Mark this instruction as "inserted by CGP", so that other 2078 // optimizations don't touch it. 2079 InsertedInsts.insert(ExtVal); 2080 return true; 2081 } 2082 2083 case Intrinsic::launder_invariant_group: 2084 case Intrinsic::strip_invariant_group: { 2085 Value *ArgVal = II->getArgOperand(0); 2086 auto it = LargeOffsetGEPMap.find(II); 2087 if (it != LargeOffsetGEPMap.end()) { 2088 // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2089 // Make sure not to have to deal with iterator invalidation 2090 // after possibly adding ArgVal to LargeOffsetGEPMap. 2091 auto GEPs = std::move(it->second); 2092 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); 2093 LargeOffsetGEPMap.erase(II); 2094 } 2095 2096 II->replaceAllUsesWith(ArgVal); 2097 II->eraseFromParent(); 2098 return true; 2099 } 2100 case Intrinsic::cttz: 2101 case Intrinsic::ctlz: 2102 // If counting zeros is expensive, try to avoid it. 2103 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 2104 case Intrinsic::fshl: 2105 case Intrinsic::fshr: 2106 return optimizeFunnelShift(II); 2107 case Intrinsic::dbg_value: 2108 return fixupDbgValue(II); 2109 case Intrinsic::vscale: { 2110 // If datalayout has no special restrictions on vector data layout, 2111 // replace `llvm.vscale` by an equivalent constant expression 2112 // to benefit from cheap constant propagation. 2113 Type *ScalableVectorTy = 2114 VectorType::get(Type::getInt8Ty(II->getContext()), 1, true); 2115 if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) { 2116 auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo()); 2117 auto *One = ConstantInt::getSigned(II->getType(), 1); 2118 auto *CGep = 2119 ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One); 2120 II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType())); 2121 II->eraseFromParent(); 2122 return true; 2123 } 2124 break; 2125 } 2126 case Intrinsic::masked_gather: 2127 return optimizeGatherScatterInst(II, II->getArgOperand(0)); 2128 case Intrinsic::masked_scatter: 2129 return optimizeGatherScatterInst(II, II->getArgOperand(1)); 2130 } 2131 2132 SmallVector<Value *, 2> PtrOps; 2133 Type *AccessTy; 2134 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 2135 while (!PtrOps.empty()) { 2136 Value *PtrVal = PtrOps.pop_back_val(); 2137 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 2138 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 2139 return true; 2140 } 2141 } 2142 2143 // From here on out we're working with named functions. 2144 if (!CI->getCalledFunction()) return false; 2145 2146 // Lower all default uses of _chk calls. This is very similar 2147 // to what InstCombineCalls does, but here we are only lowering calls 2148 // to fortified library functions (e.g. __memcpy_chk) that have the default 2149 // "don't know" as the objectsize. Anything else should be left alone. 2150 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 2151 IRBuilder<> Builder(CI); 2152 if (Value *V = Simplifier.optimizeCall(CI, Builder)) { 2153 CI->replaceAllUsesWith(V); 2154 CI->eraseFromParent(); 2155 return true; 2156 } 2157 2158 return false; 2159 } 2160 2161 /// Look for opportunities to duplicate return instructions to the predecessor 2162 /// to enable tail call optimizations. 
The case it is currently looking for is: 2163 /// @code 2164 /// bb0: 2165 /// %tmp0 = tail call i32 @f0() 2166 /// br label %return 2167 /// bb1: 2168 /// %tmp1 = tail call i32 @f1() 2169 /// br label %return 2170 /// bb2: 2171 /// %tmp2 = tail call i32 @f2() 2172 /// br label %return 2173 /// return: 2174 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 2175 /// ret i32 %retval 2176 /// @endcode 2177 /// 2178 /// => 2179 /// 2180 /// @code 2181 /// bb0: 2182 /// %tmp0 = tail call i32 @f0() 2183 /// ret i32 %tmp0 2184 /// bb1: 2185 /// %tmp1 = tail call i32 @f1() 2186 /// ret i32 %tmp1 2187 /// bb2: 2188 /// %tmp2 = tail call i32 @f2() 2189 /// ret i32 %tmp2 2190 /// @endcode 2191 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) { 2192 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 2193 if (!RetI) 2194 return false; 2195 2196 PHINode *PN = nullptr; 2197 ExtractValueInst *EVI = nullptr; 2198 BitCastInst *BCI = nullptr; 2199 Value *V = RetI->getReturnValue(); 2200 if (V) { 2201 BCI = dyn_cast<BitCastInst>(V); 2202 if (BCI) 2203 V = BCI->getOperand(0); 2204 2205 EVI = dyn_cast<ExtractValueInst>(V); 2206 if (EVI) { 2207 V = EVI->getOperand(0); 2208 if (!std::all_of(EVI->idx_begin(), EVI->idx_end(), 2209 [](unsigned idx) { return idx == 0; })) 2210 return false; 2211 } 2212 2213 PN = dyn_cast<PHINode>(V); 2214 if (!PN) 2215 return false; 2216 } 2217 2218 if (PN && PN->getParent() != BB) 2219 return false; 2220 2221 // Make sure there are no instructions between the PHI and return, or that the 2222 // return is the first instruction in the block. 2223 if (PN) { 2224 BasicBlock::iterator BI = BB->begin(); 2225 // Skip over debug and the bitcast. 2226 do { 2227 ++BI; 2228 } while (isa<DbgInfoIntrinsic>(BI) || &*BI == BCI || &*BI == EVI); 2229 if (&*BI != RetI) 2230 return false; 2231 } else { 2232 BasicBlock::iterator BI = BB->begin(); 2233 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 2234 if (&*BI != RetI) 2235 return false; 2236 } 2237 2238 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 2239 /// call. 2240 const Function *F = BB->getParent(); 2241 SmallVector<BasicBlock*, 4> TailCallBBs; 2242 if (PN) { 2243 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2244 // Look through bitcasts. 2245 Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts(); 2246 CallInst *CI = dyn_cast<CallInst>(IncomingVal); 2247 BasicBlock *PredBB = PN->getIncomingBlock(I); 2248 // Make sure the phi value is indeed produced by the tail call. 
2249 if (CI && CI->hasOneUse() && CI->getParent() == PredBB && 2250 TLI->mayBeEmittedAsTailCall(CI) && 2251 attributesPermitTailCall(F, CI, RetI, *TLI)) 2252 TailCallBBs.push_back(PredBB); 2253 } 2254 } else { 2255 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2256 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 2257 if (!VisitedBBs.insert(*PI).second) 2258 continue; 2259 2260 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 2261 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 2262 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 2263 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 2264 if (RI == RE) 2265 continue; 2266 2267 CallInst *CI = dyn_cast<CallInst>(&*RI); 2268 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2269 attributesPermitTailCall(F, CI, RetI, *TLI)) 2270 TailCallBBs.push_back(*PI); 2271 } 2272 } 2273 2274 bool Changed = false; 2275 for (auto const &TailCallBB : TailCallBBs) { 2276 // Make sure the call instruction is followed by an unconditional branch to 2277 // the return block. 2278 BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator()); 2279 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2280 continue; 2281 2282 // Duplicate the return into TailCallBB. 2283 (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB); 2284 assert(!VerifyBFIUpdates || 2285 BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB)); 2286 BFI->setBlockFreq( 2287 BB, 2288 (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency()); 2289 ModifiedDT = Changed = true; 2290 ++NumRetsDup; 2291 } 2292 2293 // If we eliminated all predecessors of the block, delete the block now. 2294 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 2295 BB->eraseFromParent(); 2296 2297 return Changed; 2298 } 2299 2300 //===----------------------------------------------------------------------===// 2301 // Memory Optimization 2302 //===----------------------------------------------------------------------===// 2303 2304 namespace { 2305 2306 /// This is an extended version of TargetLowering::AddrMode 2307 /// which holds actual Value*'s for register values. 2308 struct ExtAddrMode : public TargetLowering::AddrMode { 2309 Value *BaseReg = nullptr; 2310 Value *ScaledReg = nullptr; 2311 Value *OriginalValue = nullptr; 2312 bool InBounds = true; 2313 2314 enum FieldName { 2315 NoField = 0x00, 2316 BaseRegField = 0x01, 2317 BaseGVField = 0x02, 2318 BaseOffsField = 0x04, 2319 ScaledRegField = 0x08, 2320 ScaleField = 0x10, 2321 MultipleFields = 0xff 2322 }; 2323 2324 2325 ExtAddrMode() = default; 2326 2327 void print(raw_ostream &OS) const; 2328 void dump() const; 2329 2330 FieldName compare(const ExtAddrMode &other) { 2331 // First check that the types are the same on each field, as differing types 2332 // is something we can't cope with later on. 2333 if (BaseReg && other.BaseReg && 2334 BaseReg->getType() != other.BaseReg->getType()) 2335 return MultipleFields; 2336 if (BaseGV && other.BaseGV && 2337 BaseGV->getType() != other.BaseGV->getType()) 2338 return MultipleFields; 2339 if (ScaledReg && other.ScaledReg && 2340 ScaledReg->getType() != other.ScaledReg->getType()) 2341 return MultipleFields; 2342 2343 // Conservatively reject 'inbounds' mismatches. 2344 if (InBounds != other.InBounds) 2345 return MultipleFields; 2346 2347 // Check each field to see if it differs. 
2348 unsigned Result = NoField; 2349 if (BaseReg != other.BaseReg) 2350 Result |= BaseRegField; 2351 if (BaseGV != other.BaseGV) 2352 Result |= BaseGVField; 2353 if (BaseOffs != other.BaseOffs) 2354 Result |= BaseOffsField; 2355 if (ScaledReg != other.ScaledReg) 2356 Result |= ScaledRegField; 2357 // Don't count 0 as being a different scale, because that actually means 2358 // unscaled (which will already be counted by having no ScaledReg). 2359 if (Scale && other.Scale && Scale != other.Scale) 2360 Result |= ScaleField; 2361 2362 if (countPopulation(Result) > 1) 2363 return MultipleFields; 2364 else 2365 return static_cast<FieldName>(Result); 2366 } 2367 2368 // An AddrMode is trivial if it involves no calculation i.e. it is just a base 2369 // with no offset. 2370 bool isTrivial() { 2371 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is 2372 // trivial if at most one of these terms is nonzero, except that BaseGV and 2373 // BaseReg both being zero actually means a null pointer value, which we 2374 // consider to be 'non-zero' here. 2375 return !BaseOffs && !Scale && !(BaseGV && BaseReg); 2376 } 2377 2378 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { 2379 switch (Field) { 2380 default: 2381 return nullptr; 2382 case BaseRegField: 2383 return BaseReg; 2384 case BaseGVField: 2385 return BaseGV; 2386 case ScaledRegField: 2387 return ScaledReg; 2388 case BaseOffsField: 2389 return ConstantInt::get(IntPtrTy, BaseOffs); 2390 } 2391 } 2392 2393 void SetCombinedField(FieldName Field, Value *V, 2394 const SmallVectorImpl<ExtAddrMode> &AddrModes) { 2395 switch (Field) { 2396 default: 2397 llvm_unreachable("Unhandled fields are expected to be rejected earlier"); 2398 break; 2399 case ExtAddrMode::BaseRegField: 2400 BaseReg = V; 2401 break; 2402 case ExtAddrMode::BaseGVField: 2403 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes 2404 // in the BaseReg field. 2405 assert(BaseReg == nullptr); 2406 BaseReg = V; 2407 BaseGV = nullptr; 2408 break; 2409 case ExtAddrMode::ScaledRegField: 2410 ScaledReg = V; 2411 // If we have a mix of scaled and unscaled addrmodes then we want scale 2412 // to be the scale and not zero. 2413 if (!Scale) 2414 for (const ExtAddrMode &AM : AddrModes) 2415 if (AM.Scale) { 2416 Scale = AM.Scale; 2417 break; 2418 } 2419 break; 2420 case ExtAddrMode::BaseOffsField: 2421 // The offset is no longer a constant, so it goes in ScaledReg with a 2422 // scale of 1. 2423 assert(ScaledReg == nullptr); 2424 ScaledReg = V; 2425 Scale = 1; 2426 BaseOffs = 0; 2427 break; 2428 } 2429 } 2430 }; 2431 2432 } // end anonymous namespace 2433 2434 #ifndef NDEBUG 2435 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2436 AM.print(OS); 2437 return OS; 2438 } 2439 #endif 2440 2441 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2442 void ExtAddrMode::print(raw_ostream &OS) const { 2443 bool NeedPlus = false; 2444 OS << "["; 2445 if (InBounds) 2446 OS << "inbounds "; 2447 if (BaseGV) { 2448 OS << (NeedPlus ? " + " : "") 2449 << "GV:"; 2450 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2451 NeedPlus = true; 2452 } 2453 2454 if (BaseOffs) { 2455 OS << (NeedPlus ? " + " : "") 2456 << BaseOffs; 2457 NeedPlus = true; 2458 } 2459 2460 if (BaseReg) { 2461 OS << (NeedPlus ? " + " : "") 2462 << "Base:"; 2463 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2464 NeedPlus = true; 2465 } 2466 if (Scale) { 2467 OS << (NeedPlus ? 
" + " : "") 2468 << Scale << "*"; 2469 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2470 } 2471 2472 OS << ']'; 2473 } 2474 2475 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2476 print(dbgs()); 2477 dbgs() << '\n'; 2478 } 2479 #endif 2480 2481 namespace { 2482 2483 /// This class provides transaction based operation on the IR. 2484 /// Every change made through this class is recorded in the internal state and 2485 /// can be undone (rollback) until commit is called. 2486 /// CGP does not check if instructions could be speculatively executed when 2487 /// moved. Preserving the original location would pessimize the debugging 2488 /// experience, as well as negatively impact the quality of sample PGO. 2489 class TypePromotionTransaction { 2490 /// This represents the common interface of the individual transaction. 2491 /// Each class implements the logic for doing one specific modification on 2492 /// the IR via the TypePromotionTransaction. 2493 class TypePromotionAction { 2494 protected: 2495 /// The Instruction modified. 2496 Instruction *Inst; 2497 2498 public: 2499 /// Constructor of the action. 2500 /// The constructor performs the related action on the IR. 2501 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2502 2503 virtual ~TypePromotionAction() = default; 2504 2505 /// Undo the modification done by this action. 2506 /// When this method is called, the IR must be in the same state as it was 2507 /// before this action was applied. 2508 /// \pre Undoing the action works if and only if the IR is in the exact same 2509 /// state as it was directly after this action was applied. 2510 virtual void undo() = 0; 2511 2512 /// Advocate every change made by this action. 2513 /// When the results on the IR of the action are to be kept, it is important 2514 /// to call this function, otherwise hidden information may be kept forever. 2515 virtual void commit() { 2516 // Nothing to be done, this action is not doing anything. 2517 } 2518 }; 2519 2520 /// Utility to remember the position of an instruction. 2521 class InsertionHandler { 2522 /// Position of an instruction. 2523 /// Either an instruction: 2524 /// - Is the first in a basic block: BB is used. 2525 /// - Has a previous instruction: PrevInst is used. 2526 union { 2527 Instruction *PrevInst; 2528 BasicBlock *BB; 2529 } Point; 2530 2531 /// Remember whether or not the instruction had a previous instruction. 2532 bool HasPrevInstruction; 2533 2534 public: 2535 /// Record the position of \p Inst. 2536 InsertionHandler(Instruction *Inst) { 2537 BasicBlock::iterator It = Inst->getIterator(); 2538 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2539 if (HasPrevInstruction) 2540 Point.PrevInst = &*--It; 2541 else 2542 Point.BB = Inst->getParent(); 2543 } 2544 2545 /// Insert \p Inst at the recorded position. 2546 void insert(Instruction *Inst) { 2547 if (HasPrevInstruction) { 2548 if (Inst->getParent()) 2549 Inst->removeFromParent(); 2550 Inst->insertAfter(Point.PrevInst); 2551 } else { 2552 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2553 if (Inst->getParent()) 2554 Inst->moveBefore(Position); 2555 else 2556 Inst->insertBefore(Position); 2557 } 2558 } 2559 }; 2560 2561 /// Move an instruction before another. 2562 class InstructionMoveBefore : public TypePromotionAction { 2563 /// Original position of the instruction. 2564 InsertionHandler Position; 2565 2566 public: 2567 /// Move \p Inst before \p Before. 
2568 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2569 : TypePromotionAction(Inst), Position(Inst) { 2570 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before 2571 << "\n"); 2572 Inst->moveBefore(Before); 2573 } 2574 2575 /// Move the instruction back to its original position. 2576 void undo() override { 2577 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2578 Position.insert(Inst); 2579 } 2580 }; 2581 2582 /// Set the operand of an instruction with a new value. 2583 class OperandSetter : public TypePromotionAction { 2584 /// Original operand of the instruction. 2585 Value *Origin; 2586 2587 /// Index of the modified instruction. 2588 unsigned Idx; 2589 2590 public: 2591 /// Set \p Idx operand of \p Inst with \p NewVal. 2592 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2593 : TypePromotionAction(Inst), Idx(Idx) { 2594 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2595 << "for:" << *Inst << "\n" 2596 << "with:" << *NewVal << "\n"); 2597 Origin = Inst->getOperand(Idx); 2598 Inst->setOperand(Idx, NewVal); 2599 } 2600 2601 /// Restore the original value of the instruction. 2602 void undo() override { 2603 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2604 << "for: " << *Inst << "\n" 2605 << "with: " << *Origin << "\n"); 2606 Inst->setOperand(Idx, Origin); 2607 } 2608 }; 2609 2610 /// Hide the operands of an instruction. 2611 /// Do as if this instruction was not using any of its operands. 2612 class OperandsHider : public TypePromotionAction { 2613 /// The list of original operands. 2614 SmallVector<Value *, 4> OriginalValues; 2615 2616 public: 2617 /// Remove \p Inst from the uses of the operands of \p Inst. 2618 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2619 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2620 unsigned NumOpnds = Inst->getNumOperands(); 2621 OriginalValues.reserve(NumOpnds); 2622 for (unsigned It = 0; It < NumOpnds; ++It) { 2623 // Save the current operand. 2624 Value *Val = Inst->getOperand(It); 2625 OriginalValues.push_back(Val); 2626 // Set a dummy one. 2627 // We could use OperandSetter here, but that would imply an overhead 2628 // that we are not willing to pay. 2629 Inst->setOperand(It, UndefValue::get(Val->getType())); 2630 } 2631 } 2632 2633 /// Restore the original list of uses. 2634 void undo() override { 2635 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2636 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2637 Inst->setOperand(It, OriginalValues[It]); 2638 } 2639 }; 2640 2641 /// Build a truncate instruction. 2642 class TruncBuilder : public TypePromotionAction { 2643 Value *Val; 2644 2645 public: 2646 /// Build a truncate instruction of \p Opnd producing a \p Ty 2647 /// result. 2648 /// trunc Opnd to Ty. 2649 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2650 IRBuilder<> Builder(Opnd); 2651 Builder.SetCurrentDebugLocation(DebugLoc()); 2652 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2653 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2654 } 2655 2656 /// Get the built value. 2657 Value *getBuiltValue() { return Val; } 2658 2659 /// Remove the built instruction. 2660 void undo() override { 2661 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2662 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2663 IVal->eraseFromParent(); 2664 } 2665 }; 2666 2667 /// Build a sign extension instruction. 
2668 class SExtBuilder : public TypePromotionAction { 2669 Value *Val; 2670 2671 public: 2672 /// Build a sign extension instruction of \p Opnd producing a \p Ty 2673 /// result. 2674 /// sext Opnd to Ty. 2675 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2676 : TypePromotionAction(InsertPt) { 2677 IRBuilder<> Builder(InsertPt); 2678 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2679 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2680 } 2681 2682 /// Get the built value. 2683 Value *getBuiltValue() { return Val; } 2684 2685 /// Remove the built instruction. 2686 void undo() override { 2687 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2688 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2689 IVal->eraseFromParent(); 2690 } 2691 }; 2692 2693 /// Build a zero extension instruction. 2694 class ZExtBuilder : public TypePromotionAction { 2695 Value *Val; 2696 2697 public: 2698 /// Build a zero extension instruction of \p Opnd producing a \p Ty 2699 /// result. 2700 /// zext Opnd to Ty. 2701 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2702 : TypePromotionAction(InsertPt) { 2703 IRBuilder<> Builder(InsertPt); 2704 Builder.SetCurrentDebugLocation(DebugLoc()); 2705 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2706 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2707 } 2708 2709 /// Get the built value. 2710 Value *getBuiltValue() { return Val; } 2711 2712 /// Remove the built instruction. 2713 void undo() override { 2714 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2715 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2716 IVal->eraseFromParent(); 2717 } 2718 }; 2719 2720 /// Mutate an instruction to another type. 2721 class TypeMutator : public TypePromotionAction { 2722 /// Record the original type. 2723 Type *OrigTy; 2724 2725 public: 2726 /// Mutate the type of \p Inst into \p NewTy. 2727 TypeMutator(Instruction *Inst, Type *NewTy) 2728 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2729 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2730 << "\n"); 2731 Inst->mutateType(NewTy); 2732 } 2733 2734 /// Mutate the instruction back to its original type. 2735 void undo() override { 2736 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2737 << "\n"); 2738 Inst->mutateType(OrigTy); 2739 } 2740 }; 2741 2742 /// Replace the uses of an instruction by another instruction. 2743 class UsesReplacer : public TypePromotionAction { 2744 /// Helper structure to keep track of the replaced uses. 2745 struct InstructionAndIdx { 2746 /// The instruction using the instruction. 2747 Instruction *Inst; 2748 2749 /// The index where this instruction is used for Inst. 2750 unsigned Idx; 2751 2752 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2753 : Inst(Inst), Idx(Idx) {} 2754 }; 2755 2756 /// Keep track of the original uses (pair Instruction, Index). 2757 SmallVector<InstructionAndIdx, 4> OriginalUses; 2758 /// Keep track of the debug users. 2759 SmallVector<DbgValueInst *, 1> DbgValues; 2760 2761 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; 2762 2763 public: 2764 /// Replace all the use of \p Inst by \p New. 2765 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2766 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2767 << "\n"); 2768 // Record the original uses. 
2769 for (Use &U : Inst->uses()) { 2770 Instruction *UserI = cast<Instruction>(U.getUser()); 2771 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2772 } 2773 // Record the debug uses separately. They are not in the instruction's 2774 // use list, but they are replaced by RAUW. 2775 findDbgValues(DbgValues, Inst); 2776 2777 // Now, we can replace the uses. 2778 Inst->replaceAllUsesWith(New); 2779 } 2780 2781 /// Reassign the original uses back to Inst. 2782 void undo() override { 2783 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2784 for (use_iterator UseIt = OriginalUses.begin(), 2785 EndIt = OriginalUses.end(); 2786 UseIt != EndIt; ++UseIt) { 2787 UseIt->Inst->setOperand(UseIt->Idx, Inst); 2788 } 2789 // RAUW has replaced all original uses with references to the new value, 2790 // including the debug uses. Since we are undoing the replacements, 2791 // the original debug uses must also be reinstated to maintain the 2792 // correctness and utility of debug value instructions. 2793 for (auto *DVI: DbgValues) { 2794 LLVMContext &Ctx = Inst->getType()->getContext(); 2795 auto *MV = MetadataAsValue::get(Ctx, ValueAsMetadata::get(Inst)); 2796 DVI->setOperand(0, MV); 2797 } 2798 } 2799 }; 2800 2801 /// Remove an instruction from the IR. 2802 class InstructionRemover : public TypePromotionAction { 2803 /// Original position of the instruction. 2804 InsertionHandler Inserter; 2805 2806 /// Helper structure to hide all the links to the instruction. In other 2807 /// words, this helps to behave as if the instruction were removed. 2808 OperandsHider Hider; 2809 2810 /// Keep track of the uses replaced, if any. 2811 UsesReplacer *Replacer = nullptr; 2812 2813 /// Keep track of instructions removed. 2814 SetOfInstrs &RemovedInsts; 2815 2816 public: 2817 /// Remove all references to \p Inst and optionally replace all its 2818 /// uses with \p New. 2819 /// \p RemovedInsts Keep track of the instructions removed by this Action. 2820 /// \pre If !Inst->use_empty(), then New != nullptr 2821 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, 2822 Value *New = nullptr) 2823 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 2824 RemovedInsts(RemovedInsts) { 2825 if (New) 2826 Replacer = new UsesReplacer(Inst, New); 2827 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 2828 RemovedInsts.insert(Inst); 2829 /// The instructions removed here will be freed after completing 2830 /// optimizeBlock() for all blocks as we need to keep track of the 2831 /// removed instructions during promotion. 2832 Inst->removeFromParent(); 2833 } 2834 2835 ~InstructionRemover() override { delete Replacer; } 2836 2837 /// Resurrect the instruction and reassign it to the proper uses if 2838 /// a new value was provided when building this action. 2839 void undo() override { 2840 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 2841 Inserter.insert(Inst); 2842 if (Replacer) 2843 Replacer->undo(); 2844 Hider.undo(); 2845 RemovedInsts.erase(Inst); 2846 } 2847 }; 2848 2849 public: 2850 /// Restoration point. 2851 /// The restoration point is a pointer to an action instead of an iterator 2852 /// because the iterator may be invalidated but not the pointer. 2853 using ConstRestorationPt = const TypePromotionAction *; 2854 2855 TypePromotionTransaction(SetOfInstrs &RemovedInsts) 2856 : RemovedInsts(RemovedInsts) {} 2857 2858 /// Advocate every change made in this transaction. Return true if any change 2859 /// happened.
2860 bool commit(); 2861 2862 /// Undo all the changes made after the given point. 2863 void rollback(ConstRestorationPt Point); 2864 2865 /// Get the current restoration point. 2866 ConstRestorationPt getRestorationPoint() const; 2867 2868 /// \name API for IR modification with state keeping to support rollback. 2869 /// @{ 2870 /// Same as Instruction::setOperand. 2871 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 2872 2873 /// Same as Instruction::eraseFromParent. 2874 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 2875 2876 /// Same as Value::replaceAllUsesWith. 2877 void replaceAllUsesWith(Instruction *Inst, Value *New); 2878 2879 /// Same as Value::mutateType. 2880 void mutateType(Instruction *Inst, Type *NewTy); 2881 2882 /// Same as IRBuilder::createTrunc. 2883 Value *createTrunc(Instruction *Opnd, Type *Ty); 2884 2885 /// Same as IRBuilder::createSExt. 2886 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 2887 2888 /// Same as IRBuilder::createZExt. 2889 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 2890 2891 /// Same as Instruction::moveBefore. 2892 void moveBefore(Instruction *Inst, Instruction *Before); 2893 /// @} 2894 2895 private: 2896 /// The ordered list of actions made so far. 2897 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2898 2899 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 2900 2901 SetOfInstrs &RemovedInsts; 2902 }; 2903 2904 } // end anonymous namespace 2905 2906 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2907 Value *NewVal) { 2908 Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>( 2909 Inst, Idx, NewVal)); 2910 } 2911 2912 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2913 Value *NewVal) { 2914 Actions.push_back( 2915 std::make_unique<TypePromotionTransaction::InstructionRemover>( 2916 Inst, RemovedInsts, NewVal)); 2917 } 2918 2919 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2920 Value *New) { 2921 Actions.push_back( 2922 std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2923 } 2924 2925 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2926 Actions.push_back( 2927 std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2928 } 2929 2930 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2931 Type *Ty) { 2932 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2933 Value *Val = Ptr->getBuiltValue(); 2934 Actions.push_back(std::move(Ptr)); 2935 return Val; 2936 } 2937 2938 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2939 Value *Opnd, Type *Ty) { 2940 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2941 Value *Val = Ptr->getBuiltValue(); 2942 Actions.push_back(std::move(Ptr)); 2943 return Val; 2944 } 2945 2946 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2947 Value *Opnd, Type *Ty) { 2948 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2949 Value *Val = Ptr->getBuiltValue(); 2950 Actions.push_back(std::move(Ptr)); 2951 return Val; 2952 } 2953 2954 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2955 Instruction *Before) { 2956 Actions.push_back( 2957 std::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 2958 Inst, Before)); 2959 } 2960 2961 TypePromotionTransaction::ConstRestorationPt 2962 TypePromotionTransaction::getRestorationPoint() const { 2963 return 
!Actions.empty() ? Actions.back().get() : nullptr; 2964 } 2965 2966 bool TypePromotionTransaction::commit() { 2967 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2968 ++It) 2969 (*It)->commit(); 2970 bool Modified = !Actions.empty(); 2971 Actions.clear(); 2972 return Modified; 2973 } 2974 2975 void TypePromotionTransaction::rollback( 2976 TypePromotionTransaction::ConstRestorationPt Point) { 2977 while (!Actions.empty() && Point != Actions.back().get()) { 2978 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2979 Curr->undo(); 2980 } 2981 } 2982 2983 namespace { 2984 2985 /// A helper class for matching addressing modes. 2986 /// 2987 /// This encapsulates the logic for matching the target-legal addressing modes. 2988 class AddressingModeMatcher { 2989 SmallVectorImpl<Instruction*> &AddrModeInsts; 2990 const TargetLowering &TLI; 2991 const TargetRegisterInfo &TRI; 2992 const DataLayout &DL; 2993 2994 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2995 /// the memory instruction that we're computing this address for. 2996 Type *AccessTy; 2997 unsigned AddrSpace; 2998 Instruction *MemoryInst; 2999 3000 /// This is the addressing mode that we're building up. This is 3001 /// part of the return value of this addressing mode matching stuff. 3002 ExtAddrMode &AddrMode; 3003 3004 /// The instructions inserted by other CodeGenPrepare optimizations. 3005 const SetOfInstrs &InsertedInsts; 3006 3007 /// A map from the instructions to their type before promotion. 3008 InstrToOrigTy &PromotedInsts; 3009 3010 /// The ongoing transaction where every action should be registered. 3011 TypePromotionTransaction &TPT; 3012 3013 // A GEP whose offset is too large to be folded into the addressing mode. 3014 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; 3015 3016 /// This is set to true when we should not do profitability checks. 3017 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 3018 bool IgnoreProfitability; 3019 3020 /// True if we are optimizing for size. 3021 bool OptSize; 3022 3023 ProfileSummaryInfo *PSI; 3024 BlockFrequencyInfo *BFI; 3025 3026 AddressingModeMatcher( 3027 SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, 3028 const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI, 3029 ExtAddrMode &AM, const SetOfInstrs &InsertedInsts, 3030 InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, 3031 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, 3032 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) 3033 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 3034 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 3035 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 3036 PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP), 3037 OptSize(OptSize), PSI(PSI), BFI(BFI) { 3038 IgnoreProfitability = false; 3039 } 3040 3041 public: 3042 /// Find the maximal addressing mode that a load/store of V can fold, 3043 /// given an access type of AccessTy. This returns a list of involved 3044 /// instructions in AddrModeInsts. 3045 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 3046 /// optimizations. 3047 /// \p PromotedInsts maps the instructions to their type before promotion. 3048 /// \p TPT The ongoing transaction where every action should be registered.
3049 static ExtAddrMode 3050 Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, 3051 SmallVectorImpl<Instruction *> &AddrModeInsts, 3052 const TargetLowering &TLI, const TargetRegisterInfo &TRI, 3053 const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, 3054 TypePromotionTransaction &TPT, 3055 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, 3056 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { 3057 ExtAddrMode Result; 3058 3059 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS, 3060 MemoryInst, Result, InsertedInsts, 3061 PromotedInsts, TPT, LargeOffsetGEP, 3062 OptSize, PSI, BFI) 3063 .matchAddr(V, 0); 3064 (void)Success; assert(Success && "Couldn't select *anything*?"); 3065 return Result; 3066 } 3067 3068 private: 3069 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 3070 bool matchAddr(Value *Addr, unsigned Depth); 3071 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, 3072 bool *MovedAway = nullptr); 3073 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 3074 ExtAddrMode &AMBefore, 3075 ExtAddrMode &AMAfter); 3076 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 3077 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 3078 Value *PromotedOperand) const; 3079 }; 3080 3081 class PhiNodeSet; 3082 3083 /// An iterator for PhiNodeSet. 3084 class PhiNodeSetIterator { 3085 PhiNodeSet * const Set; 3086 size_t CurrentIndex = 0; 3087 3088 public: 3089 /// The constructor. Start should point to either a valid element, or be equal 3090 /// to the size of the underlying SmallVector of the PhiNodeSet. 3091 PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start); 3092 PHINode * operator*() const; 3093 PhiNodeSetIterator& operator++(); 3094 bool operator==(const PhiNodeSetIterator &RHS) const; 3095 bool operator!=(const PhiNodeSetIterator &RHS) const; 3096 }; 3097 3098 /// Keeps a set of PHINodes. 3099 /// 3100 /// This is a minimal set implementation for a specific use case: 3101 /// It is very fast when there are very few elements, but also provides good 3102 /// performance when there are many. It is similar to SmallPtrSet, but also 3103 /// provides iteration by insertion order, which is deterministic and stable 3104 /// across runs. It is also similar to SmallSetVector, but provides removing 3105 /// elements in O(1) time. This is achieved by not actually removing the element 3106 /// from the underlying vector, so comes at the cost of using more memory, but 3107 /// that is fine, since PhiNodeSets are used as short lived objects. 3108 class PhiNodeSet { 3109 friend class PhiNodeSetIterator; 3110 3111 using MapType = SmallDenseMap<PHINode *, size_t, 32>; 3112 using iterator = PhiNodeSetIterator; 3113 3114 /// Keeps the elements in the order of their insertion in the underlying 3115 /// vector. To achieve constant time removal, it never deletes any element. 3116 SmallVector<PHINode *, 32> NodeList; 3117 3118 /// Keeps the elements in the underlying set implementation. This (and not the 3119 /// NodeList defined above) is the source of truth on whether an element 3120 /// is actually in the collection. 3121 MapType NodeMap; 3122 3123 /// Points to the first valid (not deleted) element when the set is not empty 3124 /// and the value is not zero. Equals to the size of the underlying vector 3125 /// when the set is empty. When the value is 0, as in the beginning, the 3126 /// first element may or may not be valid. 
3127 size_t FirstValidElement = 0; 3128 3129 public: 3130 /// Inserts a new element to the collection. 3131 /// \returns true if the element is actually added, i.e. was not in the 3132 /// collection before the operation. 3133 bool insert(PHINode *Ptr) { 3134 if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { 3135 NodeList.push_back(Ptr); 3136 return true; 3137 } 3138 return false; 3139 } 3140 3141 /// Removes the element from the collection. 3142 /// \returns whether the element is actually removed, i.e. was in the 3143 /// collection before the operation. 3144 bool erase(PHINode *Ptr) { 3145 auto it = NodeMap.find(Ptr); 3146 if (it != NodeMap.end()) { 3147 NodeMap.erase(Ptr); 3148 SkipRemovedElements(FirstValidElement); 3149 return true; 3150 } 3151 return false; 3152 } 3153 3154 /// Removes all elements and clears the collection. 3155 void clear() { 3156 NodeMap.clear(); 3157 NodeList.clear(); 3158 FirstValidElement = 0; 3159 } 3160 3161 /// \returns an iterator that will iterate the elements in the order of 3162 /// insertion. 3163 iterator begin() { 3164 if (FirstValidElement == 0) 3165 SkipRemovedElements(FirstValidElement); 3166 return PhiNodeSetIterator(this, FirstValidElement); 3167 } 3168 3169 /// \returns an iterator that points to the end of the collection. 3170 iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } 3171 3172 /// Returns the number of elements in the collection. 3173 size_t size() const { 3174 return NodeMap.size(); 3175 } 3176 3177 /// \returns 1 if the given element is in the collection, and 0 if otherwise. 3178 size_t count(PHINode *Ptr) const { 3179 return NodeMap.count(Ptr); 3180 } 3181 3182 private: 3183 /// Updates the CurrentIndex so that it will point to a valid element. 3184 /// 3185 /// If the element of NodeList at CurrentIndex is valid, it does not 3186 /// change it. If there are no more valid elements, it updates CurrentIndex 3187 /// to point to the end of the NodeList. 3188 void SkipRemovedElements(size_t &CurrentIndex) { 3189 while (CurrentIndex < NodeList.size()) { 3190 auto it = NodeMap.find(NodeList[CurrentIndex]); 3191 // If the element has been deleted and added again later, NodeMap will 3192 // point to a different index, so CurrentIndex will still be invalid. 3193 if (it != NodeMap.end() && it->second == CurrentIndex) 3194 break; 3195 ++CurrentIndex; 3196 } 3197 } 3198 }; 3199 3200 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) 3201 : Set(Set), CurrentIndex(Start) {} 3202 3203 PHINode * PhiNodeSetIterator::operator*() const { 3204 assert(CurrentIndex < Set->NodeList.size() && 3205 "PhiNodeSet access out of range"); 3206 return Set->NodeList[CurrentIndex]; 3207 } 3208 3209 PhiNodeSetIterator& PhiNodeSetIterator::operator++() { 3210 assert(CurrentIndex < Set->NodeList.size() && 3211 "PhiNodeSet access out of range"); 3212 ++CurrentIndex; 3213 Set->SkipRemovedElements(CurrentIndex); 3214 return *this; 3215 } 3216 3217 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { 3218 return CurrentIndex == RHS.CurrentIndex; 3219 } 3220 3221 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { 3222 return !((*this) == RHS); 3223 } 3224 3225 /// Keep track of simplification of Phi nodes. 3226 /// Accept the set of all phi nodes and erase phi node from this set 3227 /// if it is simplified. 3228 class SimplificationTracker { 3229 DenseMap<Value *, Value *> Storage; 3230 const SimplifyQuery &SQ; 3231 // Tracks newly created Phi nodes. 
The elements are iterated by insertion 3232 // order. 3233 PhiNodeSet AllPhiNodes; 3234 // Tracks newly created Select nodes. 3235 SmallPtrSet<SelectInst *, 32> AllSelectNodes; 3236 3237 public: 3238 SimplificationTracker(const SimplifyQuery &sq) 3239 : SQ(sq) {} 3240 3241 Value *Get(Value *V) { 3242 do { 3243 auto SV = Storage.find(V); 3244 if (SV == Storage.end()) 3245 return V; 3246 V = SV->second; 3247 } while (true); 3248 } 3249 3250 Value *Simplify(Value *Val) { 3251 SmallVector<Value *, 32> WorkList; 3252 SmallPtrSet<Value *, 32> Visited; 3253 WorkList.push_back(Val); 3254 while (!WorkList.empty()) { 3255 auto *P = WorkList.pop_back_val(); 3256 if (!Visited.insert(P).second) 3257 continue; 3258 if (auto *PI = dyn_cast<Instruction>(P)) 3259 if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { 3260 for (auto *U : PI->users()) 3261 WorkList.push_back(cast<Value>(U)); 3262 Put(PI, V); 3263 PI->replaceAllUsesWith(V); 3264 if (auto *PHI = dyn_cast<PHINode>(PI)) 3265 AllPhiNodes.erase(PHI); 3266 if (auto *Select = dyn_cast<SelectInst>(PI)) 3267 AllSelectNodes.erase(Select); 3268 PI->eraseFromParent(); 3269 } 3270 } 3271 return Get(Val); 3272 } 3273 3274 void Put(Value *From, Value *To) { 3275 Storage.insert({ From, To }); 3276 } 3277 3278 void ReplacePhi(PHINode *From, PHINode *To) { 3279 Value* OldReplacement = Get(From); 3280 while (OldReplacement != From) { 3281 From = To; 3282 To = dyn_cast<PHINode>(OldReplacement); 3283 OldReplacement = Get(From); 3284 } 3285 assert(To && Get(To) == To && "Replacement PHI node is already replaced."); 3286 Put(From, To); 3287 From->replaceAllUsesWith(To); 3288 AllPhiNodes.erase(From); 3289 From->eraseFromParent(); 3290 } 3291 3292 PhiNodeSet& newPhiNodes() { return AllPhiNodes; } 3293 3294 void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } 3295 3296 void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } 3297 3298 unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } 3299 3300 unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } 3301 3302 void destroyNewNodes(Type *CommonType) { 3303 // For safe erasing, replace the uses with dummy value first. 3304 auto *Dummy = UndefValue::get(CommonType); 3305 for (auto *I : AllPhiNodes) { 3306 I->replaceAllUsesWith(Dummy); 3307 I->eraseFromParent(); 3308 } 3309 AllPhiNodes.clear(); 3310 for (auto *I : AllSelectNodes) { 3311 I->replaceAllUsesWith(Dummy); 3312 I->eraseFromParent(); 3313 } 3314 AllSelectNodes.clear(); 3315 } 3316 }; 3317 3318 /// A helper class for combining addressing modes. 3319 class AddressingModeCombiner { 3320 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; 3321 typedef std::pair<PHINode *, PHINode *> PHIPair; 3322 3323 private: 3324 /// The addressing modes we've collected. 3325 SmallVector<ExtAddrMode, 16> AddrModes; 3326 3327 /// The field in which the AddrModes differ, when we have more than one. 3328 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; 3329 3330 /// Are the AddrModes that we have all just equal to their original values? 3331 bool AllAddrModesTrivial = true; 3332 3333 /// Common Type for all different fields in addressing modes. 3334 Type *CommonType; 3335 3336 /// SimplifyQuery for simplifyInstruction utility. 3337 const SimplifyQuery &SQ; 3338 3339 /// Original Address. 
  Value *Original;

public:
  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
      : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {}

  /// Get the combined AddrMode
  const ExtAddrMode &getAddrMode() const {
    return AddrModes[0];
  }

  /// Add a new AddrMode if it's compatible with the AddrModes we already
  /// have.
  /// \return True iff we succeeded in doing so.
  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of whether we have any non-trivial AddrModes: we need to
    // detect when all AddrModes are trivial, as then we would just introduce a
    // phi or select that duplicates what is already there.
    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();

    // If this is the first addrmode then everything is fine.
    if (AddrModes.empty()) {
      AddrModes.emplace_back(NewAddrMode);
      return true;
    }

    // Figure out how different this is from the other address modes, which we
    // can do just by comparing against the first one given that we only care
    // about the cumulative difference.
    ExtAddrMode::FieldName ThisDifferentField =
        AddrModes[0].compare(NewAddrMode);
    if (DifferentField == ExtAddrMode::NoField)
      DifferentField = ThisDifferentField;
    else if (DifferentField != ThisDifferentField)
      DifferentField = ExtAddrMode::MultipleFields;

    // If NewAddrMode differs in more than one dimension we cannot handle it.
    bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;

    // If the Scale field is different then we reject.
    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;

    // We must also reject the case where the base offsets differ and the
    // scaled register is not null: we cannot handle it, because the merge of
    // the different offsets would have to be used as the ScaledReg.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
                              !NewAddrMode.ScaledReg);

    // We must also reject the case where the GV differs and a BaseReg is
    // already installed, because we want to use the base register to hold the
    // merge of the GV values.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
                              !NewAddrMode.HasBaseReg);

    // Even if NewAddrMode is the same we still need to collect it, because its
    // original value is different. Later we will need all the original values
    // as anchors when finding the common Phi node.
    if (CanHandle)
      AddrModes.emplace_back(NewAddrMode);
    else
      AddrModes.clear();

    return CanHandle;
  }

  /// Combine the addressing modes we've collected into a single
  /// addressing mode.
  /// \return True iff we successfully combined them or we only had one so
  /// didn't need to combine them anyway.
  bool combineAddrModes() {
    // If we have no AddrModes then they can't be combined.
    if (AddrModes.size() == 0)
      return false;

    // A single AddrMode can trivially be combined.
    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
      return true;

    // If the AddrModes we collected are all just equal to the value they are
    // derived from then combining them wouldn't do anything useful.
    if (AllAddrModesTrivial)
      return false;

    if (!addrModeCombiningAllowed())
      return false;

    // Build a map between <original value, basic block where we saw it> to
    // value of base register.
3427 // Bail out if there is no common type. 3428 FoldAddrToValueMapping Map; 3429 if (!initializeMap(Map)) 3430 return false; 3431 3432 Value *CommonValue = findCommon(Map); 3433 if (CommonValue) 3434 AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); 3435 return CommonValue != nullptr; 3436 } 3437 3438 private: 3439 /// Initialize Map with anchor values. For address seen 3440 /// we set the value of different field saw in this address. 3441 /// At the same time we find a common type for different field we will 3442 /// use to create new Phi/Select nodes. Keep it in CommonType field. 3443 /// Return false if there is no common type found. 3444 bool initializeMap(FoldAddrToValueMapping &Map) { 3445 // Keep track of keys where the value is null. We will need to replace it 3446 // with constant null when we know the common type. 3447 SmallVector<Value *, 2> NullValue; 3448 Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); 3449 for (auto &AM : AddrModes) { 3450 Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); 3451 if (DV) { 3452 auto *Type = DV->getType(); 3453 if (CommonType && CommonType != Type) 3454 return false; 3455 CommonType = Type; 3456 Map[AM.OriginalValue] = DV; 3457 } else { 3458 NullValue.push_back(AM.OriginalValue); 3459 } 3460 } 3461 assert(CommonType && "At least one non-null value must be!"); 3462 for (auto *V : NullValue) 3463 Map[V] = Constant::getNullValue(CommonType); 3464 return true; 3465 } 3466 3467 /// We have mapping between value A and other value B where B was a field in 3468 /// addressing mode represented by A. Also we have an original value C 3469 /// representing an address we start with. Traversing from C through phi and 3470 /// selects we ended up with A's in a map. This utility function tries to find 3471 /// a value V which is a field in addressing mode C and traversing through phi 3472 /// nodes and selects we will end up in corresponded values B in a map. 3473 /// The utility will create a new Phi/Selects if needed. 3474 // The simple example looks as follows: 3475 // BB1: 3476 // p1 = b1 + 40 3477 // br cond BB2, BB3 3478 // BB2: 3479 // p2 = b2 + 40 3480 // br BB3 3481 // BB3: 3482 // p = phi [p1, BB1], [p2, BB2] 3483 // v = load p 3484 // Map is 3485 // p1 -> b1 3486 // p2 -> b2 3487 // Request is 3488 // p -> ? 3489 // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. 3490 Value *findCommon(FoldAddrToValueMapping &Map) { 3491 // Tracks the simplification of newly created phi nodes. The reason we use 3492 // this mapping is because we will add new created Phi nodes in AddrToBase. 3493 // Simplification of Phi nodes is recursive, so some Phi node may 3494 // be simplified after we added it to AddrToBase. In reality this 3495 // simplification is possible only if original phi/selects were not 3496 // simplified yet. 3497 // Using this mapping we can find the current value in AddrToBase. 3498 SimplificationTracker ST(SQ); 3499 3500 // First step, DFS to create PHI nodes for all intermediate blocks. 3501 // Also fill traverse order for the second step. 3502 SmallVector<Value *, 32> TraverseOrder; 3503 InsertPlaceholders(Map, TraverseOrder, ST); 3504 3505 // Second Step, fill new nodes by merged values and simplify if possible. 3506 FillPlaceholders(Map, TraverseOrder, ST); 3507 3508 if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { 3509 ST.destroyNewNodes(CommonType); 3510 return nullptr; 3511 } 3512 3513 // Now we'd like to match New Phi nodes to existed ones. 
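    // For illustration (hypothetical IR): if the function already contains
    //   %b = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
    // and a newly created "sunk_phi" merges exactly the same incoming values
    // in the same block, MatchPhiSet lets the tracker replace the new node
    // with %b instead of keeping a duplicate.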
    unsigned PhiNotMatchedCount = 0;
    if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    auto *Result = ST.Get(Map.find(Original)->second);
    if (Result) {
      NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
      NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
    }
    return Result;
  }

  /// Try to match PHI node to Candidate.
  /// Matcher tracks the matched Phi nodes.
  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
                    SmallSetVector<PHIPair, 8> &Matcher,
                    PhiNodeSet &PhiNodesToMatch) {
    SmallVector<PHIPair, 8> WorkList;
    Matcher.insert({ PHI, Candidate });
    SmallSet<PHINode *, 8> MatchedPHIs;
    MatchedPHIs.insert(PHI);
    WorkList.push_back({ PHI, Candidate });
    SmallSet<PHIPair, 8> Visited;
    while (!WorkList.empty()) {
      auto Item = WorkList.pop_back_val();
      if (!Visited.insert(Item).second)
        continue;
      // We iterate over all incoming values of the two Phis to compare them.
      // If a pair of incoming values differs, but both values are Phi nodes,
      // the first one is a Phi node we created (and thus subject to matching),
      // and both live in the same basic block, then the pair may still match
      // provided their own incoming values match. So we tentatively record
      // them as matching and add them to the work list to verify that.
      for (auto B : Item.first->blocks()) {
        Value *FirstValue = Item.first->getIncomingValueForBlock(B);
        Value *SecondValue = Item.second->getIncomingValueForBlock(B);
        if (FirstValue == SecondValue)
          continue;

        PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
        PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);

        // If one of them is not a Phi, or the first one is not a Phi node from
        // the set we'd like to match, or the two Phi nodes live in different
        // basic blocks, then we will not be able to match.
        if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
            FirstPhi->getParent() != SecondPhi->getParent())
          return false;

        // If we already matched them then continue.
        if (Matcher.count({ FirstPhi, SecondPhi }))
          continue;
        // The values are different and do not trivially match, so we need them
        // to match. (But we register no more than one match per PHI node, so
        // that we won't later try to replace them twice.)
        if (MatchedPHIs.insert(FirstPhi).second)
          Matcher.insert({ FirstPhi, SecondPhi });
        // But we must still verify the pair.
        WorkList.push_back({ FirstPhi, SecondPhi });
      }
    }
    return true;
  }

  /// For the given set of PHI nodes (in the SimplificationTracker) try
  /// to find their equivalents.
  /// Returns false if this matching fails and creation of new Phi nodes is
  /// disabled.
  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
                   unsigned &PhiNotMatchedCount) {
    // Matched and PhiNodesToMatch iterate their elements in a deterministic
    // order, so the replacements (ReplacePhi) are also done in a deterministic
    // order.
    SmallSetVector<PHIPair, 8> Matched;
    SmallPtrSet<PHINode *, 8> WillNotMatch;
    PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
    while (PhiNodesToMatch.size()) {
      PHINode *PHI = *PhiNodesToMatch.begin();

      // Record this Phi; if it matches no Phi node in its basic block, it and
      // everything else collected in WillNotMatch will never match.
      WillNotMatch.clear();
      WillNotMatch.insert(PHI);

      // Traverse all Phis until we find an equivalent one or fail to do so.
      bool IsMatched = false;
      for (auto &P : PHI->getParent()->phis()) {
        if (&P == PHI)
          continue;
        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
          break;
        // If it does not match, collect all Phi nodes from the matcher:
        // if we end up with no match, then none of these Phi nodes will match
        // later either.
        for (auto M : Matched)
          WillNotMatch.insert(M.first);
        Matched.clear();
      }
      if (IsMatched) {
        // Replace all matched values and erase them.
        for (auto MV : Matched)
          ST.ReplacePhi(MV.first, MV.second);
        Matched.clear();
        continue;
      }
      // If we are not allowed to create new nodes then bail out.
      if (!AllowNewPhiNodes)
        return false;
      // Just remove all seen values in matcher. They will not match anything.
      PhiNotMatchedCount += WillNotMatch.size();
      for (auto *P : WillNotMatch)
        PhiNodesToMatch.erase(P);
    }
    return true;
  }
  /// Fill the placeholders with values from predecessors and simplify them.
  void FillPlaceholders(FoldAddrToValueMapping &Map,
                        SmallVectorImpl<Value *> &TraverseOrder,
                        SimplificationTracker &ST) {
    while (!TraverseOrder.empty()) {
      Value *Current = TraverseOrder.pop_back_val();
      assert(Map.find(Current) != Map.end() && "No node to fill!!!");
      Value *V = Map[Current];

      if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
        // CurrentValue also must be Select.
        auto *CurrentSelect = cast<SelectInst>(Current);
        auto *TrueValue = CurrentSelect->getTrueValue();
        assert(Map.find(TrueValue) != Map.end() && "No True Value!");
        Select->setTrueValue(ST.Get(Map[TrueValue]));
        auto *FalseValue = CurrentSelect->getFalseValue();
        assert(Map.find(FalseValue) != Map.end() && "No False Value!");
        Select->setFalseValue(ST.Get(Map[FalseValue]));
      } else {
        // Must be a Phi node then.
        auto *PHI = cast<PHINode>(V);
        // Fill the Phi node with values from predecessors.
        for (auto *B : predecessors(PHI->getParent())) {
          Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
          assert(Map.find(PV) != Map.end() && "No predecessor Value!");
          PHI->addIncoming(ST.Get(Map[PV]), B);
        }
      }
      Map[Current] = ST.Simplify(V);
    }
  }

  /// Starting from the original value, recursively iterates over the def-use
  /// chain up to the known ending values represented in the map. For each
  /// traversed phi/select, inserts a placeholder Phi or Select.
  /// Reports all newly created Phi/Select nodes by adding them to the set.
  /// Also reports the order in which the values have been traversed.
  void InsertPlaceholders(FoldAddrToValueMapping &Map,
                          SmallVectorImpl<Value *> &TraverseOrder,
                          SimplificationTracker &ST) {
    SmallVector<Value *, 32> Worklist;
    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
           "Address must be a Phi or Select node");
    auto *Dummy = UndefValue::get(CommonType);
    Worklist.push_back(Original);
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      // If it is already visited or it is an ending value then skip it.
      if (Map.find(Current) != Map.end())
        continue;
      TraverseOrder.push_back(Current);

      // CurrentValue must be a Phi node or select. All others must be covered
      // by anchors.
      if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
        // Is it OK to get metadata from OrigSelect?!
3685 // Create a Select placeholder with dummy value. 3686 SelectInst *Select = SelectInst::Create( 3687 CurrentSelect->getCondition(), Dummy, Dummy, 3688 CurrentSelect->getName(), CurrentSelect, CurrentSelect); 3689 Map[Current] = Select; 3690 ST.insertNewSelect(Select); 3691 // We are interested in True and False values. 3692 Worklist.push_back(CurrentSelect->getTrueValue()); 3693 Worklist.push_back(CurrentSelect->getFalseValue()); 3694 } else { 3695 // It must be a Phi node then. 3696 PHINode *CurrentPhi = cast<PHINode>(Current); 3697 unsigned PredCount = CurrentPhi->getNumIncomingValues(); 3698 PHINode *PHI = 3699 PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); 3700 Map[Current] = PHI; 3701 ST.insertNewPhi(PHI); 3702 for (Value *P : CurrentPhi->incoming_values()) 3703 Worklist.push_back(P); 3704 } 3705 } 3706 } 3707 3708 bool addrModeCombiningAllowed() { 3709 if (DisableComplexAddrModes) 3710 return false; 3711 switch (DifferentField) { 3712 default: 3713 return false; 3714 case ExtAddrMode::BaseRegField: 3715 return AddrSinkCombineBaseReg; 3716 case ExtAddrMode::BaseGVField: 3717 return AddrSinkCombineBaseGV; 3718 case ExtAddrMode::BaseOffsField: 3719 return AddrSinkCombineBaseOffs; 3720 case ExtAddrMode::ScaledRegField: 3721 return AddrSinkCombineScaledReg; 3722 } 3723 } 3724 }; 3725 } // end anonymous namespace 3726 3727 /// Try adding ScaleReg*Scale to the current addressing mode. 3728 /// Return true and update AddrMode if this addr mode is legal for the target, 3729 /// false if not. 3730 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 3731 unsigned Depth) { 3732 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 3733 // mode. Just process that directly. 3734 if (Scale == 1) 3735 return matchAddr(ScaleReg, Depth); 3736 3737 // If the scale is 0, it takes nothing to add this. 3738 if (Scale == 0) 3739 return true; 3740 3741 // If we already have a scale of this value, we can add to it, otherwise, we 3742 // need an available scale field. 3743 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 3744 return false; 3745 3746 ExtAddrMode TestAddrMode = AddrMode; 3747 3748 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 3749 // [A+B + A*7] -> [B+A*8]. 3750 TestAddrMode.Scale += Scale; 3751 TestAddrMode.ScaledReg = ScaleReg; 3752 3753 // If the new address isn't legal, bail out. 3754 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) 3755 return false; 3756 3757 // It was legal, so commit it. 3758 AddrMode = TestAddrMode; 3759 3760 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 3761 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 3762 // X*Scale + C*Scale to addr mode. 3763 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 3764 if (isa<Instruction>(ScaleReg) && // not a constant expr. 3765 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) && 3766 CI->getValue().isSignedIntN(64)) { 3767 TestAddrMode.InBounds = false; 3768 TestAddrMode.ScaledReg = AddLHS; 3769 TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale; 3770 3771 // If this addressing mode is legal, commit it and remember that we folded 3772 // this instruction. 3773 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { 3774 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 3775 AddrMode = TestAddrMode; 3776 return true; 3777 } 3778 } 3779 3780 // Otherwise, not (x+c)*scale, just return what we have. 
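  // When the pattern does apply (illustrative, hypothetical values): with no
  // scale previously in use, ScaleReg = (add i64 %x, 7) and Scale = 4, the
  // block above commits ScaledReg = %x, Scale = 4 and folds 7 * 4 = 28 into
  // BaseOffs, provided the target reports the resulting mode as legal.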
  return true;
}

/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that matchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isIntOrPtrTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}

namespace {

/// Helper class to perform type promotion.
class TypePromotionHelper {
  /// Utility function to add a promoted instruction \p ExtOpnd to
  /// \p PromotedInsts and record the type of extension we have seen.
  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
                              Instruction *ExtOpnd,
                              bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
    if (It != PromotedInsts.end()) {
      // If the new extension is the same as the original, the information in
      // PromotedInsts[ExtOpnd] is still correct.
      if (It->second.getInt() == ExtTy)
        return;

      // Now that the new extension differs from the old one, we make the type
      // information invalid by setting the extension type to BothExtension.
      ExtTy = BothExtension;
    }
    PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
  }

  /// Utility function to query the original type of instruction \p Opnd
  /// with a matched extension type. If the extension doesn't match, we
  /// cannot use the information we had on the original type.
  /// BothExtension doesn't match any extension type.
  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
                                 Instruction *Opnd,
                                 bool IsSExt) {
    ExtType ExtTy = IsSExt ?
SignExtension : ZeroExtension; 3866 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 3867 if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) 3868 return It->second.getPointer(); 3869 return nullptr; 3870 } 3871 3872 /// Utility function to check whether or not a sign or zero extension 3873 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 3874 /// either using the operands of \p Inst or promoting \p Inst. 3875 /// The type of the extension is defined by \p IsSExt. 3876 /// In other words, check if: 3877 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 3878 /// #1 Promotion applies: 3879 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 3880 /// #2 Operand reuses: 3881 /// ext opnd1 to ConsideredExtType. 3882 /// \p PromotedInsts maps the instructions to their type before promotion. 3883 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 3884 const InstrToOrigTy &PromotedInsts, bool IsSExt); 3885 3886 /// Utility function to determine if \p OpIdx should be promoted when 3887 /// promoting \p Inst. 3888 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 3889 return !(isa<SelectInst>(Inst) && OpIdx == 0); 3890 } 3891 3892 /// Utility function to promote the operand of \p Ext when this 3893 /// operand is a promotable trunc or sext or zext. 3894 /// \p PromotedInsts maps the instructions to their type before promotion. 3895 /// \p CreatedInstsCost[out] contains the cost of all instructions 3896 /// created to promote the operand of Ext. 3897 /// Newly added extensions are inserted in \p Exts. 3898 /// Newly added truncates are inserted in \p Truncs. 3899 /// Should never be called directly. 3900 /// \return The promoted value which is used instead of Ext. 3901 static Value *promoteOperandForTruncAndAnyExt( 3902 Instruction *Ext, TypePromotionTransaction &TPT, 3903 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3904 SmallVectorImpl<Instruction *> *Exts, 3905 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 3906 3907 /// Utility function to promote the operand of \p Ext when this 3908 /// operand is promotable and is not a supported trunc or sext. 3909 /// \p PromotedInsts maps the instructions to their type before promotion. 3910 /// \p CreatedInstsCost[out] contains the cost of all the instructions 3911 /// created to promote the operand of Ext. 3912 /// Newly added extensions are inserted in \p Exts. 3913 /// Newly added truncates are inserted in \p Truncs. 3914 /// Should never be called directly. 3915 /// \return The promoted value which is used instead of Ext. 3916 static Value *promoteOperandForOther(Instruction *Ext, 3917 TypePromotionTransaction &TPT, 3918 InstrToOrigTy &PromotedInsts, 3919 unsigned &CreatedInstsCost, 3920 SmallVectorImpl<Instruction *> *Exts, 3921 SmallVectorImpl<Instruction *> *Truncs, 3922 const TargetLowering &TLI, bool IsSExt); 3923 3924 /// \see promoteOperandForOther. 3925 static Value *signExtendOperandForOther( 3926 Instruction *Ext, TypePromotionTransaction &TPT, 3927 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3928 SmallVectorImpl<Instruction *> *Exts, 3929 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3930 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3931 Exts, Truncs, TLI, true); 3932 } 3933 3934 /// \see promoteOperandForOther. 
3935 static Value *zeroExtendOperandForOther( 3936 Instruction *Ext, TypePromotionTransaction &TPT, 3937 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3938 SmallVectorImpl<Instruction *> *Exts, 3939 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 3940 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 3941 Exts, Truncs, TLI, false); 3942 } 3943 3944 public: 3945 /// Type for the utility function that promotes the operand of Ext. 3946 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, 3947 InstrToOrigTy &PromotedInsts, 3948 unsigned &CreatedInstsCost, 3949 SmallVectorImpl<Instruction *> *Exts, 3950 SmallVectorImpl<Instruction *> *Truncs, 3951 const TargetLowering &TLI); 3952 3953 /// Given a sign/zero extend instruction \p Ext, return the appropriate 3954 /// action to promote the operand of \p Ext instead of using Ext. 3955 /// \return NULL if no promotable action is possible with the current 3956 /// sign extension. 3957 /// \p InsertedInsts keeps track of all the instructions inserted by the 3958 /// other CodeGenPrepare optimizations. This information is important 3959 /// because we do not want to promote these instructions as CodeGenPrepare 3960 /// will reinsert them later. Thus creating an infinite loop: create/remove. 3961 /// \p PromotedInsts maps the instructions to their type before promotion. 3962 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, 3963 const TargetLowering &TLI, 3964 const InstrToOrigTy &PromotedInsts); 3965 }; 3966 3967 } // end anonymous namespace 3968 3969 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 3970 Type *ConsideredExtType, 3971 const InstrToOrigTy &PromotedInsts, 3972 bool IsSExt) { 3973 // The promotion helper does not know how to deal with vector types yet. 3974 // To be able to fix that, we would need to fix the places where we 3975 // statically extend, e.g., constants and such. 3976 if (Inst->getType()->isVectorTy()) 3977 return false; 3978 3979 // We can always get through zext. 3980 if (isa<ZExtInst>(Inst)) 3981 return true; 3982 3983 // sext(sext) is ok too. 3984 if (IsSExt && isa<SExtInst>(Inst)) 3985 return true; 3986 3987 // We can get through binary operator, if it is legal. In other words, the 3988 // binary operator must have a nuw or nsw flag. 3989 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 3990 if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) && 3991 ((!IsSExt && BinOp->hasNoUnsignedWrap()) || 3992 (IsSExt && BinOp->hasNoSignedWrap()))) 3993 return true; 3994 3995 // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) 3996 if ((Inst->getOpcode() == Instruction::And || 3997 Inst->getOpcode() == Instruction::Or)) 3998 return true; 3999 4000 // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) 4001 if (Inst->getOpcode() == Instruction::Xor) { 4002 const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)); 4003 // Make sure it is not a NOT. 4004 if (Cst && !Cst->getValue().isAllOnesValue()) 4005 return true; 4006 } 4007 4008 // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst)) 4009 // It may change a poisoned value into a regular value, like 4010 // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12 4011 // poisoned value regular value 4012 // It should be OK since undef covers valid value. 
4013 if (Inst->getOpcode() == Instruction::LShr && !IsSExt) 4014 return true; 4015 4016 // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) 4017 // It may change a poisoned value into a regular value, like 4018 // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 4019 // poisoned value regular value 4020 // It should be OK since undef covers valid value. 4021 if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { 4022 const auto *ExtInst = cast<const Instruction>(*Inst->user_begin()); 4023 if (ExtInst->hasOneUse()) { 4024 const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin()); 4025 if (AndInst && AndInst->getOpcode() == Instruction::And) { 4026 const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); 4027 if (Cst && 4028 Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) 4029 return true; 4030 } 4031 } 4032 } 4033 4034 // Check if we can do the following simplification. 4035 // ext(trunc(opnd)) --> ext(opnd) 4036 if (!isa<TruncInst>(Inst)) 4037 return false; 4038 4039 Value *OpndVal = Inst->getOperand(0); 4040 // Check if we can use this operand in the extension. 4041 // If the type is larger than the result type of the extension, we cannot. 4042 if (!OpndVal->getType()->isIntegerTy() || 4043 OpndVal->getType()->getIntegerBitWidth() > 4044 ConsideredExtType->getIntegerBitWidth()) 4045 return false; 4046 4047 // If the operand of the truncate is not an instruction, we will not have 4048 // any information on the dropped bits. 4049 // (Actually we could for constant but it is not worth the extra logic). 4050 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 4051 if (!Opnd) 4052 return false; 4053 4054 // Check if the source of the type is narrow enough. 4055 // I.e., check that trunc just drops extended bits of the same kind of 4056 // the extension. 4057 // #1 get the type of the operand and check the kind of the extended bits. 4058 const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); 4059 if (OpndType) 4060 ; 4061 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) 4062 OpndType = Opnd->getOperand(0)->getType(); 4063 else 4064 return false; 4065 4066 // #2 check that the truncate just drops extended bits. 4067 return Inst->getType()->getIntegerBitWidth() >= 4068 OpndType->getIntegerBitWidth(); 4069 } 4070 4071 TypePromotionHelper::Action TypePromotionHelper::getAction( 4072 Instruction *Ext, const SetOfInstrs &InsertedInsts, 4073 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 4074 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4075 "Unexpected instruction type"); 4076 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); 4077 Type *ExtTy = Ext->getType(); 4078 bool IsSExt = isa<SExtInst>(Ext); 4079 // If the operand of the extension is not an instruction, we cannot 4080 // get through. 4081 // If it, check we can get through. 4082 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) 4083 return nullptr; 4084 4085 // Do not promote if the operand has been added by codegenprepare. 4086 // Otherwise, it means we are undoing an optimization that is likely to be 4087 // redone, thus causing potential infinite loop. 4088 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) 4089 return nullptr; 4090 4091 // SExt or Trunc instructions. 4092 // Return the related handler. 
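  // For instance (illustrative IR), for the chain
  //   %e1 = zext i8 %v to i16
  //   %e2 = zext i16 %e1 to i64
  // getAction(%e2) returns promoteOperandForTruncAndAnyExt, which rewrites the
  // pair into a single zext of %v to i64.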
4093 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || 4094 isa<ZExtInst>(ExtOpnd)) 4095 return promoteOperandForTruncAndAnyExt; 4096 4097 // Regular instruction. 4098 // Abort early if we will have to insert non-free instructions. 4099 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) 4100 return nullptr; 4101 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; 4102 } 4103 4104 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 4105 Instruction *SExt, TypePromotionTransaction &TPT, 4106 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4107 SmallVectorImpl<Instruction *> *Exts, 4108 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 4109 // By construction, the operand of SExt is an instruction. Otherwise we cannot 4110 // get through it and this method should not be called. 4111 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 4112 Value *ExtVal = SExt; 4113 bool HasMergedNonFreeExt = false; 4114 if (isa<ZExtInst>(SExtOpnd)) { 4115 // Replace s|zext(zext(opnd)) 4116 // => zext(opnd). 4117 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); 4118 Value *ZExt = 4119 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 4120 TPT.replaceAllUsesWith(SExt, ZExt); 4121 TPT.eraseInstruction(SExt); 4122 ExtVal = ZExt; 4123 } else { 4124 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) 4125 // => z|sext(opnd). 4126 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 4127 } 4128 CreatedInstsCost = 0; 4129 4130 // Remove dead code. 4131 if (SExtOpnd->use_empty()) 4132 TPT.eraseInstruction(SExtOpnd); 4133 4134 // Check if the extension is still needed. 4135 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 4136 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 4137 if (ExtInst) { 4138 if (Exts) 4139 Exts->push_back(ExtInst); 4140 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 4141 } 4142 return ExtVal; 4143 } 4144 4145 // At this point we have: ext ty opnd to ty. 4146 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 4147 Value *NextVal = ExtInst->getOperand(0); 4148 TPT.eraseInstruction(ExtInst, NextVal); 4149 return NextVal; 4150 } 4151 4152 Value *TypePromotionHelper::promoteOperandForOther( 4153 Instruction *Ext, TypePromotionTransaction &TPT, 4154 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4155 SmallVectorImpl<Instruction *> *Exts, 4156 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 4157 bool IsSExt) { 4158 // By construction, the operand of Ext is an instruction. Otherwise we cannot 4159 // get through it and this method should not be called. 4160 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 4161 CreatedInstsCost = 0; 4162 if (!ExtOpnd->hasOneUse()) { 4163 // ExtOpnd will be promoted. 4164 // All its uses, but Ext, will need to use a truncated value of the 4165 // promoted version. 4166 // Create the truncate now. 4167 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 4168 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 4169 // Insert it just after the definition. 4170 ITrunc->moveAfter(ExtOpnd); 4171 if (Truncs) 4172 Truncs->push_back(ITrunc); 4173 } 4174 4175 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 4176 // Restore the operand of Ext (which has been replaced by the previous call 4177 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 4178 TPT.setOperand(Ext, 0, ExtOpnd); 4179 } 4180 4181 // Get through the Instruction: 4182 // 1. 
Update its type. 4183 // 2. Replace the uses of Ext by Inst. 4184 // 3. Extend each operand that needs to be extended. 4185 4186 // Remember the original type of the instruction before promotion. 4187 // This is useful to know that the high bits are sign extended bits. 4188 addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); 4189 // Step #1. 4190 TPT.mutateType(ExtOpnd, Ext->getType()); 4191 // Step #2. 4192 TPT.replaceAllUsesWith(Ext, ExtOpnd); 4193 // Step #3. 4194 Instruction *ExtForOpnd = Ext; 4195 4196 LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); 4197 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 4198 ++OpIdx) { 4199 LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 4200 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 4201 !shouldExtOperand(ExtOpnd, OpIdx)) { 4202 LLVM_DEBUG(dbgs() << "No need to propagate\n"); 4203 continue; 4204 } 4205 // Check if we can statically extend the operand. 4206 Value *Opnd = ExtOpnd->getOperand(OpIdx); 4207 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 4208 LLVM_DEBUG(dbgs() << "Statically extend\n"); 4209 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 4210 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 4211 : Cst->getValue().zext(BitWidth); 4212 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 4213 continue; 4214 } 4215 // UndefValue are typed, so we have to statically sign extend them. 4216 if (isa<UndefValue>(Opnd)) { 4217 LLVM_DEBUG(dbgs() << "Statically extend\n"); 4218 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); 4219 continue; 4220 } 4221 4222 // Otherwise we have to explicitly sign extend the operand. 4223 // Check if Ext was reused to extend an operand. 4224 if (!ExtForOpnd) { 4225 // If yes, create a new one. 4226 LLVM_DEBUG(dbgs() << "More operands to ext\n"); 4227 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) 4228 : TPT.createZExt(Ext, Opnd, Ext->getType()); 4229 if (!isa<Instruction>(ValForExtOpnd)) { 4230 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); 4231 continue; 4232 } 4233 ExtForOpnd = cast<Instruction>(ValForExtOpnd); 4234 } 4235 if (Exts) 4236 Exts->push_back(ExtForOpnd); 4237 TPT.setOperand(ExtForOpnd, 0, Opnd); 4238 4239 // Move the sign extension before the insertion point. 4240 TPT.moveBefore(ExtForOpnd, ExtOpnd); 4241 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); 4242 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); 4243 // If more sext are required, new instructions will have to be created. 4244 ExtForOpnd = nullptr; 4245 } 4246 if (ExtForOpnd == Ext) { 4247 LLVM_DEBUG(dbgs() << "Extension is useless now\n"); 4248 TPT.eraseInstruction(Ext); 4249 } 4250 return ExtOpnd; 4251 } 4252 4253 /// Check whether or not promoting an instruction to a wider type is profitable. 4254 /// \p NewCost gives the cost of extension instructions created by the 4255 /// promotion. 4256 /// \p OldCost gives the cost of extension instructions before the promotion 4257 /// plus the number of instructions that have been 4258 /// matched in the addressing mode the promotion. 4259 /// \p PromotedOperand is the value that has been promoted. 4260 /// \return True if the promotion is profitable, false otherwise. 
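/// For illustration (hypothetical numbers): if the promotion created one
/// non-free extension (NewCost = 1) while the original extension cost 1 and
/// two additional instructions could be folded into the addressing mode
/// (OldCost = 1 + 2 = 3), the promotion is considered profitable.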
4261 bool AddressingModeMatcher::isPromotionProfitable( 4262 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { 4263 LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost 4264 << '\n'); 4265 // The cost of the new extensions is greater than the cost of the 4266 // old extension plus what we folded. 4267 // This is not profitable. 4268 if (NewCost > OldCost) 4269 return false; 4270 if (NewCost < OldCost) 4271 return true; 4272 // The promotion is neutral but it may help folding the sign extension in 4273 // loads for instance. 4274 // Check that we did not create an illegal instruction. 4275 return isPromotedInstructionLegal(TLI, DL, PromotedOperand); 4276 } 4277 4278 /// Given an instruction or constant expr, see if we can fold the operation 4279 /// into the addressing mode. If so, update the addressing mode and return 4280 /// true, otherwise return false without modifying AddrMode. 4281 /// If \p MovedAway is not NULL, it contains the information of whether or 4282 /// not AddrInst has to be folded into the addressing mode on success. 4283 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing 4284 /// because it has been moved away. 4285 /// Thus AddrInst must not be added in the matched instructions. 4286 /// This state can happen when AddrInst is a sext, since it may be moved away. 4287 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 4288 /// not be referenced anymore. 4289 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, 4290 unsigned Depth, 4291 bool *MovedAway) { 4292 // Avoid exponential behavior on extremely deep expression trees. 4293 if (Depth >= 5) return false; 4294 4295 // By default, all matched instructions stay in place. 4296 if (MovedAway) 4297 *MovedAway = false; 4298 4299 switch (Opcode) { 4300 case Instruction::PtrToInt: 4301 // PtrToInt is always a noop, as we know that the int type is pointer sized. 4302 return matchAddr(AddrInst->getOperand(0), Depth); 4303 case Instruction::IntToPtr: { 4304 auto AS = AddrInst->getType()->getPointerAddressSpace(); 4305 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 4306 // This inttoptr is a no-op if the integer type is pointer sized. 4307 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 4308 return matchAddr(AddrInst->getOperand(0), Depth); 4309 return false; 4310 } 4311 case Instruction::BitCast: 4312 // BitCast is always a noop, and we can handle it as long as it is 4313 // int->int or pointer->pointer (we don't want int<->fp or something). 4314 if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && 4315 // Don't touch identity bitcasts. These were probably put here by LSR, 4316 // and we don't want to mess around with them. Assume it knows what it 4317 // is doing. 4318 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 4319 return matchAddr(AddrInst->getOperand(0), Depth); 4320 return false; 4321 case Instruction::AddrSpaceCast: { 4322 unsigned SrcAS 4323 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 4324 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 4325 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 4326 return matchAddr(AddrInst->getOperand(0), Depth); 4327 return false; 4328 } 4329 case Instruction::Add: { 4330 // Check to see if we can merge in the RHS then the LHS. If so, we win. 4331 ExtAddrMode BackupAddrMode = AddrMode; 4332 unsigned OldSize = AddrModeInsts.size(); 4333 // Start a transaction at this point. 
4334 // The LHS may match but not the RHS. 4335 // Therefore, we need a higher level restoration point to undo partially 4336 // matched operation. 4337 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4338 TPT.getRestorationPoint(); 4339 4340 AddrMode.InBounds = false; 4341 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 4342 matchAddr(AddrInst->getOperand(0), Depth+1)) 4343 return true; 4344 4345 // Restore the old addr mode info. 4346 AddrMode = BackupAddrMode; 4347 AddrModeInsts.resize(OldSize); 4348 TPT.rollback(LastKnownGood); 4349 4350 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 4351 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 4352 matchAddr(AddrInst->getOperand(1), Depth+1)) 4353 return true; 4354 4355 // Otherwise we definitely can't merge the ADD in. 4356 AddrMode = BackupAddrMode; 4357 AddrModeInsts.resize(OldSize); 4358 TPT.rollback(LastKnownGood); 4359 break; 4360 } 4361 //case Instruction::Or: 4362 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 4363 //break; 4364 case Instruction::Mul: 4365 case Instruction::Shl: { 4366 // Can only handle X*C and X << C. 4367 AddrMode.InBounds = false; 4368 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 4369 if (!RHS || RHS->getBitWidth() > 64) 4370 return false; 4371 int64_t Scale = RHS->getSExtValue(); 4372 if (Opcode == Instruction::Shl) 4373 Scale = 1LL << Scale; 4374 4375 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 4376 } 4377 case Instruction::GetElementPtr: { 4378 // Scan the GEP. We check it if it contains constant offsets and at most 4379 // one variable offset. 4380 int VariableOperand = -1; 4381 unsigned VariableScale = 0; 4382 4383 int64_t ConstantOffset = 0; 4384 gep_type_iterator GTI = gep_type_begin(AddrInst); 4385 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 4386 if (StructType *STy = GTI.getStructTypeOrNull()) { 4387 const StructLayout *SL = DL.getStructLayout(STy); 4388 unsigned Idx = 4389 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 4390 ConstantOffset += SL->getElementOffset(Idx); 4391 } else { 4392 TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType()); 4393 if (TS.isNonZero()) { 4394 // The optimisations below currently only work for fixed offsets. 4395 if (TS.isScalable()) 4396 return false; 4397 int64_t TypeSize = TS.getFixedSize(); 4398 if (ConstantInt *CI = 4399 dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 4400 const APInt &CVal = CI->getValue(); 4401 if (CVal.getMinSignedBits() <= 64) { 4402 ConstantOffset += CVal.getSExtValue() * TypeSize; 4403 continue; 4404 } 4405 } 4406 // We only allow one variable index at the moment. 4407 if (VariableOperand != -1) 4408 return false; 4409 4410 // Remember the variable index. 4411 VariableOperand = i; 4412 VariableScale = TypeSize; 4413 } 4414 } 4415 } 4416 4417 // A common case is for the GEP to only do a constant offset. In this case, 4418 // just add it to the disp field and check validity. 4419 if (VariableOperand == -1) { 4420 AddrMode.BaseOffs += ConstantOffset; 4421 if (ConstantOffset == 0 || 4422 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 4423 // Check to see if we can fold the base pointer in too. 
4424 if (matchAddr(AddrInst->getOperand(0), Depth+1)) { 4425 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4426 AddrMode.InBounds = false; 4427 return true; 4428 } 4429 } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && 4430 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && 4431 ConstantOffset > 0) { 4432 // Record GEPs with non-zero offsets as candidates for splitting in the 4433 // event that the offset cannot fit into the r+i addressing mode. 4434 // Simple and common case that only one GEP is used in calculating the 4435 // address for the memory access. 4436 Value *Base = AddrInst->getOperand(0); 4437 auto *BaseI = dyn_cast<Instruction>(Base); 4438 auto *GEP = cast<GetElementPtrInst>(AddrInst); 4439 if (isa<Argument>(Base) || isa<GlobalValue>(Base) || 4440 (BaseI && !isa<CastInst>(BaseI) && 4441 !isa<GetElementPtrInst>(BaseI))) { 4442 // Make sure the parent block allows inserting non-PHI instructions 4443 // before the terminator. 4444 BasicBlock *Parent = 4445 BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); 4446 if (!Parent->getTerminator()->isEHPad()) 4447 LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); 4448 } 4449 } 4450 AddrMode.BaseOffs -= ConstantOffset; 4451 return false; 4452 } 4453 4454 // Save the valid addressing mode in case we can't match. 4455 ExtAddrMode BackupAddrMode = AddrMode; 4456 unsigned OldSize = AddrModeInsts.size(); 4457 4458 // See if the scale and offset amount is valid for this target. 4459 AddrMode.BaseOffs += ConstantOffset; 4460 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4461 AddrMode.InBounds = false; 4462 4463 // Match the base operand of the GEP. 4464 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 4465 // If it couldn't be matched, just stuff the value in a register. 4466 if (AddrMode.HasBaseReg) { 4467 AddrMode = BackupAddrMode; 4468 AddrModeInsts.resize(OldSize); 4469 return false; 4470 } 4471 AddrMode.HasBaseReg = true; 4472 AddrMode.BaseReg = AddrInst->getOperand(0); 4473 } 4474 4475 // Match the remaining variable portion of the GEP. 4476 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 4477 Depth)) { 4478 // If it couldn't be matched, try stuffing the base into a register 4479 // instead of matching it, and retrying the match of the scale. 4480 AddrMode = BackupAddrMode; 4481 AddrModeInsts.resize(OldSize); 4482 if (AddrMode.HasBaseReg) 4483 return false; 4484 AddrMode.HasBaseReg = true; 4485 AddrMode.BaseReg = AddrInst->getOperand(0); 4486 AddrMode.BaseOffs += ConstantOffset; 4487 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 4488 VariableScale, Depth)) { 4489 // If even that didn't work, bail. 4490 AddrMode = BackupAddrMode; 4491 AddrModeInsts.resize(OldSize); 4492 return false; 4493 } 4494 } 4495 4496 return true; 4497 } 4498 case Instruction::SExt: 4499 case Instruction::ZExt: { 4500 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 4501 if (!Ext) 4502 return false; 4503 4504 // Try to move this ext out of the way of the addressing mode. 4505 // Ask for a method for doing so. 4506 TypePromotionHelper::Action TPH = 4507 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 4508 if (!TPH) 4509 return false; 4510 4511 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4512 TPT.getRestorationPoint(); 4513 unsigned CreatedInstsCost = 0; 4514 unsigned ExtCost = !TLI.isExtFree(Ext); 4515 Value *PromotedOperand = 4516 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 4517 // SExt has been moved away. 
4518 // Thus either it will be rematched later in the recursive calls or it is 4519 // gone. Anyway, we must not fold it into the addressing mode at this point. 4520 // E.g., 4521 // op = add opnd, 1 4522 // idx = ext op 4523 // addr = gep base, idx 4524 // is now: 4525 // promotedOpnd = ext opnd <- no match here 4526 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 4527 // addr = gep base, op <- match 4528 if (MovedAway) 4529 *MovedAway = true; 4530 4531 assert(PromotedOperand && 4532 "TypePromotionHelper should have filtered out those cases"); 4533 4534 ExtAddrMode BackupAddrMode = AddrMode; 4535 unsigned OldSize = AddrModeInsts.size(); 4536 4537 if (!matchAddr(PromotedOperand, Depth) || 4538 // The total of the new cost is equal to the cost of the created 4539 // instructions. 4540 // The total of the old cost is equal to the cost of the extension plus 4541 // what we have saved in the addressing mode. 4542 !isPromotionProfitable(CreatedInstsCost, 4543 ExtCost + (AddrModeInsts.size() - OldSize), 4544 PromotedOperand)) { 4545 AddrMode = BackupAddrMode; 4546 AddrModeInsts.resize(OldSize); 4547 LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 4548 TPT.rollback(LastKnownGood); 4549 return false; 4550 } 4551 return true; 4552 } 4553 } 4554 return false; 4555 } 4556 4557 /// If we can, try to add the value of 'Addr' into the current addressing mode. 4558 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 4559 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 4560 /// for the target. 4561 /// 4562 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 4563 // Start a transaction at this point that we will rollback if the matching 4564 // fails. 4565 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4566 TPT.getRestorationPoint(); 4567 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 4568 if (CI->getValue().isSignedIntN(64)) { 4569 // Fold in immediates if legal for the target. 4570 AddrMode.BaseOffs += CI->getSExtValue(); 4571 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4572 return true; 4573 AddrMode.BaseOffs -= CI->getSExtValue(); 4574 } 4575 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 4576 // If this is a global variable, try to fold it into the addressing mode. 4577 if (!AddrMode.BaseGV) { 4578 AddrMode.BaseGV = GV; 4579 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4580 return true; 4581 AddrMode.BaseGV = nullptr; 4582 } 4583 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 4584 ExtAddrMode BackupAddrMode = AddrMode; 4585 unsigned OldSize = AddrModeInsts.size(); 4586 4587 // Check to see if it is possible to fold this operation. 4588 bool MovedAway = false; 4589 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 4590 // This instruction may have been moved away. If so, there is nothing 4591 // to check here. 4592 if (MovedAway) 4593 return true; 4594 // Okay, it's possible to fold this. Check to see if it is actually 4595 // *profitable* to do so. We use a simple cost model to avoid increasing 4596 // register pressure too much. 4597 if (I->hasOneUse() || 4598 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 4599 AddrModeInsts.push_back(I); 4600 return true; 4601 } 4602 4603 // It isn't profitable to do this, roll back. 
4604 //cerr << "NOT FOLDING: " << *I; 4605 AddrMode = BackupAddrMode; 4606 AddrModeInsts.resize(OldSize); 4607 TPT.rollback(LastKnownGood); 4608 } 4609 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 4610 if (matchOperationAddr(CE, CE->getOpcode(), Depth)) 4611 return true; 4612 TPT.rollback(LastKnownGood); 4613 } else if (isa<ConstantPointerNull>(Addr)) { 4614 // Null pointer gets folded without affecting the addressing mode. 4615 return true; 4616 } 4617 4618 // Worse case, the target should support [reg] addressing modes. :) 4619 if (!AddrMode.HasBaseReg) { 4620 AddrMode.HasBaseReg = true; 4621 AddrMode.BaseReg = Addr; 4622 // Still check for legality in case the target supports [imm] but not [i+r]. 4623 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4624 return true; 4625 AddrMode.HasBaseReg = false; 4626 AddrMode.BaseReg = nullptr; 4627 } 4628 4629 // If the base register is already taken, see if we can do [r+r]. 4630 if (AddrMode.Scale == 0) { 4631 AddrMode.Scale = 1; 4632 AddrMode.ScaledReg = Addr; 4633 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4634 return true; 4635 AddrMode.Scale = 0; 4636 AddrMode.ScaledReg = nullptr; 4637 } 4638 // Couldn't match. 4639 TPT.rollback(LastKnownGood); 4640 return false; 4641 } 4642 4643 /// Check to see if all uses of OpVal by the specified inline asm call are due 4644 /// to memory operands. If so, return true, otherwise return false. 4645 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 4646 const TargetLowering &TLI, 4647 const TargetRegisterInfo &TRI) { 4648 const Function *F = CI->getFunction(); 4649 TargetLowering::AsmOperandInfoVector TargetConstraints = 4650 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI); 4651 4652 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4653 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4654 4655 // Compute the constraint code and ConstraintType to use. 4656 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 4657 4658 // If this asm operand is our Value*, and if it isn't an indirect memory 4659 // operand, we can't fold it! 4660 if (OpInfo.CallOperandVal == OpVal && 4661 (OpInfo.ConstraintType != TargetLowering::C_Memory || 4662 !OpInfo.isIndirect)) 4663 return false; 4664 } 4665 4666 return true; 4667 } 4668 4669 // Max number of memory uses to look at before aborting the search to conserve 4670 // compile time. 4671 static constexpr int MaxMemoryUsesToScan = 20; 4672 4673 /// Recursively walk all the uses of I until we find a memory use. 4674 /// If we find an obviously non-foldable instruction, return true. 4675 /// Add the ultimately found memory instructions to MemoryUses. 4676 static bool FindAllMemoryUses( 4677 Instruction *I, 4678 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 4679 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, 4680 const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, 4681 BlockFrequencyInfo *BFI, int SeenInsts = 0) { 4682 // If we already considered this instruction, we're done. 4683 if (!ConsideredInsts.insert(I).second) 4684 return false; 4685 4686 // If this is an obviously unfoldable instruction, bail out. 4687 if (!MightBeFoldableInst(I)) 4688 return true; 4689 4690 // Loop over all the uses, recursively processing them. 4691 for (Use &U : I->uses()) { 4692 // Conservatively return true if we're seeing a large number or a deep chain 4693 // of users. 
This avoids excessive compilation times in pathological cases. 4694 if (SeenInsts++ >= MaxMemoryUsesToScan) 4695 return true; 4696 4697 Instruction *UserI = cast<Instruction>(U.getUser()); 4698 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 4699 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 4700 continue; 4701 } 4702 4703 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 4704 unsigned opNo = U.getOperandNo(); 4705 if (opNo != StoreInst::getPointerOperandIndex()) 4706 return true; // Storing addr, not into addr. 4707 MemoryUses.push_back(std::make_pair(SI, opNo)); 4708 continue; 4709 } 4710 4711 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 4712 unsigned opNo = U.getOperandNo(); 4713 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 4714 return true; // Storing addr, not into addr. 4715 MemoryUses.push_back(std::make_pair(RMW, opNo)); 4716 continue; 4717 } 4718 4719 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 4720 unsigned opNo = U.getOperandNo(); 4721 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 4722 return true; // Storing addr, not into addr. 4723 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 4724 continue; 4725 } 4726 4727 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 4728 if (CI->hasFnAttr(Attribute::Cold)) { 4729 // If this is a cold call, we can sink the addressing calculation into 4730 // the cold path. See optimizeCallInst 4731 bool OptForSize = OptSize || 4732 llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI); 4733 if (!OptForSize) 4734 continue; 4735 } 4736 4737 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand()); 4738 if (!IA) return true; 4739 4740 // If this is a memory operand, we're cool, otherwise bail out. 4741 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 4742 return true; 4743 continue; 4744 } 4745 4746 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, 4747 PSI, BFI, SeenInsts)) 4748 return true; 4749 } 4750 4751 return false; 4752 } 4753 4754 /// Return true if Val is already known to be live at the use site that we're 4755 /// folding it into. If so, there is no cost to include it in the addressing 4756 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 4757 /// instruction already. 4758 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 4759 Value *KnownLive2) { 4760 // If Val is either of the known-live values, we know it is live! 4761 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 4762 return true; 4763 4764 // All values other than instructions and arguments (e.g. constants) are live. 4765 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 4766 4767 // If Val is a constant sized alloca in the entry block, it is live, this is 4768 // true because it is just a reference to the stack/frame pointer, which is 4769 // live for the whole function. 4770 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 4771 if (AI->isStaticAlloca()) 4772 return true; 4773 4774 // Check to see if this value is already used in the memory instruction's 4775 // block. If so, it's already live into the block at the very least, so we 4776 // can reasonably fold it. 4777 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 4778 } 4779 4780 /// It is possible for the addressing mode of the machine to fold the specified 4781 /// instruction into a load or store that ultimately uses it. 4782 /// However, the specified instruction has multiple uses. 
4783 /// Given this, it may actually increase register pressure to fold it 4784 /// into the load. For example, consider this code: 4785 /// 4786 /// X = ... 4787 /// Y = X+1 4788 /// use(Y) -> nonload/store 4789 /// Z = Y+1 4790 /// load Z 4791 /// 4792 /// In this case, Y has multiple uses, and can be folded into the load of Z 4793 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 4794 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 4795 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 4796 /// number of computations either. 4797 /// 4798 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 4799 /// X was live across 'load Z' for other reasons, we actually *would* want to 4800 /// fold the addressing mode in the Z case. This would make Y die earlier. 4801 bool AddressingModeMatcher:: 4802 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 4803 ExtAddrMode &AMAfter) { 4804 if (IgnoreProfitability) return true; 4805 4806 // AMBefore is the addressing mode before this instruction was folded into it, 4807 // and AMAfter is the addressing mode after the instruction was folded. Get 4808 // the set of registers referenced by AMAfter and subtract out those 4809 // referenced by AMBefore: this is the set of values which folding in this 4810 // address extends the lifetime of. 4811 // 4812 // Note that there are only two potential values being referenced here, 4813 // BaseReg and ScaleReg (global addresses are always available, as are any 4814 // folded immediates). 4815 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 4816 4817 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 4818 // lifetime wasn't extended by adding this instruction. 4819 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4820 BaseReg = nullptr; 4821 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4822 ScaledReg = nullptr; 4823 4824 // If folding this instruction (and it's subexprs) didn't extend any live 4825 // ranges, we're ok with it. 4826 if (!BaseReg && !ScaledReg) 4827 return true; 4828 4829 // If all uses of this instruction can have the address mode sunk into them, 4830 // we can remove the addressing mode and effectively trade one live register 4831 // for another (at worst.) In this context, folding an addressing mode into 4832 // the use is just a particularly nice way of sinking it. 4833 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 4834 SmallPtrSet<Instruction*, 16> ConsideredInsts; 4835 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, 4836 PSI, BFI)) 4837 return false; // Has a non-memory, non-foldable use! 4838 4839 // Now that we know that all uses of this instruction are part of a chain of 4840 // computation involving only operations that could theoretically be folded 4841 // into a memory use, loop over each of these memory operation uses and see 4842 // if they could *actually* fold the instruction. The assumption is that 4843 // addressing modes are cheap and that duplicating the computation involved 4844 // many times is worthwhile, even on a fastpath. For sinking candidates 4845 // (i.e. cold call sites), this serves as a way to prevent excessive code 4846 // growth since most architectures have some reasonable small and fast way to 4847 // compute an effective address. 
(e.g., LEA on x86) 4848 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
4849 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
4850 Instruction *User = MemoryUses[i].first;
4851 unsigned OpNo = MemoryUses[i].second;
4852
4853 // Get the access type of this use. If the use isn't a pointer, we don't
4854 // know what it accesses.
4855 Value *Address = User->getOperand(OpNo);
4856 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
4857 if (!AddrTy)
4858 return false;
4859 Type *AddressAccessTy = AddrTy->getElementType();
4860 unsigned AS = AddrTy->getAddressSpace();
4861
4862 // Do a match against the root of this address, ignoring profitability. This
4863 // will tell us if the addressing mode for the memory operation will
4864 // *actually* cover the shared instruction.
4865 ExtAddrMode Result;
4866 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
4867 0);
4868 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4869 TPT.getRestorationPoint();
4870 AddressingModeMatcher Matcher(
4871 MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result,
4872 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, BFI);
4873 Matcher.IgnoreProfitability = true;
4874 bool Success = Matcher.matchAddr(Address, 0);
4875 (void)Success; assert(Success && "Couldn't select *anything*?");
4876
4877 // The match was only done to check profitability; the changes made are not
4878 // part of the original matcher. Therefore, they should be dropped,
4879 // otherwise the original matcher will not present the right state.
4880 TPT.rollback(LastKnownGood);
4881
4882 // If the match didn't cover I, then it won't be shared by it.
4883 if (!is_contained(MatchedAddrModeInsts, I))
4884 return false;
4885
4886 MatchedAddrModeInsts.clear();
4887 }
4888
4889 return true;
4890 }
4891
4892 /// Return true if the specified value is defined in a
4893 /// different basic block than BB.
4894 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
4895 if (Instruction *I = dyn_cast<Instruction>(V))
4896 return I->getParent() != BB;
4897 return false;
4898 }
4899
4900 /// Sink addressing mode computation immediately before MemoryInst if doing so
4901 /// can be done without increasing register pressure. The need for the
4902 /// register pressure constraint means this can end up being an all-or-nothing
4903 /// decision for all uses of the same addressing computation.
4904 ///
4905 /// Load and store instructions often have addressing modes that can do
4906 /// significant amounts of computation. As such, instruction selection will try
4907 /// to get the load or store to do as much computation as possible for the
4908 /// program. The problem is that isel can only see within a single block. As
4909 /// such, we sink as much legal addressing mode work into the block as possible.
4910 ///
4911 /// This method is used to optimize both load/store and inline asms with memory
4912 /// operands. It's also used to sink addressing computations feeding into cold
4913 /// call sites into their (cold) basic block.
4914 ///
4915 /// The motivation for handling sinking into cold blocks is that doing so can
4916 /// both enable other address mode sinking (by satisfying the register pressure
4917 /// constraint above), and reduce register pressure globally (by removing the
4918 /// addressing mode computation from the fast path entirely).
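///
/// As a purely illustrative sketch (hypothetical IR, not taken from a test
/// case), an address computed in another block:
/// \code
///   bb0:
///     %addr = getelementptr inbounds i32, i32* %base, i64 4
///     br label %bb1
///   bb1:
///     %v = load i32, i32* %addr
/// \endcode
/// is roughly turned into
/// \code
///   bb1:
///     %sunkaddr = getelementptr i8, i8* %base.i8, i64 16
///     %addr.cast = bitcast i8* %sunkaddr to i32*
///     %v = load i32, i32* %addr.cast
/// \endcode
/// so that instruction selection can fold the whole computation into the
/// load's addressing mode. Here %base.i8 stands for %base cast to i8*, and
/// the names %sunkaddr/%addr.cast are only illustrative; the exact shape of
/// the sunk computation depends on the matched AddrMode and the target.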
4919 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
4920 Type *AccessTy, unsigned AddrSpace) {
4921 Value *Repl = Addr;
4922
4923 // Try to collapse single-value PHI nodes. This is necessary to undo
4924 // unprofitable PRE transformations.
4925 SmallVector<Value*, 8> worklist;
4926 SmallPtrSet<Value*, 16> Visited;
4927 worklist.push_back(Addr);
4928
4929 // Use a worklist to iteratively look through PHI and select nodes, and
4930 // ensure that the addressing modes obtained from the non-PHI/select roots of
4931 // the graph are compatible.
4932 bool PhiOrSelectSeen = false;
4933 SmallVector<Instruction*, 16> AddrModeInsts;
4934 const SimplifyQuery SQ(*DL, TLInfo);
4935 AddressingModeCombiner AddrModes(SQ, Addr);
4936 TypePromotionTransaction TPT(RemovedInsts);
4937 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4938 TPT.getRestorationPoint();
4939 while (!worklist.empty()) {
4940 Value *V = worklist.back();
4941 worklist.pop_back();
4942
4943 // We allow traversing cyclic Phi nodes.
4944 // In case of success after this loop we ensure that traversing through
4945 // Phi nodes ends up with all cases computing an address of the form
4946 // BaseGV + Base + Scale * Index + Offset
4947 // where Scale and Offset are constants and BaseGV, Base and Index
4948 // are exactly the same Values in all cases.
4949 // It means that BaseGV, Scale and Offset dominate our memory instruction
4950 // and have the same value as they had in the address computation represented
4951 // as a Phi, so we can safely sink the address computation to the memory instruction.
4952 if (!Visited.insert(V).second)
4953 continue;
4954
4955 // For a PHI node, push all of its incoming values.
4956 if (PHINode *P = dyn_cast<PHINode>(V)) {
4957 for (Value *IncValue : P->incoming_values())
4958 worklist.push_back(IncValue);
4959 PhiOrSelectSeen = true;
4960 continue;
4961 }
4962 // Similar for select.
4963 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
4964 worklist.push_back(SI->getFalseValue());
4965 worklist.push_back(SI->getTrueValue());
4966 PhiOrSelectSeen = true;
4967 continue;
4968 }
4969
4970 // For non-PHIs, determine the addressing mode being computed. Note that
4971 // the result may differ depending on what other uses our candidate
4972 // addressing instructions might have.
4973 AddrModeInsts.clear();
4974 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
4975 0);
4976 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
4977 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
4978 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
4979 BFI.get());
4980
4981 GetElementPtrInst *GEP = LargeOffsetGEP.first;
4982 if (GEP && !NewGEPBases.count(GEP)) {
4983 // If splitting the underlying data structure can reduce the offset of a
4984 // GEP, collect the GEP. Skip the GEPs that are the new bases of
4985 // previously split data structures.
4986 LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
4987 if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
4988 LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
4989 }
4990
4991 NewAddrMode.OriginalValue = V;
4992 if (!AddrModes.addNewAddrMode(NewAddrMode))
4993 break;
4994 }
4995
4996 // Try to combine the AddrModes we've collected. If we couldn't collect any,
4997 // or we have multiple but either couldn't combine them or combining them
4998 // wouldn't do anything useful, bail out now.
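// As a hedged illustration of what "combining" means here (hypothetical IR):
// if Addr is a select between two pointers,
//   %p = select i1 %c, i32* %a, i32* %b
//   %v = load i32, i32* %p
// the worklist above produces two ExtAddrModes that are identical except for
// BaseReg, and the combiner may merge them into a single mode whose BaseReg
// is a new select (or phi, for PHI roots) of %a and %b placed before the
// memory instruction; the NumMemoryInstsSelectCreated and
// NumMemoryInstsPhiCreated statistics count these cases.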
4999 if (!AddrModes.combineAddrModes()) {
5000 TPT.rollback(LastKnownGood);
5001 return false;
5002 }
5003 bool Modified = TPT.commit();
5004
5005 // Get the combined AddrMode (or the only AddrMode, if we only had one).
5006 ExtAddrMode AddrMode = AddrModes.getAddrMode();
5007
5008 // If all the instructions matched are already in this BB, don't do anything.
5009 // If we saw a Phi node then it is definitely not local, and if we saw a select
5010 // then we want to push the address calculation past it even if it's already
5011 // in this BB.
5012 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5013 return IsNonLocalValue(V, MemoryInst->getParent());
5014 })) {
5015 LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
5016 << "\n");
5017 return Modified;
5018 }
5019
5020 // Insert this computation right after this user. Since our caller is
5021 // scanning from the top of the BB to the bottom, reuse of the expr is
5022 // guaranteed to happen later.
5023 IRBuilder<> Builder(MemoryInst);
5024
5025 // Now that we've determined the addressing expression we want to use and
5026 // know that we have to sink it into this block, check to see if we have
5027 // already done this for some other load/store instr in this block. If so,
5028 // reuse the computation. Before attempting reuse, check if the address is
5029 // valid as it may have been erased.
5030
5031 WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5032
5033 Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5034 if (SunkAddr) {
5035 LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5036 << " for " << *MemoryInst << "\n");
5037 if (SunkAddr->getType() != Addr->getType())
5038 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5039 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5040 SubtargetInfo->addrSinkUsingGEPs())) {
5041 // By default, we use the GEP-based method when AA is used later. This
5042 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5043 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5044 << " for " << *MemoryInst << "\n");
5045 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5046 Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5047
5048 // First, find the pointer.
5049 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5050 ResultPtr = AddrMode.BaseReg;
5051 AddrMode.BaseReg = nullptr;
5052 }
5053
5054 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5055 // We can't add more than one pointer together, nor can we scale a
5056 // pointer (both of which seem meaningless).
5057 if (ResultPtr || AddrMode.Scale != 1)
5058 return Modified;
5059
5060 ResultPtr = AddrMode.ScaledReg;
5061 AddrMode.Scale = 0;
5062 }
5063
5064 // It is only safe to sign extend the BaseReg if we know that the math
5065 // required to create it did not overflow before we extend it. Since
5066 // the original IR value was tossed in favor of a constant back when
5067 // the AddrMode was created we need to bail out gracefully if widths
5068 // do not match instead of extending it.
5069 //
5070 // (See below for code to add the scale.)
5071 if (AddrMode.Scale) { 5072 Type *ScaledRegTy = AddrMode.ScaledReg->getType(); 5073 if (cast<IntegerType>(IntPtrTy)->getBitWidth() > 5074 cast<IntegerType>(ScaledRegTy)->getBitWidth()) 5075 return Modified; 5076 } 5077 5078 if (AddrMode.BaseGV) { 5079 if (ResultPtr) 5080 return Modified; 5081 5082 ResultPtr = AddrMode.BaseGV; 5083 } 5084 5085 // If the real base value actually came from an inttoptr, then the matcher 5086 // will look through it and provide only the integer value. In that case, 5087 // use it here. 5088 if (!DL->isNonIntegralPointerType(Addr->getType())) { 5089 if (!ResultPtr && AddrMode.BaseReg) { 5090 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), 5091 "sunkaddr"); 5092 AddrMode.BaseReg = nullptr; 5093 } else if (!ResultPtr && AddrMode.Scale == 1) { 5094 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), 5095 "sunkaddr"); 5096 AddrMode.Scale = 0; 5097 } 5098 } 5099 5100 if (!ResultPtr && 5101 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 5102 SunkAddr = Constant::getNullValue(Addr->getType()); 5103 } else if (!ResultPtr) { 5104 return Modified; 5105 } else { 5106 Type *I8PtrTy = 5107 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 5108 Type *I8Ty = Builder.getInt8Ty(); 5109 5110 // Start with the base register. Do this first so that subsequent address 5111 // matching finds it last, which will prevent it from trying to match it 5112 // as the scaled value in case it happens to be a mul. That would be 5113 // problematic if we've sunk a different mul for the scale, because then 5114 // we'd end up sinking both muls. 5115 if (AddrMode.BaseReg) { 5116 Value *V = AddrMode.BaseReg; 5117 if (V->getType() != IntPtrTy) 5118 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 5119 5120 ResultIndex = V; 5121 } 5122 5123 // Add the scale value. 5124 if (AddrMode.Scale) { 5125 Value *V = AddrMode.ScaledReg; 5126 if (V->getType() == IntPtrTy) { 5127 // done. 5128 } else { 5129 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < 5130 cast<IntegerType>(V->getType())->getBitWidth() && 5131 "We can't transform if ScaledReg is too narrow"); 5132 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 5133 } 5134 5135 if (AddrMode.Scale != 1) 5136 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 5137 "sunkaddr"); 5138 if (ResultIndex) 5139 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 5140 else 5141 ResultIndex = V; 5142 } 5143 5144 // Add in the Base Offset if present. 5145 if (AddrMode.BaseOffs) { 5146 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 5147 if (ResultIndex) { 5148 // We need to add this separately from the scale above to help with 5149 // SDAG consecutive load/store merging. 5150 if (ResultPtr->getType() != I8PtrTy) 5151 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 5152 ResultPtr = 5153 AddrMode.InBounds 5154 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 5155 "sunkaddr") 5156 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 5157 } 5158 5159 ResultIndex = V; 5160 } 5161 5162 if (!ResultIndex) { 5163 SunkAddr = ResultPtr; 5164 } else { 5165 if (ResultPtr->getType() != I8PtrTy) 5166 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 5167 SunkAddr = 5168 AddrMode.InBounds 5169 ? 
Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 5170 "sunkaddr") 5171 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 5172 } 5173 5174 if (SunkAddr->getType() != Addr->getType()) 5175 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 5176 } 5177 } else { 5178 // We'd require a ptrtoint/inttoptr down the line, which we can't do for 5179 // non-integral pointers, so in that case bail out now. 5180 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; 5181 Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; 5182 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); 5183 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); 5184 if (DL->isNonIntegralPointerType(Addr->getType()) || 5185 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || 5186 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || 5187 (AddrMode.BaseGV && 5188 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) 5189 return Modified; 5190 5191 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 5192 << " for " << *MemoryInst << "\n"); 5193 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 5194 Value *Result = nullptr; 5195 5196 // Start with the base register. Do this first so that subsequent address 5197 // matching finds it last, which will prevent it from trying to match it 5198 // as the scaled value in case it happens to be a mul. That would be 5199 // problematic if we've sunk a different mul for the scale, because then 5200 // we'd end up sinking both muls. 5201 if (AddrMode.BaseReg) { 5202 Value *V = AddrMode.BaseReg; 5203 if (V->getType()->isPointerTy()) 5204 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 5205 if (V->getType() != IntPtrTy) 5206 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 5207 Result = V; 5208 } 5209 5210 // Add the scale value. 5211 if (AddrMode.Scale) { 5212 Value *V = AddrMode.ScaledReg; 5213 if (V->getType() == IntPtrTy) { 5214 // done. 5215 } else if (V->getType()->isPointerTy()) { 5216 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 5217 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 5218 cast<IntegerType>(V->getType())->getBitWidth()) { 5219 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 5220 } else { 5221 // It is only safe to sign extend the BaseReg if we know that the math 5222 // required to create it did not overflow before we extend it. Since 5223 // the original IR value was tossed in favor of a constant back when 5224 // the AddrMode was created we need to bail out gracefully if widths 5225 // do not match instead of extending it. 5226 Instruction *I = dyn_cast_or_null<Instruction>(Result); 5227 if (I && (Result != AddrMode.BaseReg)) 5228 I->eraseFromParent(); 5229 return Modified; 5230 } 5231 if (AddrMode.Scale != 1) 5232 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 5233 "sunkaddr"); 5234 if (Result) 5235 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5236 else 5237 Result = V; 5238 } 5239 5240 // Add in the BaseGV if present. 5241 if (AddrMode.BaseGV) { 5242 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 5243 if (Result) 5244 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5245 else 5246 Result = V; 5247 } 5248 5249 // Add in the Base Offset if present. 
5250 if (AddrMode.BaseOffs) { 5251 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 5252 if (Result) 5253 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5254 else 5255 Result = V; 5256 } 5257 5258 if (!Result) 5259 SunkAddr = Constant::getNullValue(Addr->getType()); 5260 else 5261 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 5262 } 5263 5264 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 5265 // Store the newly computed address into the cache. In the case we reused a 5266 // value, this should be idempotent. 5267 SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); 5268 5269 // If we have no uses, recursively delete the value and all dead instructions 5270 // using it. 5271 if (Repl->use_empty()) { 5272 // This can cause recursive deletion, which can invalidate our iterator. 5273 // Use a WeakTrackingVH to hold onto it in case this happens. 5274 Value *CurValue = &*CurInstIterator; 5275 WeakTrackingVH IterHandle(CurValue); 5276 BasicBlock *BB = CurInstIterator->getParent(); 5277 5278 RecursivelyDeleteTriviallyDeadInstructions( 5279 Repl, TLInfo, nullptr, 5280 [&](Value *V) { removeAllAssertingVHReferences(V); }); 5281 5282 if (IterHandle != CurValue) { 5283 // If the iterator instruction was recursively deleted, start over at the 5284 // start of the block. 5285 CurInstIterator = BB->begin(); 5286 SunkAddrs.clear(); 5287 } 5288 } 5289 ++NumMemoryInsts; 5290 return true; 5291 } 5292 5293 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find 5294 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can 5295 /// only handle a 2 operand GEP in the same basic block or a splat constant 5296 /// vector. The 2 operands to the GEP must have a scalar pointer and a vector 5297 /// index. 5298 /// 5299 /// If the existing GEP has a vector base pointer that is splat, we can look 5300 /// through the splat to find the scalar pointer. If we can't find a scalar 5301 /// pointer there's nothing we can do. 5302 /// 5303 /// If we have a GEP with more than 2 indices where the middle indices are all 5304 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands. 5305 /// 5306 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP 5307 /// followed by a GEP with an all zeroes vector index. This will enable 5308 /// SelectionDAGBuilder to use a the scalar GEP as the uniform base and have a 5309 /// zero index. 5310 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst, 5311 Value *Ptr) { 5312 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 5313 if (!GEP || !GEP->hasIndices()) 5314 return false; 5315 5316 // If the GEP and the gather/scatter aren't in the same BB, don't optimize. 5317 // FIXME: We should support this by sinking the GEP. 5318 if (MemoryInst->getParent() != GEP->getParent()) 5319 return false; 5320 5321 SmallVector<Value *, 2> Ops(GEP->op_begin(), GEP->op_end()); 5322 5323 bool RewriteGEP = false; 5324 5325 if (Ops[0]->getType()->isVectorTy()) { 5326 Ops[0] = const_cast<Value *>(getSplatValue(Ops[0])); 5327 if (!Ops[0]) 5328 return false; 5329 RewriteGEP = true; 5330 } 5331 5332 unsigned FinalIndex = Ops.size() - 1; 5333 5334 // Ensure all but the last index is 0. 5335 // FIXME: This isn't strictly required. All that's required is that they are 5336 // all scalars or splats. 
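// As an illustrative sketch (hypothetical IR), the loop below accepts a
// pointer operand such as
//   %ptrs = getelementptr [16 x float], [16 x float]* %base, i64 0, <8 x i64> %idx
// because the only index before the last one is a constant zero; further
// down this is rebuilt as a scalar GEP to the start of the array followed by
// a two-operand GEP with the vector index:
//   %base.s = getelementptr [16 x float], [16 x float]* %base, i64 0, i64 0
//   %ptrs.v = getelementptr float, float* %base.s, <8 x i64> %idx
// The %base.s/%ptrs.v names are made up for this sketch.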
5337 for (unsigned i = 1; i < FinalIndex; ++i) { 5338 auto *C = dyn_cast<Constant>(Ops[i]); 5339 if (!C) 5340 return false; 5341 if (isa<VectorType>(C->getType())) 5342 C = C->getSplatValue(); 5343 auto *CI = dyn_cast_or_null<ConstantInt>(C); 5344 if (!CI || !CI->isZero()) 5345 return false; 5346 // Scalarize the index if needed. 5347 Ops[i] = CI; 5348 } 5349 5350 // Try to scalarize the final index. 5351 if (Ops[FinalIndex]->getType()->isVectorTy()) { 5352 if (Value *V = const_cast<Value *>(getSplatValue(Ops[FinalIndex]))) { 5353 auto *C = dyn_cast<ConstantInt>(V); 5354 // Don't scalarize all zeros vector. 5355 if (!C || !C->isZero()) { 5356 Ops[FinalIndex] = V; 5357 RewriteGEP = true; 5358 } 5359 } 5360 } 5361 5362 // If we made any changes or the we have extra operands, we need to generate 5363 // new instructions. 5364 if (!RewriteGEP && Ops.size() == 2) 5365 return false; 5366 5367 unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements(); 5368 5369 IRBuilder<> Builder(MemoryInst); 5370 5371 Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType()); 5372 5373 Value *NewAddr; 5374 5375 // If the final index isn't a vector, emit a scalar GEP containing all ops 5376 // and a vector GEP with all zeroes final index. 5377 if (!Ops[FinalIndex]->getType()->isVectorTy()) { 5378 NewAddr = Builder.CreateGEP(Ops[0], makeArrayRef(Ops).drop_front()); 5379 auto *IndexTy = FixedVectorType::get(ScalarIndexTy, NumElts); 5380 NewAddr = Builder.CreateGEP(NewAddr, Constant::getNullValue(IndexTy)); 5381 } else { 5382 Value *Base = Ops[0]; 5383 Value *Index = Ops[FinalIndex]; 5384 5385 // Create a scalar GEP if there are more than 2 operands. 5386 if (Ops.size() != 2) { 5387 // Replace the last index with 0. 5388 Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy); 5389 Base = Builder.CreateGEP(Base, makeArrayRef(Ops).drop_front()); 5390 } 5391 5392 // Now create the GEP with scalar pointer and vector index. 5393 NewAddr = Builder.CreateGEP(Base, Index); 5394 } 5395 5396 MemoryInst->replaceUsesOfWith(Ptr, NewAddr); 5397 5398 // If we have no uses, recursively delete the value and all dead instructions 5399 // using it. 5400 if (Ptr->use_empty()) 5401 RecursivelyDeleteTriviallyDeadInstructions( 5402 Ptr, TLInfo, nullptr, 5403 [&](Value *V) { removeAllAssertingVHReferences(V); }); 5404 5405 return true; 5406 } 5407 5408 /// If there are any memory operands, use OptimizeMemoryInst to sink their 5409 /// address computing into the block when possible / profitable. 5410 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 5411 bool MadeChange = false; 5412 5413 const TargetRegisterInfo *TRI = 5414 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); 5415 TargetLowering::AsmOperandInfoVector TargetConstraints = 5416 TLI->ParseConstraints(*DL, TRI, *CS); 5417 unsigned ArgNo = 0; 5418 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 5419 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 5420 5421 // Compute the constraint code and ConstraintType to use. 5422 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 5423 5424 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 5425 OpInfo.isIndirect) { 5426 Value *OpVal = CS->getArgOperand(ArgNo++); 5427 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 5428 } else if (OpInfo.Type == InlineAsm::isInput) 5429 ArgNo++; 5430 } 5431 5432 return MadeChange; 5433 } 5434 5435 /// Check if all the uses of \p Val are equivalent (or free) zero or 5436 /// sign extensions. 
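///
/// For instance (an illustrative sketch), with
/// \code
/// %z1 = zext i16 %val to i32
/// %z2 = zext i16 %val to i64
/// \endcode
/// the two uses count as equivalent when the target reports that a zext from
/// i32 to i64 is free, because the wider value can then be recreated from the
/// narrower one at no cost. A mix of sext and zext users, or two sext users
/// with different destination types, is rejected.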
5437 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 5438 assert(!Val->use_empty() && "Input must have at least one use"); 5439 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 5440 bool IsSExt = isa<SExtInst>(FirstUser); 5441 Type *ExtTy = FirstUser->getType(); 5442 for (const User *U : Val->users()) { 5443 const Instruction *UI = cast<Instruction>(U); 5444 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 5445 return false; 5446 Type *CurTy = UI->getType(); 5447 // Same input and output types: Same instruction after CSE. 5448 if (CurTy == ExtTy) 5449 continue; 5450 5451 // If IsSExt is true, we are in this situation: 5452 // a = Val 5453 // b = sext ty1 a to ty2 5454 // c = sext ty1 a to ty3 5455 // Assuming ty2 is shorter than ty3, this could be turned into: 5456 // a = Val 5457 // b = sext ty1 a to ty2 5458 // c = sext ty2 b to ty3 5459 // However, the last sext is not free. 5460 if (IsSExt) 5461 return false; 5462 5463 // This is a ZExt, maybe this is free to extend from one type to another. 5464 // In that case, we would not account for a different use. 5465 Type *NarrowTy; 5466 Type *LargeTy; 5467 if (ExtTy->getScalarType()->getIntegerBitWidth() > 5468 CurTy->getScalarType()->getIntegerBitWidth()) { 5469 NarrowTy = CurTy; 5470 LargeTy = ExtTy; 5471 } else { 5472 NarrowTy = ExtTy; 5473 LargeTy = CurTy; 5474 } 5475 5476 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 5477 return false; 5478 } 5479 // All uses are the same or can be derived from one another for free. 5480 return true; 5481 } 5482 5483 /// Try to speculatively promote extensions in \p Exts and continue 5484 /// promoting through newly promoted operands recursively as far as doing so is 5485 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 5486 /// When some promotion happened, \p TPT contains the proper state to revert 5487 /// them. 5488 /// 5489 /// \return true if some promotion happened, false otherwise. 5490 bool CodeGenPrepare::tryToPromoteExts( 5491 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 5492 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 5493 unsigned CreatedInstsCost) { 5494 bool Promoted = false; 5495 5496 // Iterate over all the extensions to try to promote them. 5497 for (auto *I : Exts) { 5498 // Early check if we directly have ext(load). 5499 if (isa<LoadInst>(I->getOperand(0))) { 5500 ProfitablyMovedExts.push_back(I); 5501 continue; 5502 } 5503 5504 // Check whether or not we want to do any promotion. The reason we have 5505 // this check inside the for loop is to catch the case where an extension 5506 // is directly fed by a load because in such case the extension can be moved 5507 // up without any promotion on its operands. 5508 if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion) 5509 return false; 5510 5511 // Get the action to perform the promotion. 5512 TypePromotionHelper::Action TPH = 5513 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 5514 // Check if we can promote. 5515 if (!TPH) { 5516 // Save the current extension as we cannot move up through its operand. 5517 ProfitablyMovedExts.push_back(I); 5518 continue; 5519 } 5520 5521 // Save the current state. 5522 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5523 TPT.getRestorationPoint(); 5524 SmallVector<Instruction *, 4> NewExts; 5525 unsigned NewCreatedInstsCost = 0; 5526 unsigned ExtCost = !TLI->isExtFree(I); 5527 // Promote. 
5528 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, 5529 &NewExts, nullptr, *TLI); 5530 assert(PromotedVal && 5531 "TypePromotionHelper should have filtered out those cases"); 5532 5533 // We would be able to merge only one extension in a load. 5534 // Therefore, if we have more than 1 new extension we heuristically 5535 // cut this search path, because it means we degrade the code quality. 5536 // With exactly 2, the transformation is neutral, because we will merge 5537 // one extension but leave one. However, we optimistically keep going, 5538 // because the new extension may be removed too. 5539 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; 5540 // FIXME: It would be possible to propagate a negative value instead of 5541 // conservatively ceiling it to 0. 5542 TotalCreatedInstsCost = 5543 std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); 5544 if (!StressExtLdPromotion && 5545 (TotalCreatedInstsCost > 1 || 5546 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { 5547 // This promotion is not profitable, rollback to the previous state, and 5548 // save the current extension in ProfitablyMovedExts as the latest 5549 // speculative promotion turned out to be unprofitable. 5550 TPT.rollback(LastKnownGood); 5551 ProfitablyMovedExts.push_back(I); 5552 continue; 5553 } 5554 // Continue promoting NewExts as far as doing so is profitable. 5555 SmallVector<Instruction *, 2> NewlyMovedExts; 5556 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); 5557 bool NewPromoted = false; 5558 for (auto *ExtInst : NewlyMovedExts) { 5559 Instruction *MovedExt = cast<Instruction>(ExtInst); 5560 Value *ExtOperand = MovedExt->getOperand(0); 5561 // If we have reached to a load, we need this extra profitability check 5562 // as it could potentially be merged into an ext(load). 5563 if (isa<LoadInst>(ExtOperand) && 5564 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || 5565 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) 5566 continue; 5567 5568 ProfitablyMovedExts.push_back(MovedExt); 5569 NewPromoted = true; 5570 } 5571 5572 // If none of speculative promotions for NewExts is profitable, rollback 5573 // and save the current extension (I) as the last profitable extension. 5574 if (!NewPromoted) { 5575 TPT.rollback(LastKnownGood); 5576 ProfitablyMovedExts.push_back(I); 5577 continue; 5578 } 5579 // The promotion is profitable. 5580 Promoted = true; 5581 } 5582 return Promoted; 5583 } 5584 5585 /// Merging redundant sexts when one is dominating the other. 5586 bool CodeGenPrepare::mergeSExts(Function &F) { 5587 bool Changed = false; 5588 for (auto &Entry : ValToSExtendedUses) { 5589 SExts &Insts = Entry.second; 5590 SExts CurPts; 5591 for (Instruction *Inst : Insts) { 5592 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || 5593 Inst->getOperand(0) != Entry.first) 5594 continue; 5595 bool inserted = false; 5596 for (auto &Pt : CurPts) { 5597 if (getDT(F).dominates(Inst, Pt)) { 5598 Pt->replaceAllUsesWith(Inst); 5599 RemovedInsts.insert(Pt); 5600 Pt->removeFromParent(); 5601 Pt = Inst; 5602 inserted = true; 5603 Changed = true; 5604 break; 5605 } 5606 if (!getDT(F).dominates(Pt, Inst)) 5607 // Give up if we need to merge in a common dominator as the 5608 // experiments show it is not profitable. 
5609 continue; 5610 Inst->replaceAllUsesWith(Pt); 5611 RemovedInsts.insert(Inst); 5612 Inst->removeFromParent(); 5613 inserted = true; 5614 Changed = true; 5615 break; 5616 } 5617 if (!inserted) 5618 CurPts.push_back(Inst); 5619 } 5620 } 5621 return Changed; 5622 } 5623 5624 // Splitting large data structures so that the GEPs accessing them can have 5625 // smaller offsets so that they can be sunk to the same blocks as their users. 5626 // For example, a large struct starting from %base is split into two parts 5627 // where the second part starts from %new_base. 5628 // 5629 // Before: 5630 // BB0: 5631 // %base = 5632 // 5633 // BB1: 5634 // %gep0 = gep %base, off0 5635 // %gep1 = gep %base, off1 5636 // %gep2 = gep %base, off2 5637 // 5638 // BB2: 5639 // %load1 = load %gep0 5640 // %load2 = load %gep1 5641 // %load3 = load %gep2 5642 // 5643 // After: 5644 // BB0: 5645 // %base = 5646 // %new_base = gep %base, off0 5647 // 5648 // BB1: 5649 // %new_gep0 = %new_base 5650 // %new_gep1 = gep %new_base, off1 - off0 5651 // %new_gep2 = gep %new_base, off2 - off0 5652 // 5653 // BB2: 5654 // %load1 = load i32, i32* %new_gep0 5655 // %load2 = load i32, i32* %new_gep1 5656 // %load3 = load i32, i32* %new_gep2 5657 // 5658 // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because 5659 // their offsets are smaller enough to fit into the addressing mode. 5660 bool CodeGenPrepare::splitLargeGEPOffsets() { 5661 bool Changed = false; 5662 for (auto &Entry : LargeOffsetGEPMap) { 5663 Value *OldBase = Entry.first; 5664 SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> 5665 &LargeOffsetGEPs = Entry.second; 5666 auto compareGEPOffset = 5667 [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, 5668 const std::pair<GetElementPtrInst *, int64_t> &RHS) { 5669 if (LHS.first == RHS.first) 5670 return false; 5671 if (LHS.second != RHS.second) 5672 return LHS.second < RHS.second; 5673 return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; 5674 }; 5675 // Sorting all the GEPs of the same data structures based on the offsets. 5676 llvm::sort(LargeOffsetGEPs, compareGEPOffset); 5677 LargeOffsetGEPs.erase( 5678 std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()), 5679 LargeOffsetGEPs.end()); 5680 // Skip if all the GEPs have the same offsets. 5681 if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second) 5682 continue; 5683 GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first; 5684 int64_t BaseOffset = LargeOffsetGEPs.begin()->second; 5685 Value *NewBaseGEP = nullptr; 5686 5687 auto *LargeOffsetGEP = LargeOffsetGEPs.begin(); 5688 while (LargeOffsetGEP != LargeOffsetGEPs.end()) { 5689 GetElementPtrInst *GEP = LargeOffsetGEP->first; 5690 int64_t Offset = LargeOffsetGEP->second; 5691 if (Offset != BaseOffset) { 5692 TargetLowering::AddrMode AddrMode; 5693 AddrMode.BaseOffs = Offset - BaseOffset; 5694 // The result type of the GEP might not be the type of the memory 5695 // access. 5696 if (!TLI->isLegalAddressingMode(*DL, AddrMode, 5697 GEP->getResultElementType(), 5698 GEP->getAddressSpace())) { 5699 // We need to create a new base if the offset to the current base is 5700 // too large to fit into the addressing mode. So, a very large struct 5701 // may be split into several parts. 5702 BaseGEP = GEP; 5703 BaseOffset = Offset; 5704 NewBaseGEP = nullptr; 5705 } 5706 } 5707 5708 // Generate a new GEP to replace the current one. 
5709 LLVMContext &Ctx = GEP->getContext(); 5710 Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); 5711 Type *I8PtrTy = 5712 Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace()); 5713 Type *I8Ty = Type::getInt8Ty(Ctx); 5714 5715 if (!NewBaseGEP) { 5716 // Create a new base if we don't have one yet. Find the insertion 5717 // pointer for the new base first. 5718 BasicBlock::iterator NewBaseInsertPt; 5719 BasicBlock *NewBaseInsertBB; 5720 if (auto *BaseI = dyn_cast<Instruction>(OldBase)) { 5721 // If the base of the struct is an instruction, the new base will be 5722 // inserted close to it. 5723 NewBaseInsertBB = BaseI->getParent(); 5724 if (isa<PHINode>(BaseI)) 5725 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5726 else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) { 5727 NewBaseInsertBB = 5728 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest()); 5729 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5730 } else 5731 NewBaseInsertPt = std::next(BaseI->getIterator()); 5732 } else { 5733 // If the current base is an argument or global value, the new base 5734 // will be inserted to the entry block. 5735 NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock(); 5736 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5737 } 5738 IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt); 5739 // Create a new base. 5740 Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset); 5741 NewBaseGEP = OldBase; 5742 if (NewBaseGEP->getType() != I8PtrTy) 5743 NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy); 5744 NewBaseGEP = 5745 NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep"); 5746 NewGEPBases.insert(NewBaseGEP); 5747 } 5748 5749 IRBuilder<> Builder(GEP); 5750 Value *NewGEP = NewBaseGEP; 5751 if (Offset == BaseOffset) { 5752 if (GEP->getType() != I8PtrTy) 5753 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5754 } else { 5755 // Calculate the new offset for the new GEP. 5756 Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset); 5757 NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index); 5758 5759 if (GEP->getType() != I8PtrTy) 5760 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5761 } 5762 GEP->replaceAllUsesWith(NewGEP); 5763 LargeOffsetGEPID.erase(GEP); 5764 LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP); 5765 GEP->eraseFromParent(); 5766 Changed = true; 5767 } 5768 } 5769 return Changed; 5770 } 5771 5772 bool CodeGenPrepare::optimizePhiType( 5773 PHINode *I, SmallPtrSetImpl<PHINode *> &Visited, 5774 SmallPtrSetImpl<Instruction *> &DeletedInstrs) { 5775 // We are looking for a collection on interconnected phi nodes that together 5776 // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts 5777 // are of the same type. Convert the whole set of nodes to the type of the 5778 // bitcast. 
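// A minimal illustrative sketch (hypothetical IR): an i32 phi whose inputs
// are only produced by bitcasts from float and whose result is only consumed
// by bitcasts back to float,
//   %a = bitcast float %fa to i32
//   %b = bitcast float %fb to i32
//   %p = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
//   %r = bitcast i32 %p to float
// can be rewritten as a phi of float with the bitcasts removed, provided the
// target's shouldConvertPhiType(i32, float) agrees.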
5779 Type *PhiTy = I->getType(); 5780 Type *ConvertTy = nullptr; 5781 if (Visited.count(I) || 5782 (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy())) 5783 return false; 5784 5785 SmallVector<Instruction *, 4> Worklist; 5786 Worklist.push_back(cast<Instruction>(I)); 5787 SmallPtrSet<PHINode *, 4> PhiNodes; 5788 PhiNodes.insert(I); 5789 Visited.insert(I); 5790 SmallPtrSet<Instruction *, 4> Defs; 5791 SmallPtrSet<Instruction *, 4> Uses; 5792 5793 while (!Worklist.empty()) { 5794 Instruction *II = Worklist.pop_back_val(); 5795 5796 if (auto *Phi = dyn_cast<PHINode>(II)) { 5797 // Handle Defs, which might also be PHI's 5798 for (Value *V : Phi->incoming_values()) { 5799 if (auto *OpPhi = dyn_cast<PHINode>(V)) { 5800 if (!PhiNodes.count(OpPhi)) { 5801 if (Visited.count(OpPhi)) 5802 return false; 5803 PhiNodes.insert(OpPhi); 5804 Visited.insert(OpPhi); 5805 Worklist.push_back(OpPhi); 5806 } 5807 } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) { 5808 if (!Defs.count(OpLoad)) { 5809 Defs.insert(OpLoad); 5810 Worklist.push_back(OpLoad); 5811 } 5812 } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) { 5813 if (!Defs.count(OpEx)) { 5814 Defs.insert(OpEx); 5815 Worklist.push_back(OpEx); 5816 } 5817 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { 5818 if (!ConvertTy) 5819 ConvertTy = OpBC->getOperand(0)->getType(); 5820 if (OpBC->getOperand(0)->getType() != ConvertTy) 5821 return false; 5822 if (!Defs.count(OpBC)) { 5823 Defs.insert(OpBC); 5824 Worklist.push_back(OpBC); 5825 } 5826 } else if (!isa<UndefValue>(V)) 5827 return false; 5828 } 5829 } 5830 5831 // Handle uses which might also be phi's 5832 for (User *V : II->users()) { 5833 if (auto *OpPhi = dyn_cast<PHINode>(V)) { 5834 if (!PhiNodes.count(OpPhi)) { 5835 if (Visited.count(OpPhi)) 5836 return false; 5837 PhiNodes.insert(OpPhi); 5838 Visited.insert(OpPhi); 5839 Worklist.push_back(OpPhi); 5840 } 5841 } else if (auto *OpStore = dyn_cast<StoreInst>(V)) { 5842 if (OpStore->getOperand(0) != II) 5843 return false; 5844 Uses.insert(OpStore); 5845 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { 5846 if (!ConvertTy) 5847 ConvertTy = OpBC->getType(); 5848 if (OpBC->getType() != ConvertTy) 5849 return false; 5850 Uses.insert(OpBC); 5851 } else 5852 return false; 5853 } 5854 } 5855 5856 if (!ConvertTy || !TLI->shouldConvertPhiType(PhiTy, ConvertTy)) 5857 return false; 5858 5859 LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to " 5860 << *ConvertTy << "\n"); 5861 5862 // Create all the new phi nodes of the new type, and bitcast any loads to the 5863 // correct type. 5864 ValueToValueMap ValMap; 5865 ValMap[UndefValue::get(PhiTy)] = UndefValue::get(ConvertTy); 5866 for (Instruction *D : Defs) { 5867 if (isa<BitCastInst>(D)) 5868 ValMap[D] = D->getOperand(0); 5869 else 5870 ValMap[D] = 5871 new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode()); 5872 } 5873 for (PHINode *Phi : PhiNodes) 5874 ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(), 5875 Phi->getName() + ".tc", Phi); 5876 // Pipe together all the PhiNodes. 
5877 for (PHINode *Phi : PhiNodes) {
5878 PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
5879 for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
5880 NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
5881 Phi->getIncomingBlock(i));
5882 }
5883 // And finally pipe up the stores and bitcasts
5884 for (Instruction *U : Uses) {
5885 if (isa<BitCastInst>(U)) {
5886 DeletedInstrs.insert(U);
5887 U->replaceAllUsesWith(ValMap[U->getOperand(0)]);
5888 } else
5889 U->setOperand(0,
5890 new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U));
5891 }
5892
5893 // Save the removed phis to be deleted later.
5894 for (PHINode *Phi : PhiNodes)
5895 DeletedInstrs.insert(Phi);
5896 return true;
5897 }
5898
5899 bool CodeGenPrepare::optimizePhiTypes(Function &F) {
5900 if (!OptimizePhiTypes)
5901 return false;
5902
5903 bool Changed = false;
5904 SmallPtrSet<PHINode *, 4> Visited;
5905 SmallPtrSet<Instruction *, 4> DeletedInstrs;
5906
5907 // Attempt to optimize all the phis in the function to the correct type.
5908 for (auto &BB : F)
5909 for (auto &Phi : BB.phis())
5910 Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
5911
5912 // Remove any old phis that have been converted.
5913 for (auto *I : DeletedInstrs) {
5914 I->replaceAllUsesWith(UndefValue::get(I->getType()));
5915 I->eraseFromParent();
5916 }
5917
5918 return Changed;
5919 }
5920
5921 /// Return true if an ext(load) can be formed from an extension in
5922 /// \p MovedExts.
5923 bool CodeGenPrepare::canFormExtLd(
5924 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
5925 Instruction *&Inst, bool HasPromoted) {
5926 for (auto *MovedExtInst : MovedExts) {
5927 if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
5928 LI = cast<LoadInst>(MovedExtInst->getOperand(0));
5929 Inst = MovedExtInst;
5930 break;
5931 }
5932 }
5933 if (!LI)
5934 return false;
5935
5936 // If they're already in the same block, there's nothing to do.
5937 // Make the cheap checks first if we did not promote.
5938 // If we promoted, we need to check if it is indeed profitable.
5939 if (!HasPromoted && LI->getParent() == Inst->getParent())
5940 return false;
5941
5942 return TLI->isExtLoad(LI, Inst, *DL);
5943 }
5944
5945 /// Move a zext or sext fed by a load into the same basic block as the load,
5946 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
5947 /// extend into the load.
5948 ///
5949 /// E.g.,
5950 /// \code
5951 /// %ld = load i32* %addr
5952 /// %add = add nuw i32 %ld, 4
5953 /// %zext = zext i32 %add to i64
5954 /// \endcode
5955 /// =>
5956 /// \code
5957 /// %ld = load i32* %addr
5958 /// %zext = zext i32 %ld to i64
5959 /// %add = add nuw i64 %zext, 4
5960 /// \endcode
5961 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
5962 /// allows us to match zext(load i32*) to i64.
5963 ///
5964 /// Also, try to promote the computations used to obtain a sign extended
5965 /// value used in memory accesses.
5966 /// E.g.,
5967 /// \code
5968 /// a = add nsw i32 b, 3
5969 /// d = sext i32 a to i64
5970 /// e = getelementptr ..., i64 d
5971 /// \endcode
5972 /// =>
5973 /// \code
5974 /// f = sext i32 b to i64
5975 /// a = add nsw i64 f, 3
5976 /// e = getelementptr ..., i64 a
5977 /// \endcode
5978 ///
5979 /// \p Inst[in/out] the extension may be modified during the process if some
5980 /// promotions apply.
5981 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { 5982 bool AllowPromotionWithoutCommonHeader = false; 5983 /// See if it is an interesting sext operations for the address type 5984 /// promotion before trying to promote it, e.g., the ones with the right 5985 /// type and used in memory accesses. 5986 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( 5987 *Inst, AllowPromotionWithoutCommonHeader); 5988 TypePromotionTransaction TPT(RemovedInsts); 5989 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5990 TPT.getRestorationPoint(); 5991 SmallVector<Instruction *, 1> Exts; 5992 SmallVector<Instruction *, 2> SpeculativelyMovedExts; 5993 Exts.push_back(Inst); 5994 5995 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); 5996 5997 // Look for a load being extended. 5998 LoadInst *LI = nullptr; 5999 Instruction *ExtFedByLoad; 6000 6001 // Try to promote a chain of computation if it allows to form an extended 6002 // load. 6003 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { 6004 assert(LI && ExtFedByLoad && "Expect a valid load and extension"); 6005 TPT.commit(); 6006 // Move the extend into the same block as the load. 6007 ExtFedByLoad->moveAfter(LI); 6008 ++NumExtsMoved; 6009 Inst = ExtFedByLoad; 6010 return true; 6011 } 6012 6013 // Continue promoting SExts if known as considerable depending on targets. 6014 if (ATPConsiderable && 6015 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, 6016 HasPromoted, TPT, SpeculativelyMovedExts)) 6017 return true; 6018 6019 TPT.rollback(LastKnownGood); 6020 return false; 6021 } 6022 6023 // Perform address type promotion if doing so is profitable. 6024 // If AllowPromotionWithoutCommonHeader == false, we should find other sext 6025 // instructions that sign extended the same initial value. However, if 6026 // AllowPromotionWithoutCommonHeader == true, we expect promoting the 6027 // extension is just profitable. 6028 bool CodeGenPrepare::performAddressTypePromotion( 6029 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 6030 bool HasPromoted, TypePromotionTransaction &TPT, 6031 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 6032 bool Promoted = false; 6033 SmallPtrSet<Instruction *, 1> UnhandledExts; 6034 bool AllSeenFirst = true; 6035 for (auto *I : SpeculativelyMovedExts) { 6036 Value *HeadOfChain = I->getOperand(0); 6037 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 6038 SeenChainsForSExt.find(HeadOfChain); 6039 // If there is an unhandled SExt which has the same header, try to promote 6040 // it as well. 6041 if (AlreadySeen != SeenChainsForSExt.end()) { 6042 if (AlreadySeen->second != nullptr) 6043 UnhandledExts.insert(AlreadySeen->second); 6044 AllSeenFirst = false; 6045 } 6046 } 6047 6048 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 6049 SpeculativelyMovedExts.size() == 1)) { 6050 TPT.commit(); 6051 if (HasPromoted) 6052 Promoted = true; 6053 for (auto *I : SpeculativelyMovedExts) { 6054 Value *HeadOfChain = I->getOperand(0); 6055 SeenChainsForSExt[HeadOfChain] = nullptr; 6056 ValToSExtendedUses[HeadOfChain].push_back(I); 6057 } 6058 // Update Inst as promotion happen. 6059 Inst = SpeculativelyMovedExts.pop_back_val(); 6060 } else { 6061 // This is the first chain visited from the header, keep the current chain 6062 // as unhandled. Defer to promote this until we encounter another SExt 6063 // chain derived from the same header. 
6064 for (auto *I : SpeculativelyMovedExts) { 6065 Value *HeadOfChain = I->getOperand(0); 6066 SeenChainsForSExt[HeadOfChain] = Inst; 6067 } 6068 return false; 6069 } 6070 6071 if (!AllSeenFirst && !UnhandledExts.empty()) 6072 for (auto *VisitedSExt : UnhandledExts) { 6073 if (RemovedInsts.count(VisitedSExt)) 6074 continue; 6075 TypePromotionTransaction TPT(RemovedInsts); 6076 SmallVector<Instruction *, 1> Exts; 6077 SmallVector<Instruction *, 2> Chains; 6078 Exts.push_back(VisitedSExt); 6079 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 6080 TPT.commit(); 6081 if (HasPromoted) 6082 Promoted = true; 6083 for (auto *I : Chains) { 6084 Value *HeadOfChain = I->getOperand(0); 6085 // Mark this as handled. 6086 SeenChainsForSExt[HeadOfChain] = nullptr; 6087 ValToSExtendedUses[HeadOfChain].push_back(I); 6088 } 6089 } 6090 return Promoted; 6091 } 6092 6093 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 6094 BasicBlock *DefBB = I->getParent(); 6095 6096 // If the result of a {s|z}ext and its source are both live out, rewrite all 6097 // other uses of the source with result of extension. 6098 Value *Src = I->getOperand(0); 6099 if (Src->hasOneUse()) 6100 return false; 6101 6102 // Only do this xform if truncating is free. 6103 if (!TLI->isTruncateFree(I->getType(), Src->getType())) 6104 return false; 6105 6106 // Only safe to perform the optimization if the source is also defined in 6107 // this block. 6108 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 6109 return false; 6110 6111 bool DefIsLiveOut = false; 6112 for (User *U : I->users()) { 6113 Instruction *UI = cast<Instruction>(U); 6114 6115 // Figure out which BB this ext is used in. 6116 BasicBlock *UserBB = UI->getParent(); 6117 if (UserBB == DefBB) continue; 6118 DefIsLiveOut = true; 6119 break; 6120 } 6121 if (!DefIsLiveOut) 6122 return false; 6123 6124 // Make sure none of the uses are PHI nodes. 6125 for (User *U : Src->users()) { 6126 Instruction *UI = cast<Instruction>(U); 6127 BasicBlock *UserBB = UI->getParent(); 6128 if (UserBB == DefBB) continue; 6129 // Be conservative. We don't want this xform to end up introducing 6130 // reloads just before load / store instructions. 6131 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 6132 return false; 6133 } 6134 6135 // InsertedTruncs - Only insert one trunc in each block once. 6136 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 6137 6138 bool MadeChange = false; 6139 for (Use &U : Src->uses()) { 6140 Instruction *User = cast<Instruction>(U.getUser()); 6141 6142 // Figure out which BB this ext is used in. 6143 BasicBlock *UserBB = User->getParent(); 6144 if (UserBB == DefBB) continue; 6145 6146 // Both src and def are live in this block. Rewrite the use. 6147 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 6148 6149 if (!InsertedTrunc) { 6150 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 6151 assert(InsertPt != UserBB->end()); 6152 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 6153 InsertedInsts.insert(InsertedTrunc); 6154 } 6155 6156 // Replace a use of the {s|z}ext source with a use of the result. 6157 U = InsertedTrunc; 6158 ++NumExtUses; 6159 MadeChange = true; 6160 } 6161 6162 return MadeChange; 6163 } 6164 6165 // Find loads whose uses only use some of the loaded value's bits. 
Add an "and" 6166 // just after the load if the target can fold this into one extload instruction, 6167 // with the hope of eliminating some of the other later "and" instructions using 6168 // the loaded value. "and"s that are made trivially redundant by the insertion 6169 // of the new "and" are removed by this function, while others (e.g. those whose 6170 // path from the load goes through a phi) are left for isel to potentially 6171 // remove. 6172 // 6173 // For example: 6174 // 6175 // b0: 6176 // x = load i32 6177 // ... 6178 // b1: 6179 // y = and x, 0xff 6180 // z = use y 6181 // 6182 // becomes: 6183 // 6184 // b0: 6185 // x = load i32 6186 // x' = and x, 0xff 6187 // ... 6188 // b1: 6189 // z = use x' 6190 // 6191 // whereas: 6192 // 6193 // b0: 6194 // x1 = load i32 6195 // ... 6196 // b1: 6197 // x2 = load i32 6198 // ... 6199 // b2: 6200 // x = phi x1, x2 6201 // y = and x, 0xff 6202 // 6203 // becomes (after a call to optimizeLoadExt for each load): 6204 // 6205 // b0: 6206 // x1 = load i32 6207 // x1' = and x1, 0xff 6208 // ... 6209 // b1: 6210 // x2 = load i32 6211 // x2' = and x2, 0xff 6212 // ... 6213 // b2: 6214 // x = phi x1', x2' 6215 // y = and x, 0xff 6216 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 6217 if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) 6218 return false; 6219 6220 // Skip loads we've already transformed. 6221 if (Load->hasOneUse() && 6222 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 6223 return false; 6224 6225 // Look at all uses of Load, looking through phis, to determine how many bits 6226 // of the loaded value are needed. 6227 SmallVector<Instruction *, 8> WorkList; 6228 SmallPtrSet<Instruction *, 16> Visited; 6229 SmallVector<Instruction *, 8> AndsToMaybeRemove; 6230 for (auto *U : Load->users()) 6231 WorkList.push_back(cast<Instruction>(U)); 6232 6233 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 6234 unsigned BitWidth = LoadResultVT.getSizeInBits(); 6235 APInt DemandBits(BitWidth, 0); 6236 APInt WidestAndBits(BitWidth, 0); 6237 6238 while (!WorkList.empty()) { 6239 Instruction *I = WorkList.back(); 6240 WorkList.pop_back(); 6241 6242 // Break use-def graph loops. 6243 if (!Visited.insert(I).second) 6244 continue; 6245 6246 // For a PHI node, push all of its users. 6247 if (auto *Phi = dyn_cast<PHINode>(I)) { 6248 for (auto *U : Phi->users()) 6249 WorkList.push_back(cast<Instruction>(U)); 6250 continue; 6251 } 6252 6253 switch (I->getOpcode()) { 6254 case Instruction::And: { 6255 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 6256 if (!AndC) 6257 return false; 6258 APInt AndBits = AndC->getValue(); 6259 DemandBits |= AndBits; 6260 // Keep track of the widest and mask we see. 
6261 if (AndBits.ugt(WidestAndBits)) 6262 WidestAndBits = AndBits; 6263 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 6264 AndsToMaybeRemove.push_back(I); 6265 break; 6266 } 6267 6268 case Instruction::Shl: { 6269 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 6270 if (!ShlC) 6271 return false; 6272 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 6273 DemandBits.setLowBits(BitWidth - ShiftAmt); 6274 break; 6275 } 6276 6277 case Instruction::Trunc: { 6278 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 6279 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 6280 DemandBits.setLowBits(TruncBitWidth); 6281 break; 6282 } 6283 6284 default: 6285 return false; 6286 } 6287 } 6288 6289 uint32_t ActiveBits = DemandBits.getActiveBits(); 6290 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 6291 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 6292 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 6293 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 6294 // followed by an AND. 6295 // TODO: Look into removing this restriction by fixing backends to either 6296 // return false for isLoadExtLegal for i1 or have them select this pattern to 6297 // a single instruction. 6298 // 6299 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 6300 // mask, since these are the only ands that will be removed by isel. 6301 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 6302 WidestAndBits != DemandBits) 6303 return false; 6304 6305 LLVMContext &Ctx = Load->getType()->getContext(); 6306 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 6307 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 6308 6309 // Reject cases that won't be matched as extloads. 6310 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 6311 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 6312 return false; 6313 6314 IRBuilder<> Builder(Load->getNextNode()); 6315 auto *NewAnd = cast<Instruction>( 6316 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 6317 // Mark this instruction as "inserted by CGP", so that other 6318 // optimizations don't touch it. 6319 InsertedInsts.insert(NewAnd); 6320 6321 // Replace all uses of load with new and (except for the use of load in the 6322 // new and itself). 6323 Load->replaceAllUsesWith(NewAnd); 6324 NewAnd->setOperand(0, Load); 6325 6326 // Remove any and instructions that are now redundant. 6327 for (auto *And : AndsToMaybeRemove) 6328 // Check that the and mask is the same as the one we decided to put on the 6329 // new and. 6330 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 6331 And->replaceAllUsesWith(NewAnd); 6332 if (&*CurInstIterator == And) 6333 CurInstIterator = std::next(And->getIterator()); 6334 And->eraseFromParent(); 6335 ++NumAndUses; 6336 } 6337 6338 ++NumAndsAdded; 6339 return true; 6340 } 6341 6342 /// Check if V (an operand of a select instruction) is an expensive instruction 6343 /// that is only used once. 6344 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 6345 auto *I = dyn_cast<Instruction>(V); 6346 // If it's safe to speculatively execute, then it should not have side 6347 // effects; therefore, it's safe to sink and possibly *not* execute. 
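  // "Expensive" here means a TTI user cost of at least TCC_Expensive,
  // measured with the size-and-latency cost kind.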
6348 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 6349 TTI->getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency) >= 6350 TargetTransformInfo::TCC_Expensive; 6351 } 6352 6353 /// Returns true if a SelectInst should be turned into an explicit branch. 6354 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 6355 const TargetLowering *TLI, 6356 SelectInst *SI) { 6357 // If even a predictable select is cheap, then a branch can't be cheaper. 6358 if (!TLI->isPredictableSelectExpensive()) 6359 return false; 6360 6361 // FIXME: This should use the same heuristics as IfConversion to determine 6362 // whether a select is better represented as a branch. 6363 6364 // If metadata tells us that the select condition is obviously predictable, 6365 // then we want to replace the select with a branch. 6366 uint64_t TrueWeight, FalseWeight; 6367 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 6368 uint64_t Max = std::max(TrueWeight, FalseWeight); 6369 uint64_t Sum = TrueWeight + FalseWeight; 6370 if (Sum != 0) { 6371 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 6372 if (Probability > TLI->getPredictableBranchThreshold()) 6373 return true; 6374 } 6375 } 6376 6377 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 6378 6379 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 6380 // comparison condition. If the compare has more than one use, there's 6381 // probably another cmov or setcc around, so it's not worth emitting a branch. 6382 if (!Cmp || !Cmp->hasOneUse()) 6383 return false; 6384 6385 // If either operand of the select is expensive and only needed on one side 6386 // of the select, we should form a branch. 6387 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 6388 sinkSelectOperand(TTI, SI->getFalseValue())) 6389 return true; 6390 6391 return false; 6392 } 6393 6394 /// If \p isTrue is true, return the true value of \p SI, otherwise return 6395 /// false value of \p SI. If the true/false value of \p SI is defined by any 6396 /// select instructions in \p Selects, look through the defining select 6397 /// instruction until the true/false value is not defined in \p Selects. 6398 static Value *getTrueOrFalseValue( 6399 SelectInst *SI, bool isTrue, 6400 const SmallPtrSet<const Instruction *, 2> &Selects) { 6401 Value *V = nullptr; 6402 6403 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 6404 DefSI = dyn_cast<SelectInst>(V)) { 6405 assert(DefSI->getCondition() == SI->getCondition() && 6406 "The condition of DefSI does not match with SI"); 6407 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); 6408 } 6409 6410 assert(V && "Failed to get select true/false value"); 6411 return V; 6412 } 6413 6414 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) { 6415 assert(Shift->isShift() && "Expected a shift"); 6416 6417 // If this is (1) a vector shift, (2) shifts by scalars are cheaper than 6418 // general vector shifts, and (3) the shift amount is a select-of-splatted 6419 // values, hoist the shifts before the select: 6420 // shift Op0, (select Cond, TVal, FVal) --> 6421 // select Cond, (shift Op0, TVal), (shift Op0, FVal) 6422 // 6423 // This is inverting a generic IR transform when we know that the cost of a 6424 // general vector shift is more than the cost of 2 shift-by-scalars. 6425 // We can't do this effectively in SDAG because we may not be able to 6426 // determine if the select operands are splats from within a basic block. 
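  // Illustrative example (hypothetical IR), where %t and %f are splats:
  //   %amt = select i1 %cond, <4 x i32> %t, <4 x i32> %f
  //   %res = shl <4 x i32> %x, %amt
  // -->
  //   %shl.t = shl <4 x i32> %x, %t
  //   %shl.f = shl <4 x i32> %x, %f
  //   %res   = select i1 %cond, <4 x i32> %shl.t, <4 x i32> %shl.f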
6427 Type *Ty = Shift->getType(); 6428 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) 6429 return false; 6430 Value *Cond, *TVal, *FVal; 6431 if (!match(Shift->getOperand(1), 6432 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 6433 return false; 6434 if (!isSplatValue(TVal) || !isSplatValue(FVal)) 6435 return false; 6436 6437 IRBuilder<> Builder(Shift); 6438 BinaryOperator::BinaryOps Opcode = Shift->getOpcode(); 6439 Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal); 6440 Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal); 6441 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); 6442 Shift->replaceAllUsesWith(NewSel); 6443 Shift->eraseFromParent(); 6444 return true; 6445 } 6446 6447 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) { 6448 Intrinsic::ID Opcode = Fsh->getIntrinsicID(); 6449 assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) && 6450 "Expected a funnel shift"); 6451 6452 // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper 6453 // than general vector shifts, and (3) the shift amount is select-of-splatted 6454 // values, hoist the funnel shifts before the select: 6455 // fsh Op0, Op1, (select Cond, TVal, FVal) --> 6456 // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal) 6457 // 6458 // This is inverting a generic IR transform when we know that the cost of a 6459 // general vector shift is more than the cost of 2 shift-by-scalars. 6460 // We can't do this effectively in SDAG because we may not be able to 6461 // determine if the select operands are splats from within a basic block. 6462 Type *Ty = Fsh->getType(); 6463 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) 6464 return false; 6465 Value *Cond, *TVal, *FVal; 6466 if (!match(Fsh->getOperand(2), 6467 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 6468 return false; 6469 if (!isSplatValue(TVal) || !isSplatValue(FVal)) 6470 return false; 6471 6472 IRBuilder<> Builder(Fsh); 6473 Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1); 6474 Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, TVal }); 6475 Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, FVal }); 6476 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); 6477 Fsh->replaceAllUsesWith(NewSel); 6478 Fsh->eraseFromParent(); 6479 return true; 6480 } 6481 6482 /// If we have a SelectInst that will likely profit from branch prediction, 6483 /// turn it into a branch. 6484 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 6485 // If branch conversion isn't desirable, exit early. 6486 if (DisableSelectToBranch || OptSize || 6487 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())) 6488 return false; 6489 6490 // Find all consecutive select instructions that share the same condition. 6491 SmallVector<SelectInst *, 2> ASI; 6492 ASI.push_back(SI); 6493 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 6494 It != SI->getParent()->end(); ++It) { 6495 SelectInst *I = dyn_cast<SelectInst>(&*It); 6496 if (I && SI->getCondition() == I->getCondition()) { 6497 ASI.push_back(I); 6498 } else { 6499 break; 6500 } 6501 } 6502 6503 SelectInst *LastSI = ASI.back(); 6504 // Increment the current iterator to skip all the rest of select instructions 6505 // because they will be either "not lowered" or "all lowered" to branch. 
6506 CurInstIterator = std::next(LastSI->getIterator()); 6507 6508 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 6509 6510 // Can we convert the 'select' to CF ? 6511 if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) 6512 return false; 6513 6514 TargetLowering::SelectSupportKind SelectKind; 6515 if (VectorCond) 6516 SelectKind = TargetLowering::VectorMaskSelect; 6517 else if (SI->getType()->isVectorTy()) 6518 SelectKind = TargetLowering::ScalarCondVectorVal; 6519 else 6520 SelectKind = TargetLowering::ScalarValSelect; 6521 6522 if (TLI->isSelectSupported(SelectKind) && 6523 !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) 6524 return false; 6525 6526 // The DominatorTree needs to be rebuilt by any consumers after this 6527 // transformation. We simply reset here rather than setting the ModifiedDT 6528 // flag to avoid restarting the function walk in runOnFunction for each 6529 // select optimized. 6530 DT.reset(); 6531 6532 // Transform a sequence like this: 6533 // start: 6534 // %cmp = cmp uge i32 %a, %b 6535 // %sel = select i1 %cmp, i32 %c, i32 %d 6536 // 6537 // Into: 6538 // start: 6539 // %cmp = cmp uge i32 %a, %b 6540 // %cmp.frozen = freeze %cmp 6541 // br i1 %cmp.frozen, label %select.true, label %select.false 6542 // select.true: 6543 // br label %select.end 6544 // select.false: 6545 // br label %select.end 6546 // select.end: 6547 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 6548 // 6549 // %cmp should be frozen, otherwise it may introduce undefined behavior. 6550 // In addition, we may sink instructions that produce %c or %d from 6551 // the entry block into the destination(s) of the new branch. 6552 // If the true or false blocks do not contain a sunken instruction, that 6553 // block and its branch may be optimized away. In that case, one side of the 6554 // first branch will point directly to select.end, and the corresponding PHI 6555 // predecessor block will be the start block. 6556 6557 // First, we split the block containing the select into 2 blocks. 6558 BasicBlock *StartBlock = SI->getParent(); 6559 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 6560 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 6561 BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency()); 6562 6563 // Delete the unconditional branch that was just created by the split. 6564 StartBlock->getTerminator()->eraseFromParent(); 6565 6566 // These are the new basic blocks for the conditional branch. 6567 // At least one will become an actual new basic block. 6568 BasicBlock *TrueBlock = nullptr; 6569 BasicBlock *FalseBlock = nullptr; 6570 BranchInst *TrueBranch = nullptr; 6571 BranchInst *FalseBranch = nullptr; 6572 6573 // Sink expensive instructions into the conditional blocks to avoid executing 6574 // them speculatively. 
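  // For example (illustrative): given
  //   %div = fdiv float %a, %b               ; expensive, single use
  //   %sel = select i1 %cmp, float %div, float %c
  // the fdiv is moved into the new select.true.sink block, so it only
  // executes when %cmp is true.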
6575 for (SelectInst *SI : ASI) { 6576 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 6577 if (TrueBlock == nullptr) { 6578 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 6579 EndBlock->getParent(), EndBlock); 6580 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 6581 TrueBranch->setDebugLoc(SI->getDebugLoc()); 6582 } 6583 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 6584 TrueInst->moveBefore(TrueBranch); 6585 } 6586 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 6587 if (FalseBlock == nullptr) { 6588 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 6589 EndBlock->getParent(), EndBlock); 6590 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6591 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6592 } 6593 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 6594 FalseInst->moveBefore(FalseBranch); 6595 } 6596 } 6597 6598 // If there was nothing to sink, then arbitrarily choose the 'false' side 6599 // for a new input value to the PHI. 6600 if (TrueBlock == FalseBlock) { 6601 assert(TrueBlock == nullptr && 6602 "Unexpected basic block transform while optimizing select"); 6603 6604 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 6605 EndBlock->getParent(), EndBlock); 6606 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6607 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6608 } 6609 6610 // Insert the real conditional branch based on the original condition. 6611 // If we did not create a new block for one of the 'true' or 'false' paths 6612 // of the condition, it means that side of the branch goes to the end block 6613 // directly and the path originates from the start block from the point of 6614 // view of the new PHI. 6615 BasicBlock *TT, *FT; 6616 if (TrueBlock == nullptr) { 6617 TT = EndBlock; 6618 FT = FalseBlock; 6619 TrueBlock = StartBlock; 6620 } else if (FalseBlock == nullptr) { 6621 TT = TrueBlock; 6622 FT = EndBlock; 6623 FalseBlock = StartBlock; 6624 } else { 6625 TT = TrueBlock; 6626 FT = FalseBlock; 6627 } 6628 IRBuilder<> IB(SI); 6629 auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen"); 6630 IB.CreateCondBr(CondFr, TT, FT, SI); 6631 6632 SmallPtrSet<const Instruction *, 2> INS; 6633 INS.insert(ASI.begin(), ASI.end()); 6634 // Use reverse iterator because later select may use the value of the 6635 // earlier select, and we need to propagate value through earlier select 6636 // to get the PHI operand. 6637 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 6638 SelectInst *SI = *It; 6639 // The select itself is replaced with a PHI Node. 6640 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 6641 PN->takeName(SI); 6642 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 6643 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 6644 PN->setDebugLoc(SI->getDebugLoc()); 6645 6646 SI->replaceAllUsesWith(PN); 6647 SI->eraseFromParent(); 6648 INS.erase(SI); 6649 ++NumSelectsExpanded; 6650 } 6651 6652 // Instruct OptimizeBlock to skip to the next block. 6653 CurInstIterator = StartBlock->end(); 6654 return true; 6655 } 6656 6657 /// Some targets only accept certain types for splat inputs. For example a VDUP 6658 /// in MVE takes a GPR (integer) register, and the instruction that incorporate 6659 /// a VDUP (such as a VADD qd, qm, rm) also require a gpr register. 
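/// An illustrative (hypothetical) example, assuming the target asks for an
/// i32 splat input:
/// \code
///   %ins = insertelement <4 x float> undef, float %f, i64 0
///   %spl = shufflevector <4 x float> %ins, <4 x float> undef,
///                        <4 x i32> zeroinitializer
/// \endcode
/// is rewritten to
/// \code
///   %b   = bitcast float %f to i32
///   %ins = insertelement <4 x i32> undef, i32 %b, i64 0
///   %spl = shufflevector <4 x i32> %ins, <4 x i32> undef,
///                        <4 x i32> zeroinitializer
///   %res = bitcast <4 x i32> %spl to <4 x float>
/// \endcode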
6660 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 6661 if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), 6662 m_Undef(), m_ZeroMask()))) 6663 return false; 6664 Type *NewType = TLI->shouldConvertSplatType(SVI); 6665 if (!NewType) 6666 return false; 6667 6668 auto *SVIVecType = cast<FixedVectorType>(SVI->getType()); 6669 assert(!NewType->isVectorTy() && "Expected a scalar type!"); 6670 assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() && 6671 "Expected a type of the same size!"); 6672 auto *NewVecType = 6673 FixedVectorType::get(NewType, SVIVecType->getNumElements()); 6674 6675 // Create a bitcast (shuffle (insert (bitcast(..)))) 6676 IRBuilder<> Builder(SVI->getContext()); 6677 Builder.SetInsertPoint(SVI); 6678 Value *BC1 = Builder.CreateBitCast( 6679 cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType); 6680 Value *Insert = Builder.CreateInsertElement(UndefValue::get(NewVecType), BC1, 6681 (uint64_t)0); 6682 Value *Shuffle = Builder.CreateShuffleVector( 6683 Insert, UndefValue::get(NewVecType), SVI->getShuffleMask()); 6684 Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType); 6685 6686 SVI->replaceAllUsesWith(BC2); 6687 RecursivelyDeleteTriviallyDeadInstructions( 6688 SVI, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); }); 6689 6690 // Also hoist the bitcast up to its operand if it they are not in the same 6691 // block. 6692 if (auto *BCI = dyn_cast<Instruction>(BC1)) 6693 if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0))) 6694 if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) && 6695 !Op->isTerminator() && !Op->isEHPad()) 6696 BCI->moveAfter(Op); 6697 6698 return true; 6699 } 6700 6701 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) { 6702 // If the operands of I can be folded into a target instruction together with 6703 // I, duplicate and sink them. 6704 SmallVector<Use *, 4> OpsToSink; 6705 if (!TLI->shouldSinkOperands(I, OpsToSink)) 6706 return false; 6707 6708 // OpsToSink can contain multiple uses in a use chain (e.g. 6709 // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating 6710 // uses must come first, so we process the ops in reverse order so as to not 6711 // create invalid IR. 6712 BasicBlock *TargetBB = I->getParent(); 6713 bool Changed = false; 6714 SmallVector<Use *, 4> ToReplace; 6715 for (Use *U : reverse(OpsToSink)) { 6716 auto *UI = cast<Instruction>(U->get()); 6717 if (UI->getParent() == TargetBB || isa<PHINode>(UI)) 6718 continue; 6719 ToReplace.push_back(U); 6720 } 6721 6722 SetVector<Instruction *> MaybeDead; 6723 DenseMap<Instruction *, Instruction *> NewInstructions; 6724 Instruction *InsertPoint = I; 6725 for (Use *U : ToReplace) { 6726 auto *UI = cast<Instruction>(U->get()); 6727 Instruction *NI = UI->clone(); 6728 NewInstructions[UI] = NI; 6729 MaybeDead.insert(UI); 6730 LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n"); 6731 NI->insertBefore(InsertPoint); 6732 InsertPoint = NI; 6733 InsertedInsts.insert(NI); 6734 6735 // Update the use for the new instruction, making sure that we update the 6736 // sunk instruction uses, if it is part of a chain that has already been 6737 // sunk. 6738 Instruction *OldI = cast<Instruction>(U->getUser()); 6739 if (NewInstructions.count(OldI)) 6740 NewInstructions[OldI]->setOperand(U->getOperandNo(), NI); 6741 else 6742 U->set(NI); 6743 Changed = true; 6744 } 6745 6746 // Remove instructions that are dead after sinking. 
6747 for (auto *I : MaybeDead) { 6748 if (!I->hasNUsesOrMore(1)) { 6749 LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n"); 6750 I->eraseFromParent(); 6751 } 6752 } 6753 6754 return Changed; 6755 } 6756 6757 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 6758 Value *Cond = SI->getCondition(); 6759 Type *OldType = Cond->getType(); 6760 LLVMContext &Context = Cond->getContext(); 6761 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 6762 unsigned RegWidth = RegType.getSizeInBits(); 6763 6764 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 6765 return false; 6766 6767 // If the register width is greater than the type width, expand the condition 6768 // of the switch instruction and each case constant to the width of the 6769 // register. By widening the type of the switch condition, subsequent 6770 // comparisons (for case comparisons) will not need to be extended to the 6771 // preferred register width, so we will potentially eliminate N-1 extends, 6772 // where N is the number of cases in the switch. 6773 auto *NewType = Type::getIntNTy(Context, RegWidth); 6774 6775 // Zero-extend the switch condition and case constants unless the switch 6776 // condition is a function argument that is already being sign-extended. 6777 // In that case, we can avoid an unnecessary mask/extension by sign-extending 6778 // everything instead. 6779 Instruction::CastOps ExtType = Instruction::ZExt; 6780 if (auto *Arg = dyn_cast<Argument>(Cond)) 6781 if (Arg->hasSExtAttr()) 6782 ExtType = Instruction::SExt; 6783 6784 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 6785 ExtInst->insertBefore(SI); 6786 ExtInst->setDebugLoc(SI->getDebugLoc()); 6787 SI->setCondition(ExtInst); 6788 for (auto Case : SI->cases()) { 6789 APInt NarrowConst = Case.getCaseValue()->getValue(); 6790 APInt WideConst = (ExtType == Instruction::ZExt) ? 6791 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); 6792 Case.setValue(ConstantInt::get(Context, WideConst)); 6793 } 6794 6795 return true; 6796 } 6797 6798 6799 namespace { 6800 6801 /// Helper class to promote a scalar operation to a vector one. 6802 /// This class is used to move downward extractelement transition. 6803 /// E.g., 6804 /// a = vector_op <2 x i32> 6805 /// b = extractelement <2 x i32> a, i32 0 6806 /// c = scalar_op b 6807 /// store c 6808 /// 6809 /// => 6810 /// a = vector_op <2 x i32> 6811 /// c = vector_op a (equivalent to scalar_op on the related lane) 6812 /// * d = extractelement <2 x i32> c, i32 0 6813 /// * store d 6814 /// Assuming both extractelement and store can be combine, we get rid of the 6815 /// transition. 6816 class VectorPromoteHelper { 6817 /// DataLayout associated with the current module. 6818 const DataLayout &DL; 6819 6820 /// Used to perform some checks on the legality of vector operations. 6821 const TargetLowering &TLI; 6822 6823 /// Used to estimated the cost of the promoted chain. 6824 const TargetTransformInfo &TTI; 6825 6826 /// The transition being moved downwards. 6827 Instruction *Transition; 6828 6829 /// The sequence of instructions to be promoted. 6830 SmallVector<Instruction *, 4> InstsToBePromoted; 6831 6832 /// Cost of combining a store and an extract. 6833 unsigned StoreExtractCombineCost; 6834 6835 /// Instruction that will be combined with the transition. 6836 Instruction *CombineInst = nullptr; 6837 6838 /// The instruction that represents the current end of the transition. 
6839 /// Since we are faking the promotion until we reach the end of the chain 6840 /// of computation, we need a way to get the current end of the transition. 6841 Instruction *getEndOfTransition() const { 6842 if (InstsToBePromoted.empty()) 6843 return Transition; 6844 return InstsToBePromoted.back(); 6845 } 6846 6847 /// Return the index of the original value in the transition. 6848 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 6849 /// c, is at index 0. 6850 unsigned getTransitionOriginalValueIdx() const { 6851 assert(isa<ExtractElementInst>(Transition) && 6852 "Other kind of transitions are not supported yet"); 6853 return 0; 6854 } 6855 6856 /// Return the index of the index in the transition. 6857 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 6858 /// is at index 1. 6859 unsigned getTransitionIdx() const { 6860 assert(isa<ExtractElementInst>(Transition) && 6861 "Other kind of transitions are not supported yet"); 6862 return 1; 6863 } 6864 6865 /// Get the type of the transition. 6866 /// This is the type of the original value. 6867 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 6868 /// transition is <2 x i32>. 6869 Type *getTransitionType() const { 6870 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 6871 } 6872 6873 /// Promote \p ToBePromoted by moving \p Def downward through. 6874 /// I.e., we have the following sequence: 6875 /// Def = Transition <ty1> a to <ty2> 6876 /// b = ToBePromoted <ty2> Def, ... 6877 /// => 6878 /// b = ToBePromoted <ty1> a, ... 6879 /// Def = Transition <ty1> ToBePromoted to <ty2> 6880 void promoteImpl(Instruction *ToBePromoted); 6881 6882 /// Check whether or not it is profitable to promote all the 6883 /// instructions enqueued to be promoted. 6884 bool isProfitableToPromote() { 6885 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 6886 unsigned Index = isa<ConstantInt>(ValIdx) 6887 ? cast<ConstantInt>(ValIdx)->getZExtValue() 6888 : -1; 6889 Type *PromotedType = getTransitionType(); 6890 6891 StoreInst *ST = cast<StoreInst>(CombineInst); 6892 unsigned AS = ST->getPointerAddressSpace(); 6893 unsigned Align = ST->getAlignment(); 6894 // Check if this store is supported. 6895 if (!TLI.allowsMisalignedMemoryAccesses( 6896 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 6897 Align)) { 6898 // If this is not supported, there is no way we can combine 6899 // the extract with the store. 6900 return false; 6901 } 6902 6903 // The scalar chain of computation has to pay for the transition 6904 // scalar to vector. 6905 // The vector chain has to account for the combining cost. 6906 uint64_t ScalarCost = 6907 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 6908 uint64_t VectorCost = StoreExtractCombineCost; 6909 enum TargetTransformInfo::TargetCostKind CostKind = 6910 TargetTransformInfo::TCK_RecipThroughput; 6911 for (const auto &Inst : InstsToBePromoted) { 6912 // Compute the cost. 6913 // By construction, all instructions being promoted are arithmetic ones. 6914 // Moreover, one argument is a constant that can be viewed as a splat 6915 // constant. 6916 Value *Arg0 = Inst->getOperand(0); 6917 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 6918 isa<ConstantFP>(Arg0); 6919 TargetTransformInfo::OperandValueKind Arg0OVK = 6920 IsArg0Constant ? 
TargetTransformInfo::OK_UniformConstantValue 6921 : TargetTransformInfo::OK_AnyValue; 6922 TargetTransformInfo::OperandValueKind Arg1OVK = 6923 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 6924 : TargetTransformInfo::OK_AnyValue; 6925 ScalarCost += TTI.getArithmeticInstrCost( 6926 Inst->getOpcode(), Inst->getType(), CostKind, Arg0OVK, Arg1OVK); 6927 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 6928 CostKind, 6929 Arg0OVK, Arg1OVK); 6930 } 6931 LLVM_DEBUG( 6932 dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 6933 << ScalarCost << "\nVector: " << VectorCost << '\n'); 6934 return ScalarCost > VectorCost; 6935 } 6936 6937 /// Generate a constant vector with \p Val with the same 6938 /// number of elements as the transition. 6939 /// \p UseSplat defines whether or not \p Val should be replicated 6940 /// across the whole vector. 6941 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 6942 /// otherwise we generate a vector with as many undef as possible: 6943 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 6944 /// used at the index of the extract. 6945 Value *getConstantVector(Constant *Val, bool UseSplat) const { 6946 unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); 6947 if (!UseSplat) { 6948 // If we cannot determine where the constant must be, we have to 6949 // use a splat constant. 6950 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 6951 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 6952 ExtractIdx = CstVal->getSExtValue(); 6953 else 6954 UseSplat = true; 6955 } 6956 6957 ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount(); 6958 if (UseSplat) 6959 return ConstantVector::getSplat(EC, Val); 6960 6961 if (!EC.Scalable) { 6962 SmallVector<Constant *, 4> ConstVec; 6963 UndefValue *UndefVal = UndefValue::get(Val->getType()); 6964 for (unsigned Idx = 0; Idx != EC.Min; ++Idx) { 6965 if (Idx == ExtractIdx) 6966 ConstVec.push_back(Val); 6967 else 6968 ConstVec.push_back(UndefVal); 6969 } 6970 return ConstantVector::get(ConstVec); 6971 } else 6972 llvm_unreachable( 6973 "Generate scalable vector for non-splat is unimplemented"); 6974 } 6975 6976 /// Check if promoting to a vector type an operand at \p OperandIdx 6977 /// in \p Use can trigger undefined behavior. 6978 static bool canCauseUndefinedBehavior(const Instruction *Use, 6979 unsigned OperandIdx) { 6980 // This is not safe to introduce undef when the operand is on 6981 // the right hand side of a division-like instruction. 6982 if (OperandIdx != 1) 6983 return false; 6984 switch (Use->getOpcode()) { 6985 default: 6986 return false; 6987 case Instruction::SDiv: 6988 case Instruction::UDiv: 6989 case Instruction::SRem: 6990 case Instruction::URem: 6991 return true; 6992 case Instruction::FDiv: 6993 case Instruction::FRem: 6994 return !Use->hasNoNaNs(); 6995 } 6996 llvm_unreachable(nullptr); 6997 } 6998 6999 public: 7000 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 7001 const TargetTransformInfo &TTI, Instruction *Transition, 7002 unsigned CombineCost) 7003 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 7004 StoreExtractCombineCost(CombineCost) { 7005 assert(Transition && "Do not know how to promote null"); 7006 } 7007 7008 /// Check if we can promote \p ToBePromoted to \p Type. 7009 bool canPromote(const Instruction *ToBePromoted) const { 7010 // We could support CastInst too. 
7011 return isa<BinaryOperator>(ToBePromoted); 7012 } 7013 7014 /// Check if it is profitable to promote \p ToBePromoted 7015 /// by moving downward the transition through. 7016 bool shouldPromote(const Instruction *ToBePromoted) const { 7017 // Promote only if all the operands can be statically expanded. 7018 // Indeed, we do not want to introduce any new kind of transitions. 7019 for (const Use &U : ToBePromoted->operands()) { 7020 const Value *Val = U.get(); 7021 if (Val == getEndOfTransition()) { 7022 // If the use is a division and the transition is on the rhs, 7023 // we cannot promote the operation, otherwise we may create a 7024 // division by zero. 7025 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 7026 return false; 7027 continue; 7028 } 7029 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 7030 !isa<ConstantFP>(Val)) 7031 return false; 7032 } 7033 // Check that the resulting operation is legal. 7034 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 7035 if (!ISDOpcode) 7036 return false; 7037 return StressStoreExtract || 7038 TLI.isOperationLegalOrCustom( 7039 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); 7040 } 7041 7042 /// Check whether or not \p Use can be combined 7043 /// with the transition. 7044 /// I.e., is it possible to do Use(Transition) => AnotherUse? 7045 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } 7046 7047 /// Record \p ToBePromoted as part of the chain to be promoted. 7048 void enqueueForPromotion(Instruction *ToBePromoted) { 7049 InstsToBePromoted.push_back(ToBePromoted); 7050 } 7051 7052 /// Set the instruction that will be combined with the transition. 7053 void recordCombineInstruction(Instruction *ToBeCombined) { 7054 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); 7055 CombineInst = ToBeCombined; 7056 } 7057 7058 /// Promote all the instructions enqueued for promotion if it is 7059 /// is profitable. 7060 /// \return True if the promotion happened, false otherwise. 7061 bool promote() { 7062 // Check if there is something to promote. 7063 // Right now, if we do not have anything to combine with, 7064 // we assume the promotion is not profitable. 7065 if (InstsToBePromoted.empty() || !CombineInst) 7066 return false; 7067 7068 // Check cost. 7069 if (!StressStoreExtract && !isProfitableToPromote()) 7070 return false; 7071 7072 // Promote. 7073 for (auto &ToBePromoted : InstsToBePromoted) 7074 promoteImpl(ToBePromoted); 7075 InstsToBePromoted.clear(); 7076 return true; 7077 } 7078 }; 7079 7080 } // end anonymous namespace 7081 7082 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { 7083 // At this point, we know that all the operands of ToBePromoted but Def 7084 // can be statically promoted. 7085 // For Def, we need to use its parameter in ToBePromoted: 7086 // b = ToBePromoted ty1 a 7087 // Def = Transition ty1 b to ty2 7088 // Move the transition down. 7089 // 1. Replace all uses of the promoted operation by the transition. 7090 // = ... b => = ... Def. 7091 assert(ToBePromoted->getType() == Transition->getType() && 7092 "The type of the result of the transition does not match " 7093 "the final type"); 7094 ToBePromoted->replaceAllUsesWith(Transition); 7095 // 2. Update the type of the uses. 7096 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. 7097 Type *TransitionTy = getTransitionType(); 7098 ToBePromoted->mutateType(TransitionTy); 7099 // 3. 
Update all the operands of the promoted operation with promoted 7100 // operands. 7101 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. 7102 for (Use &U : ToBePromoted->operands()) { 7103 Value *Val = U.get(); 7104 Value *NewVal = nullptr; 7105 if (Val == Transition) 7106 NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); 7107 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || 7108 isa<ConstantFP>(Val)) { 7109 // Use a splat constant if it is not safe to use undef. 7110 NewVal = getConstantVector( 7111 cast<Constant>(Val), 7112 isa<UndefValue>(Val) || 7113 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); 7114 } else 7115 llvm_unreachable("Did you modified shouldPromote and forgot to update " 7116 "this?"); 7117 ToBePromoted->setOperand(U.getOperandNo(), NewVal); 7118 } 7119 Transition->moveAfter(ToBePromoted); 7120 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); 7121 } 7122 7123 /// Some targets can do store(extractelement) with one instruction. 7124 /// Try to push the extractelement towards the stores when the target 7125 /// has this feature and this is profitable. 7126 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { 7127 unsigned CombineCost = std::numeric_limits<unsigned>::max(); 7128 if (DisableStoreExtract || 7129 (!StressStoreExtract && 7130 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 7131 Inst->getOperand(1), CombineCost))) 7132 return false; 7133 7134 // At this point we know that Inst is a vector to scalar transition. 7135 // Try to move it down the def-use chain, until: 7136 // - We can combine the transition with its single use 7137 // => we got rid of the transition. 7138 // - We escape the current basic block 7139 // => we would need to check that we are moving it at a cheaper place and 7140 // we do not do that for now. 7141 BasicBlock *Parent = Inst->getParent(); 7142 LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 7143 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); 7144 // If the transition has more than one use, assume this is not going to be 7145 // beneficial. 7146 while (Inst->hasOneUse()) { 7147 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 7148 LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 7149 7150 if (ToBePromoted->getParent() != Parent) { 7151 LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" 7152 << ToBePromoted->getParent()->getName() 7153 << ") than the transition (" << Parent->getName() 7154 << ").\n"); 7155 return false; 7156 } 7157 7158 if (VPH.canCombine(ToBePromoted)) { 7159 LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' 7160 << "will be combined with: " << *ToBePromoted << '\n'); 7161 VPH.recordCombineInstruction(ToBePromoted); 7162 bool Changed = VPH.promote(); 7163 NumStoreExtractExposed += Changed; 7164 return Changed; 7165 } 7166 7167 LLVM_DEBUG(dbgs() << "Try promoting.\n"); 7168 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 7169 return false; 7170 7171 LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 7172 7173 VPH.enqueueForPromotion(ToBePromoted); 7174 Inst = ToBePromoted; 7175 } 7176 return false; 7177 } 7178 7179 /// For the instruction sequence of store below, F and I values 7180 /// are bundled together as an i64 value before being stored into memory. 
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
///            (shl (zext I to i64), 32)), addr)  -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting other merged stores can also be beneficial, e.g.:
/// For a pair of {i32, i32}, an i64 store --> two i32 stores.
/// For a pair of {i32, i16}, an i64 store --> two i32 stores.
/// For a pair of {i16, i16}, an i32 store --> two i16 stores.
/// For a pair of {i16, i8},  an i32 store --> two i16 stores.
/// For a pair of {i8, i8},   an i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// These store patterns commonly arise from the simple code snippet below
/// when only std::make_pair(...) is SROA-transformed before being inlined
/// into hoo.
/// void goo(const std::pair<int, float> &);
/// hoo() {
///   ...
///   goo(std::make_pair(tmp, ftmp));
///   ...
/// }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();

  // The code below assumes shifting a value by <number of bits>,
  // whereas scalable vectors would have to be shifted by
  // <2log(vscale) + number of bits> in order to store the
  // low/high parts. Bailing out for now.
  if (isa<ScalableVectorType>(StoreType))
    return false;

  if (!DL.typeSizeEqualsStoreSize(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
    return false;

  // Don't split the store if it is volatile.
  if (SI.isVolatile())
    return false;

  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  // or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of the OR and the first operand of the SHL to have
  // only one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers no wider than HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ?
EVT::getEVT(LBC->getOperand(0)->getType()) 7263 : EVT::getEVT(LValue->getType()); 7264 EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) 7265 : EVT::getEVT(HValue->getType()); 7266 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) 7267 return false; 7268 7269 // Start to split store. 7270 IRBuilder<> Builder(SI.getContext()); 7271 Builder.SetInsertPoint(&SI); 7272 7273 // If LValue/HValue is a bitcast in another BB, create a new one in current 7274 // BB so it may be merged with the splitted stores by dag combiner. 7275 if (LBC && LBC->getParent() != SI.getParent()) 7276 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); 7277 if (HBC && HBC->getParent() != SI.getParent()) 7278 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); 7279 7280 bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); 7281 auto CreateSplitStore = [&](Value *V, bool Upper) { 7282 V = Builder.CreateZExtOrBitCast(V, SplitStoreType); 7283 Value *Addr = Builder.CreateBitCast( 7284 SI.getOperand(1), 7285 SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); 7286 Align Alignment = SI.getAlign(); 7287 const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper); 7288 if (IsOffsetStore) { 7289 Addr = Builder.CreateGEP( 7290 SplitStoreType, Addr, 7291 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); 7292 7293 // When splitting the store in half, naturally one half will retain the 7294 // alignment of the original wider store, regardless of whether it was 7295 // over-aligned or not, while the other will require adjustment. 7296 Alignment = commonAlignment(Alignment, HalfValBitSize / 8); 7297 } 7298 Builder.CreateAlignedStore(V, Addr, Alignment); 7299 }; 7300 7301 CreateSplitStore(LValue, false); 7302 CreateSplitStore(HValue, true); 7303 7304 // Delete the old store. 7305 SI.eraseFromParent(); 7306 return true; 7307 } 7308 7309 // Return true if the GEP has two operands, the first operand is of a sequential 7310 // type, and the second operand is a constant. 7311 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { 7312 gep_type_iterator I = gep_type_begin(*GEP); 7313 return GEP->getNumOperands() == 2 && 7314 I.isSequential() && 7315 isa<ConstantInt>(GEP->getOperand(1)); 7316 } 7317 7318 // Try unmerging GEPs to reduce liveness interference (register pressure) across 7319 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, 7320 // reducing liveness interference across those edges benefits global register 7321 // allocation. Currently handles only certain cases. 7322 // 7323 // For example, unmerge %GEPI and %UGEPI as below. 7324 // 7325 // ---------- BEFORE ---------- 7326 // SrcBlock: 7327 // ... 7328 // %GEPIOp = ... 7329 // ... 7330 // %GEPI = gep %GEPIOp, Idx 7331 // ... 7332 // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] 7333 // (* %GEPI is alive on the indirectbr edges due to other uses ahead) 7334 // (* %GEPIOp is alive on the indirectbr edges only because of it's used by 7335 // %UGEPI) 7336 // 7337 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) 7338 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) 7339 // ... 7340 // 7341 // DstBi: 7342 // ... 7343 // %UGEPI = gep %GEPIOp, UIdx 7344 // ... 7345 // --------------------------- 7346 // 7347 // ---------- AFTER ---------- 7348 // SrcBlock: 7349 // ... 
(same as above) 7350 // (* %GEPI is still alive on the indirectbr edges) 7351 // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the 7352 // unmerging) 7353 // ... 7354 // 7355 // DstBi: 7356 // ... 7357 // %UGEPI = gep %GEPI, (UIdx-Idx) 7358 // ... 7359 // --------------------------- 7360 // 7361 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is 7362 // no longer alive on them. 7363 // 7364 // We try to unmerge GEPs here in CodGenPrepare, as opposed to limiting merging 7365 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as 7366 // not to disable further simplications and optimizations as a result of GEP 7367 // merging. 7368 // 7369 // Note this unmerging may increase the length of the data flow critical path 7370 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff 7371 // between the register pressure and the length of data-flow critical 7372 // path. Restricting this to the uncommon IndirectBr case would minimize the 7373 // impact of potentially longer critical path, if any, and the impact on compile 7374 // time. 7375 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, 7376 const TargetTransformInfo *TTI) { 7377 BasicBlock *SrcBlock = GEPI->getParent(); 7378 // Check that SrcBlock ends with an IndirectBr. If not, give up. The common 7379 // (non-IndirectBr) cases exit early here. 7380 if (!isa<IndirectBrInst>(SrcBlock->getTerminator())) 7381 return false; 7382 // Check that GEPI is a simple gep with a single constant index. 7383 if (!GEPSequentialConstIndexed(GEPI)) 7384 return false; 7385 ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1)); 7386 // Check that GEPI is a cheap one. 7387 if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(), 7388 TargetTransformInfo::TCK_SizeAndLatency) 7389 > TargetTransformInfo::TCC_Basic) 7390 return false; 7391 Value *GEPIOp = GEPI->getOperand(0); 7392 // Check that GEPIOp is an instruction that's also defined in SrcBlock. 7393 if (!isa<Instruction>(GEPIOp)) 7394 return false; 7395 auto *GEPIOpI = cast<Instruction>(GEPIOp); 7396 if (GEPIOpI->getParent() != SrcBlock) 7397 return false; 7398 // Check that GEP is used outside the block, meaning it's alive on the 7399 // IndirectBr edge(s). 7400 if (find_if(GEPI->users(), [&](User *Usr) { 7401 if (auto *I = dyn_cast<Instruction>(Usr)) { 7402 if (I->getParent() != SrcBlock) { 7403 return true; 7404 } 7405 } 7406 return false; 7407 }) == GEPI->users().end()) 7408 return false; 7409 // The second elements of the GEP chains to be unmerged. 7410 std::vector<GetElementPtrInst *> UGEPIs; 7411 // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive 7412 // on IndirectBr edges. 7413 for (User *Usr : GEPIOp->users()) { 7414 if (Usr == GEPI) continue; 7415 // Check if Usr is an Instruction. If not, give up. 7416 if (!isa<Instruction>(Usr)) 7417 return false; 7418 auto *UI = cast<Instruction>(Usr); 7419 // Check if Usr in the same block as GEPIOp, which is fine, skip. 7420 if (UI->getParent() == SrcBlock) 7421 continue; 7422 // Check if Usr is a GEP. If not, give up. 7423 if (!isa<GetElementPtrInst>(Usr)) 7424 return false; 7425 auto *UGEPI = cast<GetElementPtrInst>(Usr); 7426 // Check if UGEPI is a simple gep with a single constant index and GEPIOp is 7427 // the pointer operand to it. If so, record it in the vector. If not, give 7428 // up. 
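    // (E.g., with %GEPI = gep %GEPIOp, 4 and %UGEPI = gep %GEPIOp, 6, the
    // unmerging below rewrites %UGEPI as gep %GEPI, 2.)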
7429 if (!GEPSequentialConstIndexed(UGEPI)) 7430 return false; 7431 if (UGEPI->getOperand(0) != GEPIOp) 7432 return false; 7433 if (GEPIIdx->getType() != 7434 cast<ConstantInt>(UGEPI->getOperand(1))->getType()) 7435 return false; 7436 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 7437 if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(), 7438 TargetTransformInfo::TCK_SizeAndLatency) 7439 > TargetTransformInfo::TCC_Basic) 7440 return false; 7441 UGEPIs.push_back(UGEPI); 7442 } 7443 if (UGEPIs.size() == 0) 7444 return false; 7445 // Check the materializing cost of (Uidx-Idx). 7446 for (GetElementPtrInst *UGEPI : UGEPIs) { 7447 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 7448 APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); 7449 unsigned ImmCost = 7450 TTI->getIntImmCost(NewIdx, GEPIIdx->getType(), 7451 TargetTransformInfo::TCK_SizeAndLatency); 7452 if (ImmCost > TargetTransformInfo::TCC_Basic) 7453 return false; 7454 } 7455 // Now unmerge between GEPI and UGEPIs. 7456 for (GetElementPtrInst *UGEPI : UGEPIs) { 7457 UGEPI->setOperand(0, GEPI); 7458 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); 7459 Constant *NewUGEPIIdx = 7460 ConstantInt::get(GEPIIdx->getType(), 7461 UGEPIIdx->getValue() - GEPIIdx->getValue()); 7462 UGEPI->setOperand(1, NewUGEPIIdx); 7463 // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not 7464 // inbounds to avoid UB. 7465 if (!GEPI->isInBounds()) { 7466 UGEPI->setIsInBounds(false); 7467 } 7468 } 7469 // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not 7470 // alive on IndirectBr edges). 7471 assert(find_if(GEPIOp->users(), [&](User *Usr) { 7472 return cast<Instruction>(Usr)->getParent() != SrcBlock; 7473 }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock"); 7474 return true; 7475 } 7476 7477 bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) { 7478 // Bail out if we inserted the instruction to prevent optimizations from 7479 // stepping on each other's toes. 7480 if (InsertedInsts.count(I)) 7481 return false; 7482 7483 // TODO: Move into the switch on opcode below here. 7484 if (PHINode *P = dyn_cast<PHINode>(I)) { 7485 // It is possible for very late stage optimizations (such as SimplifyCFG) 7486 // to introduce PHI nodes too late to be cleaned up. If we detect such a 7487 // trivial PHI, go ahead and zap it here. 7488 if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) { 7489 LargeOffsetGEPMap.erase(P); 7490 P->replaceAllUsesWith(V); 7491 P->eraseFromParent(); 7492 ++NumPHIsElim; 7493 return true; 7494 } 7495 return false; 7496 } 7497 7498 if (CastInst *CI = dyn_cast<CastInst>(I)) { 7499 // If the source of the cast is a constant, then this should have 7500 // already been constant folded. The only reason NOT to constant fold 7501 // it is if something (e.g. LSR) was careful to place the constant 7502 // evaluation in a block other than then one that uses it (e.g. to hoist 7503 // the address of globals out of a loop). If this is the case, we don't 7504 // want to forward-subst the cast. 
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
          TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (auto *Cmp = dyn_cast<CmpInst>(I))
    if (optimizeCmp(Cmp, ModifiedDT))
      return true;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    bool Modified = optimizeLoadExt(LI);
    unsigned AS = LI->getPointerAddressSpace();
    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
    return Modified;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    unsigned AS = SI->getPointerAddressSpace();
    return optimizeMemoryInst(I, SI->getOperand(1),
                              SI->getOperand(0)->getType(), AS);
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  // TODO: Move this into the switch on opcode - it handles shifts already.
  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (CI && TLI->hasExtractBitsInsn())
      if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
        return true;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      NC->setDebugLoc(GEPI->getDebugLoc());
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
    return false;
  }

  if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    // freeze(icmp a, const) -> icmp (freeze a), const
    // This helps generate efficient conditional jumps.
    Instruction *CmpI = nullptr;
    if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
      CmpI = II;
    else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
      CmpI = F->getFastMathFlags().none() ? F : nullptr;
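    // Illustrative example of the rewrite below (hypothetical IR): given
    //   %c = icmp eq i32 %x, 0
    //   %f = freeze i1 %c
    // the non-constant operand is frozen instead,
    //   %x.fr = freeze i32 %x
    //   %c = icmp eq i32 %x.fr, 0
    // and all uses of %f are replaced with %c.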

    if (CmpI && CmpI->hasOneUse()) {
      auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
      bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
                    isa<ConstantPointerNull>(Op0);
      bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
                    isa<ConstantPointerNull>(Op1);
      if (Const0 || Const1) {
        if (!Const0 || !Const1) {
          auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
          F->takeName(FI);
          CmpI->setOperand(Const0 ? 1 : 0, F);
        }
        FI->replaceAllUsesWith(CmpI);
        FI->eraseFromParent();
        return true;
      }
    }
    return false;
  }

  if (tryToSinkFreeOperands(I))
    return true;

  switch (I->getOpcode()) {
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return optimizeShiftInst(cast<BinaryOperator>(I));
  case Instruction::Call:
    return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
  case Instruction::Select:
    return optimizeSelectInst(cast<SelectInst>(I));
  case Instruction::ShuffleVector:
    return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
  case Instruction::Switch:
    return optimizeSwitchInst(cast<SwitchInst>(I));
  case Instruction::ExtractElement:
    return optimizeExtractElementInst(cast<ExtractElementInst>(I));
  }

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
bool CodeGenPrepare::makeBitReverse(Instruction &I) {
  if (!I.getType()->isIntegerTy() ||
      !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
                                     TLI->getValueType(*DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(
      &I, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I)) {
        MadeBitReverse = MadeChange = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);

  return MadeChange;
}

// Some CGP optimizations may move or alter what's computed in a block. Check
// whether a dbg.value intrinsic could be pointed at a more appropriate operand.
bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
  assert(isa<DbgValueInst>(I));
  DbgValueInst &DVI = *cast<DbgValueInst>(I);

  // Does this dbg.value refer to a sunk address calculation?
  Value *Location = DVI.getVariableLocation();
  WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  if (SunkAddr) {
    // Point dbg.value at locally computed address, which should give the best
    // opportunity to be accurately lowered. This update may change the type of
    // pointer being referred to; however this makes no difference to debugging
    // information, and we can't generate bitcasts that may affect codegen.
    DVI.setOperand(0, MetadataAsValue::get(DVI.getContext(),
                                           ValueAsMetadata::get(SunkAddr)));
    return true;
  }
  return false;
}

// An llvm.dbg.value may be using a value before its definition, due to
// optimizations in this pass and others. Scan for such dbg.values, and rescue
// them by moving the dbg.value to immediately after the value definition.
// FIXME: Ideally this should never be necessary, and this has the potential
// to re-order dbg.value intrinsics.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  DominatorTree DT(F);

  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI)
        continue;

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());

      if (!VI || VI->isTerminator())
        continue;

      // If VI is a phi in a block with an EHPad terminator, we can't insert
      // after it.
      if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
        continue;

      // If the defining instruction dominates the dbg.value, we do not need
      // to move the dbg.value.
      if (DT.dominates(VI, DVI))
        continue;

      LLVM_DEBUG(dbgs() << "Moving Debug Value before:\n"
                        << *DVI << ' ' << *VI);
      DVI->removeFromParent();
      if (isa<PHINode>(VI))
        DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
      else
        DVI->insertAfter(VI);
      MadeChange = true;
      ++NumDbgValueMoved;
    }
  }
  return MadeChange;
}

/// Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    // The merging of mostly empty BBs can cause a degenerate branch.
    if (TBB == FBB)
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block to use the first condition directly in
    // the branch instruction, and remove the no longer needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes from
    // the newly generated BB (TmpBB). In the other successor we need to add
    // one incoming edge to the PHI nodes, because both branch instructions now
    // target the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    TBB->replacePhiUsesWith(&BB, TmpBB);

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This
      // choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This
      // choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    ModifiedDT = true;
    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
               TmpBB->dump());
  }
  return MadeChange;
}
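
// Worked example for the branch-weight choice above (illustrative numbers,
// not from the source): for the "or" form with original !prof weights
// A = TrueWeight = 1 and B = FalseWeight = 3, Br1 gets weights (A, A + 2B) =
// (1, 7) and Br2 gets (A, 2B) = (1, 6), so the overall probability of
// reaching TBB is 1/8 + (7/8) * (1/7) = 1/4, matching the original
// A / (A + B) = 1/4.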