//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of select created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden, cl::init(false),
    cl::ZeroOrMore,
    cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
             "profile, we cannot tell the function is cold for sure because "
             "it may be a function newly added without ever being sampled. "
             "With the flag enabled, compiler can put such profile unknown "
             "functions into a special section, so runtime system can choose "
             "to handle it in a different way than .text section, to save "
             "RAM for example. "));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool> OptimizePhiTypes(
    "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
    cl::desc("Enable converting phi types in CodeGenPrepare"));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<
      AssertingVH<Value>,
      SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of new GEP base after splitting the GEPs having large offset.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map serial numbers to Large offset GEPs.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of SExt promoted.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }

  void removeAllAssertingVHReferences(Value *V);
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                          Type *AccessTy, unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, bool &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);

  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
                                   Value *Arg1, CmpInst *Cmp,
                                   Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  void verifyBFIUpdates(Function &F);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  OptSize = F.hasOptSize();
  if (ProfileGuidedSectionPrefix) {
    // The hot attribute overwrites profile count based hotness, while profile
    // count based hotness overwrites the cold attribute.
    // This is a conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or (2) the
    // function has an attribute of cold.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
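  // Illustrative sketch only (the names and types are assumptions, not output
  // of this pass): on a target that reports 64-bit division as slow with a
  // 32-bit bypass width, a divide such as
  //   %q = udiv i64 %a, %b
  // is rewritten by bypassSlowDivision into a runtime check on the high bits
  // of %a and %b that selects between a cheap 32-bit udiv (when both operands
  // fit in 32 bits) and the original 64-bit udiv.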
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      // F.hasOptSize is already checked in the outer if statement.
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  bool ModifiedDT = false;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= SplitIndirectBrCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    DT.reset();
    for (Function::iterator I = F.begin(); I != F.end();) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  NewGEPBases.clear();
  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
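    // Illustrative sketch only: after the dead-block cleanup above, a pair
    // such as
    //   pred:
    //     br label %bb
    //   bb:                          ; single predecessor is %pred
    //     ...
    // is collapsed by eliminateFallThrough, which merges the contents of %bb
    // into %pred and removes %bb.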
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);

#ifndef NDEBUG
  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);
#endif

  return EverMadeChange;
}

/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  const auto &I =
      llvm::find_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
  if (I == GEPVector.end())
    return;

  GEPVector.erase(I);
  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}

// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB);
      Preds.insert(SinglePred);
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
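/// Illustrative sketch only: a block such as
///   bb:
///     %p = phi i32 [ %x, %pred1 ], [ %y, %pred2 ]
///     br label %dest
/// contains nothing but PHIs (and possibly debug intrinsics) before an
/// unconditional branch, so %dest is returned as its merge destination,
/// provided canMergeBlocks(bb, dest) agrees.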
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
      for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
        if (DestBB == CBI->getSuccessor(i))
          return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred. In
  // that case, there is no reason to skip merging because COPYs are expected
  // to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure the relocation of derived pointer is defined after
  // relocation of base pointer. If we find a relocation corresponding to base
  // defined earlier than relocation of base then we move relocation of base
  // right before found relocation. We consider only relocation in the same
  // basic block as relocation of base. Relocations from other basic blocks
  // will be skipped by the optimization and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto *RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when base dominates derived
      // relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    //
    // bb1:
    //   ...
    //   %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //   br label %merge
    //
    // bb2:
    //   ...
    //   %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //   br label %merge
    //
    // merge:
    //   %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //   %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast no matter whether there is already one or not. In this way, we
    // can handle all cases, and the extra bitcast should be optimized away in
    // later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement =
        Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
                          makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// Sink the specified cast instruction into its user blocks.
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock *, CastInst *> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
                                      CI->getType(), "", &*InsertPt);
      InsertedCast->setDebugLoc(CI->getDebugLoc());
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    salvageDebugInfo(*CI);
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  // than sinking only nop casts, but is helpful on some platforms.
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
    if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
                                 ASC->getDestAddressSpace()))
      return false;
  }

  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT))
    return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// If given \p PN is an inductive variable with value IVInc coming from the
/// backedge, and on each iteration it gets increased by Step, return pair
/// <IVInc, Step>. Otherwise, return None.
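/// Illustrative sketch only: for a canonical loop of the form
///   loop:
///     %iv = phi i64 [ 0, %preheader ], [ %iv.next, %loop ]
///     %iv.next = add i64 %iv, 1
///     ...
///     br i1 %cond, label %loop, label %exit
/// this returns the pair <%iv.next, i64 1>.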
static Optional<std::pair<Instruction *, Constant *>>
getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
  const Loop *L = LI->getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
    return None;
  auto *IVInc =
      dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
  if (!IVInc)
    return None;
  Constant *Step = nullptr;
  if (match(IVInc, m_Sub(m_Specific(PN), m_Constant(Step))))
    return std::make_pair(IVInc, ConstantExpr::getNeg(Step));
  if (match(IVInc, m_Add(m_Specific(PN), m_Constant(Step))))
    return std::make_pair(IVInc, Step);
  if (match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
                       m_Specific(PN), m_Constant(Step)))))
    return std::make_pair(IVInc, ConstantExpr::getNeg(Step));
  if (match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
                       m_Specific(PN), m_Constant(Step)))))
    return std::make_pair(IVInc, Step);
  return None;
}

static bool isIVIncrement(const BinaryOperator *BO, const LoopInfo *LI) {
  auto *PN = dyn_cast<PHINode>(BO->getOperand(0));
  if (!PN)
    return false;
  if (auto IVInc = getIVIncrement(PN, LI))
    return IVInc->first == BO;
  return false;
}

bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
                                                 Value *Arg0, Value *Arg1,
                                                 CmpInst *Cmp,
                                                 Intrinsic::ID IID) {
  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
    if (!isIVIncrement(BO, LI))
      return false;
    const Loop *L = LI->getLoopFor(BO->getParent());
    // The IV increment may have users other than the IV. We do not want to
    // make dominance queries to analyze the legality of moving it towards the
    // cmp, so just check that there are no other users.
    if (!BO->hasOneUse())
      return false;
    // Do not risk moving the increment into a child loop.
    if (LI->getLoopFor(Cmp->getParent()) != L)
      return false;
    // Ultimately, the insertion point must dominate the latch. This should be
    // a cheap check because no CFG changes and no dom tree recomputation
    // happen during the transform.
    Function *F = BO->getParent()->getParent();
    return getDT(*F).dominates(Cmp->getParent(), L->getLoopLatch());
  };
  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
    // We used to use a dominator tree here to allow multi-block optimization.
    // But that was problematic because:
    // 1. It could cause a perf regression by hoisting the math op into the
    //    critical path.
    // 2. It could cause a perf regression by creating a value that was live
    //    across multiple blocks and increasing register pressure.
    // 3. Use of a dominator tree could cause large compile-time regression.
    //    This is because we recompute the DT on every change in the main CGP
    //    run-loop. The recomputing is probably unnecessary in many cases, so
    //    if that was fixed, using a DT here would be ok.
    //
    // There is one important particular case we still want to handle: if BO is
    // the IV increment. Important properties that make it profitable:
    // - We can speculate IV increment anywhere in the loop (as long as the
    //   indvar Phi is its only user);
    // - Upon computing Cmp, we effectively compute something equivalent to the
    //   IV increment (even though it may look different in the IR). So moving
    //   it up to the cmp point does not really increase register pressure.
    return false;
  }

  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
  if (BO->getOpcode() == Instruction::Add &&
      IID == Intrinsic::usub_with_overflow) {
    assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
    Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
  }

  // Insert at the first instruction of the pair.
  Instruction *InsertPt = nullptr;
  for (Instruction &Iter : *Cmp->getParent()) {
    // If BO is an XOR, it is not guaranteed that it comes after both inputs to
    // the overflow intrinsic are defined.
    if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
      InsertPt = &Iter;
      break;
    }
  }
  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");

  IRBuilder<> Builder(InsertPt);
  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
  if (BO->getOpcode() != Instruction::Xor) {
    Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
    BO->replaceAllUsesWith(Math);
  } else
    assert(BO->hasOneUse() &&
           "Patterns with XOr should use the BO only in the compare");
  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
  Cmp->replaceAllUsesWith(OV);
  Cmp->eraseFromParent();
  BO->eraseFromParent();
  return true;
}

/// Match special-case patterns that check for unsigned add overflow.
static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
                                                   BinaryOperator *&Add) {
  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);

  // We are not expecting non-canonical/degenerate code. Just bail out.
  if (isa<Constant>(A))
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
    B = ConstantInt::get(B->getType(), 1);
  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
    B = ConstantInt::get(B->getType(), -1);
  else
    return false;

  // Check the users of the variable operand of the compare looking for an add
  // with the adjusted constant.
  for (User *U : A->users()) {
    if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
      Add = cast<BinaryOperator>(U);
      return true;
    }
  }
  return false;
}

/// Try to combine the compare into a call to the llvm.uadd.with.overflow
/// intrinsic. Return true if any changes were made.
bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
                                               bool &ModifiedDT) {
  Value *A, *B;
  BinaryOperator *Add;
  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
    if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
      return false;
    // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
    A = Add->getOperand(0);
    B = Add->getOperand(1);
  }

  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
                                 TLI->getValueType(*DL, Add->getType()),
                                 Add->hasNUsesOrMore(2)))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp.
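  // Illustrative sketch only of the rewrite performed below (value names are
  // assumptions):
  //   %add = add i64 %a, %b
  //   %cmp = icmp ult i64 %add, %a        ; overflow check
  // becomes
  //   %m   = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  //   %add = extractvalue { i64, i1 } %m, 0
  //   %cmp = extractvalue { i64, i1 } %m, 1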
1444 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) 1445 return false; 1446 1447 if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp, 1448 Intrinsic::uadd_with_overflow)) 1449 return false; 1450 1451 // Reset callers - do not crash by iterating over a dead instruction. 1452 ModifiedDT = true; 1453 return true; 1454 } 1455 1456 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp, 1457 bool &ModifiedDT) { 1458 // We are not expecting non-canonical/degenerate code. Just bail out. 1459 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1460 if (isa<Constant>(A) && isa<Constant>(B)) 1461 return false; 1462 1463 // Convert (A u> B) to (A u< B) to simplify pattern matching. 1464 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1465 if (Pred == ICmpInst::ICMP_UGT) { 1466 std::swap(A, B); 1467 Pred = ICmpInst::ICMP_ULT; 1468 } 1469 // Convert special-case: (A == 0) is the same as (A u< 1). 1470 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { 1471 B = ConstantInt::get(B->getType(), 1); 1472 Pred = ICmpInst::ICMP_ULT; 1473 } 1474 // Convert special-case: (A != 0) is the same as (0 u< A). 1475 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { 1476 std::swap(A, B); 1477 Pred = ICmpInst::ICMP_ULT; 1478 } 1479 if (Pred != ICmpInst::ICMP_ULT) 1480 return false; 1481 1482 // Walk the users of a variable operand of a compare looking for a subtract or 1483 // add with that same operand. Also match the 2nd operand of the compare to 1484 // the add/sub, but that may be a negated constant operand of an add. 1485 Value *CmpVariableOperand = isa<Constant>(A) ? B : A; 1486 BinaryOperator *Sub = nullptr; 1487 for (User *U : CmpVariableOperand->users()) { 1488 // A - B, A u< B --> usubo(A, B) 1489 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { 1490 Sub = cast<BinaryOperator>(U); 1491 break; 1492 } 1493 1494 // A + (-C), A u< C (canonicalized form of (sub A, C)) 1495 const APInt *CmpC, *AddC; 1496 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && 1497 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { 1498 Sub = cast<BinaryOperator>(U); 1499 break; 1500 } 1501 } 1502 if (!Sub) 1503 return false; 1504 1505 if (!TLI->shouldFormOverflowOp(ISD::USUBO, 1506 TLI->getValueType(*DL, Sub->getType()), 1507 Sub->hasNUsesOrMore(2))) 1508 return false; 1509 1510 if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1), 1511 Cmp, Intrinsic::usub_with_overflow)) 1512 return false; 1513 1514 // Reset callers - do not crash by iterating over a dead instruction. 1515 ModifiedDT = true; 1516 return true; 1517 } 1518 1519 /// Sink the given CmpInst into user blocks to reduce the number of virtual 1520 /// registers that must be created and coalesced. This is a clear win except on 1521 /// targets with multiple condition code registers (PowerPC), where it might 1522 /// lose; some adjustment may be wanted there. 1523 /// 1524 /// Return true if any changes are made. 1525 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) { 1526 if (TLI.hasMultipleConditionRegisters()) 1527 return false; 1528 1529 // Avoid sinking soft-FP comparisons, since this can move them into a loop. 1530 if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp)) 1531 return false; 1532 1533 // Only insert a cmp in each block once. 
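// The map below guarantees that at most one clone of the cmp is created per
// user block. As an illustrative example (value and block names are ours):
// if %c = icmp eq i32 %x, 0 is defined in %entry but only used by branches
// in %bb1 and %bb2, one copy of the icmp is materialized in each of %bb1 and
// %bb2, the uses are redirected to the local copies, and the original
// compare is erased once it has no remaining uses.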
1534 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 1535 1536 bool MadeChange = false; 1537 for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end(); 1538 UI != E; ) { 1539 Use &TheUse = UI.getUse(); 1540 Instruction *User = cast<Instruction>(*UI); 1541 1542 // Preincrement use iterator so we don't invalidate it. 1543 ++UI; 1544 1545 // Don't bother for PHI nodes. 1546 if (isa<PHINode>(User)) 1547 continue; 1548 1549 // Figure out which BB this cmp is used in. 1550 BasicBlock *UserBB = User->getParent(); 1551 BasicBlock *DefBB = Cmp->getParent(); 1552 1553 // If this user is in the same block as the cmp, don't change the cmp. 1554 if (UserBB == DefBB) continue; 1555 1556 // If we have already inserted a cmp into this block, use it. 1557 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 1558 1559 if (!InsertedCmp) { 1560 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1561 assert(InsertPt != UserBB->end()); 1562 InsertedCmp = 1563 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), 1564 Cmp->getOperand(0), Cmp->getOperand(1), "", 1565 &*InsertPt); 1566 // Propagate the debug info. 1567 InsertedCmp->setDebugLoc(Cmp->getDebugLoc()); 1568 } 1569 1570 // Replace a use of the cmp with a use of the new cmp. 1571 TheUse = InsertedCmp; 1572 MadeChange = true; 1573 ++NumCmpUses; 1574 } 1575 1576 // If we removed all uses, nuke the cmp. 1577 if (Cmp->use_empty()) { 1578 Cmp->eraseFromParent(); 1579 MadeChange = true; 1580 } 1581 1582 return MadeChange; 1583 } 1584 1585 /// For a pattern like: 1586 /// 1587 /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB) 1588 /// ... 1589 /// DomBB: 1590 /// ... 1591 /// br DomCond, TrueBB, CmpBB 1592 /// CmpBB: (with DomBB being the single predecessor) 1593 /// ... 1594 /// Cmp = icmp eq CmpOp0, CmpOp1 1595 /// ... 1596 /// 1597 /// this would use two comparisons on targets where the lowering of icmp 1598 /// sgt/slt differs from the lowering of icmp eq (PowerPC). This function tries 1599 /// to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into 'Cmp = icmp slt/sgt CmpOp0, 1600 /// CmpOp1'. After that, DomCond and Cmp can share the same comparison, saving 1601 /// one comparison. 1602 /// 1603 /// Return true if any changes are made. 1604 static bool foldICmpWithDominatingICmp(CmpInst *Cmp, 1605 const TargetLowering &TLI) { 1606 if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp()) 1607 return false; 1608 1609 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1610 if (Pred != ICmpInst::ICMP_EQ) 1611 return false; 1612 1613 // If icmp eq has users other than BranchInst and SelectInst, converting it to 1614 // icmp slt/sgt would introduce more redundant LLVM IR. 1615 for (User *U : Cmp->users()) { 1616 if (isa<BranchInst>(U)) 1617 continue; 1618 if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp) 1619 continue; 1620 return false; 1621 } 1622 1623 // This is a cheap/incomplete check for dominance - just match a single 1624 // predecessor with a conditional branch. 1625 BasicBlock *CmpBB = Cmp->getParent(); 1626 BasicBlock *DomBB = CmpBB->getSinglePredecessor(); 1627 if (!DomBB) 1628 return false; 1629 1630 // We want to ensure that the only way control gets to the comparison of 1631 // interest is that a less/greater than comparison on the same operands is 1632 // false.
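// Worked example (a sketch with our own value names): given a dominating
//   %dom = icmp sgt i32 %a, %b
//   br i1 %dom, label %TrueBB, label %CmpBB
// and %c = icmp eq i32 %a, %b inside CmpBB, the code below turns %c into
// icmp slt i32 %a, %b and swaps the successors/operands of every
// branch/select user of %c. On the edge into CmpBB we already know
// %a <= %b, so (%a == %b) and !(%a < %b) are equivalent there.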
1633 Value *DomCond; 1634 BasicBlock *TrueBB, *FalseBB; 1635 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB))) 1636 return false; 1637 if (CmpBB != FalseBB) 1638 return false; 1639 1640 Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1); 1641 ICmpInst::Predicate DomPred; 1642 if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1)))) 1643 return false; 1644 if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT) 1645 return false; 1646 1647 // Convert the equality comparison to the opposite of the dominating 1648 // comparison and swap the direction for all branch/select users. 1649 // We have conceptually converted: 1650 // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>; 1651 // to 1652 // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>; 1653 // And similarly for branches. 1654 for (User *U : Cmp->users()) { 1655 if (auto *BI = dyn_cast<BranchInst>(U)) { 1656 assert(BI->isConditional() && "Must be conditional"); 1657 BI->swapSuccessors(); 1658 continue; 1659 } 1660 if (auto *SI = dyn_cast<SelectInst>(U)) { 1661 // Swap operands 1662 SI->swapValues(); 1663 SI->swapProfMetadata(); 1664 continue; 1665 } 1666 llvm_unreachable("Must be a branch or a select"); 1667 } 1668 Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred)); 1669 return true; 1670 } 1671 1672 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) { 1673 if (sinkCmpExpression(Cmp, *TLI)) 1674 return true; 1675 1676 if (combineToUAddWithOverflow(Cmp, ModifiedDT)) 1677 return true; 1678 1679 if (combineToUSubWithOverflow(Cmp, ModifiedDT)) 1680 return true; 1681 1682 if (foldICmpWithDominatingICmp(Cmp, *TLI)) 1683 return true; 1684 1685 return false; 1686 } 1687 1688 /// Duplicate and sink the given 'and' instruction into user blocks where it is 1689 /// used in a compare to allow isel to generate better code for targets where 1690 /// this operation can be combined. 1691 /// 1692 /// Return true if any changes are made. 1693 static bool sinkAndCmp0Expression(Instruction *AndI, 1694 const TargetLowering &TLI, 1695 SetOfInstrs &InsertedInsts) { 1696 // Double-check that we're not trying to optimize an instruction that was 1697 // already optimized by some other part of this pass. 1698 assert(!InsertedInsts.count(AndI) && 1699 "Attempting to optimize already optimized and instruction"); 1700 (void) InsertedInsts; 1701 1702 // Nothing to do for single use in same basic block. 1703 if (AndI->hasOneUse() && 1704 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) 1705 return false; 1706 1707 // Try to avoid cases where sinking/duplicating is likely to increase register 1708 // pressure. 1709 if (!isa<ConstantInt>(AndI->getOperand(0)) && 1710 !isa<ConstantInt>(AndI->getOperand(1)) && 1711 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) 1712 return false; 1713 1714 for (auto *U : AndI->users()) { 1715 Instruction *User = cast<Instruction>(U); 1716 1717 // Only sink 'and' feeding icmp with 0. 1718 if (!isa<ICmpInst>(User)) 1719 return false; 1720 1721 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); 1722 if (!CmpC || !CmpC->isZero()) 1723 return false; 1724 } 1725 1726 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) 1727 return false; 1728 1729 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); 1730 LLVM_DEBUG(AndI->getParent()->dump()); 1731 1732 // Push the 'and' into the same block as the icmp 0. 
There should only be 1733 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any 1734 // others, so we don't need to keep track of which BBs we insert into. 1735 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); 1736 UI != E; ) { 1737 Use &TheUse = UI.getUse(); 1738 Instruction *User = cast<Instruction>(*UI); 1739 1740 // Preincrement use iterator so we don't invalidate it. 1741 ++UI; 1742 1743 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); 1744 1745 // Keep the 'and' in the same place if the use is already in the same block. 1746 Instruction *InsertPt = 1747 User->getParent() == AndI->getParent() ? AndI : User; 1748 Instruction *InsertedAnd = 1749 BinaryOperator::Create(Instruction::And, AndI->getOperand(0), 1750 AndI->getOperand(1), "", InsertPt); 1751 // Propagate the debug info. 1752 InsertedAnd->setDebugLoc(AndI->getDebugLoc()); 1753 1754 // Replace a use of the 'and' with a use of the new 'and'. 1755 TheUse = InsertedAnd; 1756 ++NumAndUses; 1757 LLVM_DEBUG(User->getParent()->dump()); 1758 } 1759 1760 // We removed all uses, nuke the and. 1761 AndI->eraseFromParent(); 1762 return true; 1763 } 1764 1765 /// Check if the candidates could be combined with a shift instruction, which 1766 /// includes: 1767 /// 1. Truncate instruction 1768 /// 2. And instruction and the imm is a mask of the low bits: 1769 /// imm & (imm+1) == 0 1770 static bool isExtractBitsCandidateUse(Instruction *User) { 1771 if (!isa<TruncInst>(User)) { 1772 if (User->getOpcode() != Instruction::And || 1773 !isa<ConstantInt>(User->getOperand(1))) 1774 return false; 1775 1776 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 1777 1778 if ((Cimm & (Cimm + 1)).getBoolValue()) 1779 return false; 1780 } 1781 return true; 1782 } 1783 1784 /// Sink both shift and truncate instruction to the use of truncate's BB. 1785 static bool 1786 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 1787 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 1788 const TargetLowering &TLI, const DataLayout &DL) { 1789 BasicBlock *UserBB = User->getParent(); 1790 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 1791 auto *TruncI = cast<TruncInst>(User); 1792 bool MadeChange = false; 1793 1794 for (Value::user_iterator TruncUI = TruncI->user_begin(), 1795 TruncE = TruncI->user_end(); 1796 TruncUI != TruncE;) { 1797 1798 Use &TruncTheUse = TruncUI.getUse(); 1799 Instruction *TruncUser = cast<Instruction>(*TruncUI); 1800 // Preincrement use iterator so we don't invalidate it. 1801 1802 ++TruncUI; 1803 1804 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1805 if (!ISDOpcode) 1806 continue; 1807 1808 // If the use is actually a legal node, there will not be an 1809 // implicit truncate. 1810 // FIXME: always querying the result type is just an 1811 // approximation; some nodes' legality is determined by the 1812 // operand or other means. There's no good way to find out though. 1813 if (TLI.isOperationLegalOrCustom( 1814 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1815 continue; 1816 1817 // Don't bother for PHI nodes. 
1818 if (isa<PHINode>(TruncUser)) 1819 continue; 1820 1821 BasicBlock *TruncUserBB = TruncUser->getParent(); 1822 1823 if (UserBB == TruncUserBB) 1824 continue; 1825 1826 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1827 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1828 1829 if (!InsertedShift && !InsertedTrunc) { 1830 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1831 assert(InsertPt != TruncUserBB->end()); 1832 // Sink the shift 1833 if (ShiftI->getOpcode() == Instruction::AShr) 1834 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1835 "", &*InsertPt); 1836 else 1837 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1838 "", &*InsertPt); 1839 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1840 1841 // Sink the trunc 1842 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1843 TruncInsertPt++; 1844 assert(TruncInsertPt != TruncUserBB->end()); 1845 1846 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1847 TruncI->getType(), "", &*TruncInsertPt); 1848 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); 1849 1850 MadeChange = true; 1851 1852 TruncTheUse = InsertedTrunc; 1853 } 1854 } 1855 return MadeChange; 1856 } 1857 1858 /// Sink the shift *right* instruction into user blocks if the uses could 1859 /// potentially be combined with this shift instruction and generate BitExtract 1860 /// instruction. It will only be applied if the architecture supports BitExtract 1861 /// instruction. Here is an example: 1862 /// BB1: 1863 /// %x.extract.shift = lshr i64 %arg1, 32 1864 /// BB2: 1865 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1866 /// ==> 1867 /// 1868 /// BB2: 1869 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1870 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1871 /// 1872 /// CodeGen will recognize the pattern in BB2 and generate BitExtract 1873 /// instruction. 1874 /// Return true if any changes are made. 1875 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1876 const TargetLowering &TLI, 1877 const DataLayout &DL) { 1878 BasicBlock *DefBB = ShiftI->getParent(); 1879 1880 /// Only insert instructions in each block once. 1881 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1882 1883 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1884 1885 bool MadeChange = false; 1886 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1887 UI != E;) { 1888 Use &TheUse = UI.getUse(); 1889 Instruction *User = cast<Instruction>(*UI); 1890 // Preincrement use iterator so we don't invalidate it. 1891 ++UI; 1892 1893 // Don't bother for PHI nodes. 1894 if (isa<PHINode>(User)) 1895 continue; 1896 1897 if (!isExtractBitsCandidateUse(User)) 1898 continue; 1899 1900 BasicBlock *UserBB = User->getParent(); 1901 1902 if (UserBB == DefBB) { 1903 // If the shift and truncate instruction are in the same BB. The use of 1904 // the truncate(TruncUse) may still introduce another truncate if not 1905 // legal. In this case, we would like to sink both shift and truncate 1906 // instruction to the BB of TruncUse. 1907 // for example: 1908 // BB1: 1909 // i64 shift.result = lshr i64 opnd, imm 1910 // trunc.result = trunc shift.result to i16 1911 // 1912 // BB2: 1913 // ----> We will have an implicit truncate here if the architecture does 1914 // not have i16 compare. 
1915 // cmp i16 trunc.result, opnd2 1916 // 1917 if (isa<TruncInst>(User) && shiftIsLegal 1918 // If the type of the truncate is legal, no truncate will be 1919 // introduced in other basic blocks. 1920 && 1921 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1922 MadeChange = 1923 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1924 1925 continue; 1926 } 1927 // If we have already inserted a shift into this block, use it. 1928 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1929 1930 if (!InsertedShift) { 1931 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1932 assert(InsertPt != UserBB->end()); 1933 1934 if (ShiftI->getOpcode() == Instruction::AShr) 1935 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1936 "", &*InsertPt); 1937 else 1938 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1939 "", &*InsertPt); 1940 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1941 1942 MadeChange = true; 1943 } 1944 1945 // Replace a use of the shift with a use of the new shift. 1946 TheUse = InsertedShift; 1947 } 1948 1949 // If we removed all uses, or there are none, nuke the shift. 1950 if (ShiftI->use_empty()) { 1951 salvageDebugInfo(*ShiftI); 1952 ShiftI->eraseFromParent(); 1953 MadeChange = true; 1954 } 1955 1956 return MadeChange; 1957 } 1958 1959 /// If counting leading or trailing zeros is an expensive operation and a zero 1960 /// input is defined, add a check for zero to avoid calling the intrinsic. 1961 /// 1962 /// We want to transform: 1963 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1964 /// 1965 /// into: 1966 /// entry: 1967 /// %cmpz = icmp eq i64 %A, 0 1968 /// br i1 %cmpz, label %cond.end, label %cond.false 1969 /// cond.false: 1970 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1971 /// br label %cond.end 1972 /// cond.end: 1973 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1974 /// 1975 /// If the transform is performed, return true and set ModifiedDT to true. 1976 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1977 const TargetLowering *TLI, 1978 const DataLayout *DL, 1979 bool &ModifiedDT) { 1980 // If a zero input is undefined, it doesn't make sense to despeculate that. 1981 if (match(CountZeros->getOperand(1), m_One())) 1982 return false; 1983 1984 // If it's cheap to speculate, there's nothing to do. 1985 auto IntrinsicID = CountZeros->getIntrinsicID(); 1986 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1987 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1988 return false; 1989 1990 // Only handle legal scalar cases. Anything else requires too much work. 1991 Type *Ty = CountZeros->getType(); 1992 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1993 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 1994 return false; 1995 1996 // The intrinsic will be sunk behind a compare against zero and branch. 1997 BasicBlock *StartBlock = CountZeros->getParent(); 1998 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1999 2000 // Create another block after the count zero intrinsic. A PHI will be added 2001 // in this block to select the result of the intrinsic or the bit-width 2002 // constant if the input to the intrinsic is zero. 
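// Sketch of the CFG produced below (illustrative; these are the variables
// used in the code rather than the block labels from the doc comment above):
//   StartBlock: %cmpz = icmp eq i64 %A, 0
//               br i1 %cmpz, label %EndBlock, label %CallBlock
//   CallBlock:  %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
//               br label %EndBlock
//   EndBlock:   %ctz = phi i64 [ 64, %StartBlock ], [ %z, %CallBlock ]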
2003 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 2004 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 2005 2006 // Set up a builder to create a compare, conditional branch, and PHI. 2007 IRBuilder<> Builder(CountZeros->getContext()); 2008 Builder.SetInsertPoint(StartBlock->getTerminator()); 2009 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 2010 2011 // Replace the unconditional branch that was created by the first split with 2012 // a compare against zero and a conditional branch. 2013 Value *Zero = Constant::getNullValue(Ty); 2014 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 2015 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 2016 StartBlock->getTerminator()->eraseFromParent(); 2017 2018 // Create a PHI in the end block to select either the output of the intrinsic 2019 // or the bit width of the operand. 2020 Builder.SetInsertPoint(&EndBlock->front()); 2021 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 2022 CountZeros->replaceAllUsesWith(PN); 2023 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 2024 PN->addIncoming(BitWidth, StartBlock); 2025 PN->addIncoming(CountZeros, CallBlock); 2026 2027 // We are explicitly handling the zero case, so we can set the intrinsic's 2028 // undefined zero argument to 'true'. This will also prevent reprocessing the 2029 // intrinsic; we only despeculate when a zero input is defined. 2030 CountZeros->setArgOperand(1, Builder.getTrue()); 2031 ModifiedDT = true; 2032 return true; 2033 } 2034 2035 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { 2036 BasicBlock *BB = CI->getParent(); 2037 2038 // Lower inline assembly if we can. 2039 // If we found an inline asm expression, and if the target knows how to 2040 // lower it to normal LLVM code, do so now. 2041 if (CI->isInlineAsm()) { 2042 if (TLI->ExpandInlineAsm(CI)) { 2043 // Avoid invalidating the iterator. 2044 CurInstIterator = BB->begin(); 2045 // Avoid processing instructions out of order, which could cause 2046 // reuse before a value is defined. 2047 SunkAddrs.clear(); 2048 return true; 2049 } 2050 // Sink address computing for memory operands into the block. 2051 if (optimizeInlineAsmInst(CI)) 2052 return true; 2053 } 2054 2055 // Align the pointer arguments to this call if the target thinks it's a good 2056 // idea. 2057 unsigned MinSize, PrefAlign; 2058 if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 2059 for (auto &Arg : CI->arg_operands()) { 2060 // We want to align both objects whose address is used directly and 2061 // objects whose address is used in casts and GEPs, though it only makes 2062 // sense for GEPs if the offset is a multiple of the desired alignment and 2063 // if size - offset meets the size threshold. 2064 if (!Arg->getType()->isPointerTy()) 2065 continue; 2066 APInt Offset(DL->getIndexSizeInBits( 2067 cast<PointerType>(Arg->getType())->getAddressSpace()), 2068 0); 2069 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 2070 uint64_t Offset2 = Offset.getLimitedValue(); 2071 if ((Offset2 & (PrefAlign-1)) != 0) 2072 continue; 2073 AllocaInst *AI; 2074 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 2075 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 2076 AI->setAlignment(Align(PrefAlign)); 2077 // Global variables can only be aligned if they are defined in this 2078 // object (i.e.
they are uniquely initialized in this object), and 2079 // over-aligning global variables that have an explicit section is 2080 // forbidden. 2081 GlobalVariable *GV; 2082 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 2083 GV->getPointerAlignment(*DL) < PrefAlign && 2084 DL->getTypeAllocSize(GV->getValueType()) >= 2085 MinSize + Offset2) 2086 GV->setAlignment(MaybeAlign(PrefAlign)); 2087 } 2088 // If this is a memcpy (or similar), then we may be able to improve the 2089 // alignment. 2090 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 2091 Align DestAlign = getKnownAlignment(MI->getDest(), *DL); 2092 MaybeAlign MIDestAlign = MI->getDestAlign(); 2093 if (!MIDestAlign || DestAlign > *MIDestAlign) 2094 MI->setDestAlignment(DestAlign); 2095 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { 2096 MaybeAlign MTISrcAlign = MTI->getSourceAlign(); 2097 Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL); 2098 if (!MTISrcAlign || SrcAlign > *MTISrcAlign) 2099 MTI->setSourceAlignment(SrcAlign); 2100 } 2101 } 2102 } 2103 2104 // If we have a cold call site, try to sink addressing computation into the 2105 // cold block. This interacts with our handling for loads and stores to 2106 // ensure that we can fold all uses of a potential addressing computation 2107 // into their uses. TODO: generalize this to work over profiling data 2108 if (CI->hasFnAttr(Attribute::Cold) && 2109 !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get())) 2110 for (auto &Arg : CI->arg_operands()) { 2111 if (!Arg->getType()->isPointerTy()) 2112 continue; 2113 unsigned AS = Arg->getType()->getPointerAddressSpace(); 2114 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 2115 } 2116 2117 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 2118 if (II) { 2119 switch (II->getIntrinsicID()) { 2120 default: break; 2121 case Intrinsic::assume: { 2122 Value *Operand = II->getOperand(0); 2123 II->eraseFromParent(); 2124 // Prune the operand, it's most likely dead. 2125 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 2126 RecursivelyDeleteTriviallyDeadInstructions( 2127 Operand, TLInfo, nullptr, 2128 [&](Value *V) { removeAllAssertingVHReferences(V); }); 2129 }); 2130 return true; 2131 } 2132 2133 case Intrinsic::experimental_widenable_condition: { 2134 // Give up on future widening opportunities so that we can fold away dead 2135 // paths and merge blocks before going into block-local instruction 2136 // selection. 2137 if (II->use_empty()) { 2138 II->eraseFromParent(); 2139 return true; 2140 } 2141 Constant *RetVal = ConstantInt::getTrue(II->getContext()); 2142 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 2143 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 2144 }); 2145 return true; 2146 } 2147 case Intrinsic::objectsize: 2148 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 2149 case Intrinsic::is_constant: 2150 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 2151 case Intrinsic::aarch64_stlxr: 2152 case Intrinsic::aarch64_stxr: { 2153 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 2154 if (!ExtVal || !ExtVal->hasOneUse() || 2155 ExtVal->getParent() == CI->getParent()) 2156 return false; 2157 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 2158 ExtVal->moveBefore(CI); 2159 // Mark this instruction as "inserted by CGP", so that other 2160 // optimizations don't touch it.
2161 InsertedInsts.insert(ExtVal); 2162 return true; 2163 } 2164 2165 case Intrinsic::launder_invariant_group: 2166 case Intrinsic::strip_invariant_group: { 2167 Value *ArgVal = II->getArgOperand(0); 2168 auto it = LargeOffsetGEPMap.find(II); 2169 if (it != LargeOffsetGEPMap.end()) { 2170 // Merge entries in LargeOffsetGEPMap to reflect the RAUW. 2171 // Make sure not to have to deal with iterator invalidation 2172 // after possibly adding ArgVal to LargeOffsetGEPMap. 2173 auto GEPs = std::move(it->second); 2174 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); 2175 LargeOffsetGEPMap.erase(II); 2176 } 2177 2178 II->replaceAllUsesWith(ArgVal); 2179 II->eraseFromParent(); 2180 return true; 2181 } 2182 case Intrinsic::cttz: 2183 case Intrinsic::ctlz: 2184 // If counting zeros is expensive, try to avoid it. 2185 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 2186 case Intrinsic::fshl: 2187 case Intrinsic::fshr: 2188 return optimizeFunnelShift(II); 2189 case Intrinsic::dbg_value: 2190 return fixupDbgValue(II); 2191 case Intrinsic::vscale: { 2192 // If datalayout has no special restrictions on vector data layout, 2193 // replace `llvm.vscale` by an equivalent constant expression 2194 // to benefit from cheap constant propagation. 2195 Type *ScalableVectorTy = 2196 VectorType::get(Type::getInt8Ty(II->getContext()), 1, true); 2197 if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) { 2198 auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo()); 2199 auto *One = ConstantInt::getSigned(II->getType(), 1); 2200 auto *CGep = 2201 ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One); 2202 II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType())); 2203 II->eraseFromParent(); 2204 return true; 2205 } 2206 break; 2207 } 2208 case Intrinsic::masked_gather: 2209 return optimizeGatherScatterInst(II, II->getArgOperand(0)); 2210 case Intrinsic::masked_scatter: 2211 return optimizeGatherScatterInst(II, II->getArgOperand(1)); 2212 } 2213 2214 SmallVector<Value *, 2> PtrOps; 2215 Type *AccessTy; 2216 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 2217 while (!PtrOps.empty()) { 2218 Value *PtrVal = PtrOps.pop_back_val(); 2219 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 2220 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 2221 return true; 2222 } 2223 } 2224 2225 // From here on out we're working with named functions. 2226 if (!CI->getCalledFunction()) return false; 2227 2228 // Lower all default uses of _chk calls. This is very similar 2229 // to what InstCombineCalls does, but here we are only lowering calls 2230 // to fortified library functions (e.g. __memcpy_chk) that have the default 2231 // "don't know" as the objectsize. Anything else should be left alone. 2232 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 2233 IRBuilder<> Builder(CI); 2234 if (Value *V = Simplifier.optimizeCall(CI, Builder)) { 2235 CI->replaceAllUsesWith(V); 2236 CI->eraseFromParent(); 2237 return true; 2238 } 2239 2240 return false; 2241 } 2242 2243 /// Look for opportunities to duplicate return instructions to the predecessor 2244 /// to enable tail call optimizations. 
The case it is currently looking for is: 2245 /// @code 2246 /// bb0: 2247 /// %tmp0 = tail call i32 @f0() 2248 /// br label %return 2249 /// bb1: 2250 /// %tmp1 = tail call i32 @f1() 2251 /// br label %return 2252 /// bb2: 2253 /// %tmp2 = tail call i32 @f2() 2254 /// br label %return 2255 /// return: 2256 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 2257 /// ret i32 %retval 2258 /// @endcode 2259 /// 2260 /// => 2261 /// 2262 /// @code 2263 /// bb0: 2264 /// %tmp0 = tail call i32 @f0() 2265 /// ret i32 %tmp0 2266 /// bb1: 2267 /// %tmp1 = tail call i32 @f1() 2268 /// ret i32 %tmp1 2269 /// bb2: 2270 /// %tmp2 = tail call i32 @f2() 2271 /// ret i32 %tmp2 2272 /// @endcode 2273 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) { 2274 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 2275 if (!RetI) 2276 return false; 2277 2278 PHINode *PN = nullptr; 2279 ExtractValueInst *EVI = nullptr; 2280 BitCastInst *BCI = nullptr; 2281 Value *V = RetI->getReturnValue(); 2282 if (V) { 2283 BCI = dyn_cast<BitCastInst>(V); 2284 if (BCI) 2285 V = BCI->getOperand(0); 2286 2287 EVI = dyn_cast<ExtractValueInst>(V); 2288 if (EVI) { 2289 V = EVI->getOperand(0); 2290 if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; })) 2291 return false; 2292 } 2293 2294 PN = dyn_cast<PHINode>(V); 2295 if (!PN) 2296 return false; 2297 } 2298 2299 if (PN && PN->getParent() != BB) 2300 return false; 2301 2302 auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) { 2303 const BitCastInst *BC = dyn_cast<BitCastInst>(Inst); 2304 if (BC && BC->hasOneUse()) 2305 Inst = BC->user_back(); 2306 2307 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) 2308 return II->getIntrinsicID() == Intrinsic::lifetime_end; 2309 return false; 2310 }; 2311 2312 // Make sure there are no instructions between the first instruction 2313 // and return. 2314 const Instruction *BI = BB->getFirstNonPHI(); 2315 // Skip over debug and the bitcast. 2316 while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI || 2317 isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI)) 2318 BI = BI->getNextNode(); 2319 if (BI != RetI) 2320 return false; 2321 2322 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 2323 /// call. 2324 const Function *F = BB->getParent(); 2325 SmallVector<BasicBlock*, 4> TailCallBBs; 2326 if (PN) { 2327 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2328 // Look through bitcasts. 2329 Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts(); 2330 CallInst *CI = dyn_cast<CallInst>(IncomingVal); 2331 BasicBlock *PredBB = PN->getIncomingBlock(I); 2332 // Make sure the phi value is indeed produced by the tail call. 
2333 if (CI && CI->hasOneUse() && CI->getParent() == PredBB && 2334 TLI->mayBeEmittedAsTailCall(CI) && 2335 attributesPermitTailCall(F, CI, RetI, *TLI)) 2336 TailCallBBs.push_back(PredBB); 2337 } 2338 } else { 2339 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2340 for (BasicBlock *Pred : predecessors(BB)) { 2341 if (!VisitedBBs.insert(Pred).second) 2342 continue; 2343 if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) { 2344 CallInst *CI = dyn_cast<CallInst>(I); 2345 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2346 attributesPermitTailCall(F, CI, RetI, *TLI)) 2347 TailCallBBs.push_back(Pred); 2348 } 2349 } 2350 } 2351 2352 bool Changed = false; 2353 for (auto const &TailCallBB : TailCallBBs) { 2354 // Make sure the call instruction is followed by an unconditional branch to 2355 // the return block. 2356 BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator()); 2357 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2358 continue; 2359 2360 // Duplicate the return into TailCallBB. 2361 (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB); 2362 assert(!VerifyBFIUpdates || 2363 BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB)); 2364 BFI->setBlockFreq( 2365 BB, 2366 (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency()); 2367 ModifiedDT = Changed = true; 2368 ++NumRetsDup; 2369 } 2370 2371 // If we eliminated all predecessors of the block, delete the block now. 2372 if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) 2373 BB->eraseFromParent(); 2374 2375 return Changed; 2376 } 2377 2378 //===----------------------------------------------------------------------===// 2379 // Memory Optimization 2380 //===----------------------------------------------------------------------===// 2381 2382 namespace { 2383 2384 /// This is an extended version of TargetLowering::AddrMode 2385 /// which holds actual Value*'s for register values. 2386 struct ExtAddrMode : public TargetLowering::AddrMode { 2387 Value *BaseReg = nullptr; 2388 Value *ScaledReg = nullptr; 2389 Value *OriginalValue = nullptr; 2390 bool InBounds = true; 2391 2392 enum FieldName { 2393 NoField = 0x00, 2394 BaseRegField = 0x01, 2395 BaseGVField = 0x02, 2396 BaseOffsField = 0x04, 2397 ScaledRegField = 0x08, 2398 ScaleField = 0x10, 2399 MultipleFields = 0xff 2400 }; 2401 2402 2403 ExtAddrMode() = default; 2404 2405 void print(raw_ostream &OS) const; 2406 void dump() const; 2407 2408 FieldName compare(const ExtAddrMode &other) { 2409 // First check that the types are the same on each field, as differing types 2410 // is something we can't cope with later on. 2411 if (BaseReg && other.BaseReg && 2412 BaseReg->getType() != other.BaseReg->getType()) 2413 return MultipleFields; 2414 if (BaseGV && other.BaseGV && 2415 BaseGV->getType() != other.BaseGV->getType()) 2416 return MultipleFields; 2417 if (ScaledReg && other.ScaledReg && 2418 ScaledReg->getType() != other.ScaledReg->getType()) 2419 return MultipleFields; 2420 2421 // Conservatively reject 'inbounds' mismatches. 2422 if (InBounds != other.InBounds) 2423 return MultipleFields; 2424 2425 // Check each field to see if it differs. 
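// For instance (an illustrative note, not from the original comments): two
// address modes that differ only in BaseReg compare as BaseRegField, which
// the caller may later reconcile by PHI-ing the two base registers; modes
// that differ in both BaseReg and Scale trip the popcount check below and
// yield MultipleFields, i.e. they are treated as incompatible.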
2426 unsigned Result = NoField; 2427 if (BaseReg != other.BaseReg) 2428 Result |= BaseRegField; 2429 if (BaseGV != other.BaseGV) 2430 Result |= BaseGVField; 2431 if (BaseOffs != other.BaseOffs) 2432 Result |= BaseOffsField; 2433 if (ScaledReg != other.ScaledReg) 2434 Result |= ScaledRegField; 2435 // Don't count 0 as being a different scale, because that actually means 2436 // unscaled (which will already be counted by having no ScaledReg). 2437 if (Scale && other.Scale && Scale != other.Scale) 2438 Result |= ScaleField; 2439 2440 if (countPopulation(Result) > 1) 2441 return MultipleFields; 2442 else 2443 return static_cast<FieldName>(Result); 2444 } 2445 2446 // An AddrMode is trivial if it involves no calculation i.e. it is just a base 2447 // with no offset. 2448 bool isTrivial() { 2449 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is 2450 // trivial if at most one of these terms is nonzero, except that BaseGV and 2451 // BaseReg both being zero actually means a null pointer value, which we 2452 // consider to be 'non-zero' here. 2453 return !BaseOffs && !Scale && !(BaseGV && BaseReg); 2454 } 2455 2456 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { 2457 switch (Field) { 2458 default: 2459 return nullptr; 2460 case BaseRegField: 2461 return BaseReg; 2462 case BaseGVField: 2463 return BaseGV; 2464 case ScaledRegField: 2465 return ScaledReg; 2466 case BaseOffsField: 2467 return ConstantInt::get(IntPtrTy, BaseOffs); 2468 } 2469 } 2470 2471 void SetCombinedField(FieldName Field, Value *V, 2472 const SmallVectorImpl<ExtAddrMode> &AddrModes) { 2473 switch (Field) { 2474 default: 2475 llvm_unreachable("Unhandled fields are expected to be rejected earlier"); 2476 break; 2477 case ExtAddrMode::BaseRegField: 2478 BaseReg = V; 2479 break; 2480 case ExtAddrMode::BaseGVField: 2481 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes 2482 // in the BaseReg field. 2483 assert(BaseReg == nullptr); 2484 BaseReg = V; 2485 BaseGV = nullptr; 2486 break; 2487 case ExtAddrMode::ScaledRegField: 2488 ScaledReg = V; 2489 // If we have a mix of scaled and unscaled addrmodes then we want scale 2490 // to be the scale and not zero. 2491 if (!Scale) 2492 for (const ExtAddrMode &AM : AddrModes) 2493 if (AM.Scale) { 2494 Scale = AM.Scale; 2495 break; 2496 } 2497 break; 2498 case ExtAddrMode::BaseOffsField: 2499 // The offset is no longer a constant, so it goes in ScaledReg with a 2500 // scale of 1. 2501 assert(ScaledReg == nullptr); 2502 ScaledReg = V; 2503 Scale = 1; 2504 BaseOffs = 0; 2505 break; 2506 } 2507 } 2508 }; 2509 2510 } // end anonymous namespace 2511 2512 #ifndef NDEBUG 2513 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2514 AM.print(OS); 2515 return OS; 2516 } 2517 #endif 2518 2519 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2520 void ExtAddrMode::print(raw_ostream &OS) const { 2521 bool NeedPlus = false; 2522 OS << "["; 2523 if (InBounds) 2524 OS << "inbounds "; 2525 if (BaseGV) { 2526 OS << (NeedPlus ? " + " : "") 2527 << "GV:"; 2528 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2529 NeedPlus = true; 2530 } 2531 2532 if (BaseOffs) { 2533 OS << (NeedPlus ? " + " : "") 2534 << BaseOffs; 2535 NeedPlus = true; 2536 } 2537 2538 if (BaseReg) { 2539 OS << (NeedPlus ? " + " : "") 2540 << "Base:"; 2541 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2542 NeedPlus = true; 2543 } 2544 if (Scale) { 2545 OS << (NeedPlus ? 
" + " : "") 2546 << Scale << "*"; 2547 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2548 } 2549 2550 OS << ']'; 2551 } 2552 2553 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2554 print(dbgs()); 2555 dbgs() << '\n'; 2556 } 2557 #endif 2558 2559 namespace { 2560 2561 /// This class provides transaction based operation on the IR. 2562 /// Every change made through this class is recorded in the internal state and 2563 /// can be undone (rollback) until commit is called. 2564 /// CGP does not check if instructions could be speculatively executed when 2565 /// moved. Preserving the original location would pessimize the debugging 2566 /// experience, as well as negatively impact the quality of sample PGO. 2567 class TypePromotionTransaction { 2568 /// This represents the common interface of the individual transaction. 2569 /// Each class implements the logic for doing one specific modification on 2570 /// the IR via the TypePromotionTransaction. 2571 class TypePromotionAction { 2572 protected: 2573 /// The Instruction modified. 2574 Instruction *Inst; 2575 2576 public: 2577 /// Constructor of the action. 2578 /// The constructor performs the related action on the IR. 2579 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2580 2581 virtual ~TypePromotionAction() = default; 2582 2583 /// Undo the modification done by this action. 2584 /// When this method is called, the IR must be in the same state as it was 2585 /// before this action was applied. 2586 /// \pre Undoing the action works if and only if the IR is in the exact same 2587 /// state as it was directly after this action was applied. 2588 virtual void undo() = 0; 2589 2590 /// Advocate every change made by this action. 2591 /// When the results on the IR of the action are to be kept, it is important 2592 /// to call this function, otherwise hidden information may be kept forever. 2593 virtual void commit() { 2594 // Nothing to be done, this action is not doing anything. 2595 } 2596 }; 2597 2598 /// Utility to remember the position of an instruction. 2599 class InsertionHandler { 2600 /// Position of an instruction. 2601 /// Either an instruction: 2602 /// - Is the first in a basic block: BB is used. 2603 /// - Has a previous instruction: PrevInst is used. 2604 union { 2605 Instruction *PrevInst; 2606 BasicBlock *BB; 2607 } Point; 2608 2609 /// Remember whether or not the instruction had a previous instruction. 2610 bool HasPrevInstruction; 2611 2612 public: 2613 /// Record the position of \p Inst. 2614 InsertionHandler(Instruction *Inst) { 2615 BasicBlock::iterator It = Inst->getIterator(); 2616 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2617 if (HasPrevInstruction) 2618 Point.PrevInst = &*--It; 2619 else 2620 Point.BB = Inst->getParent(); 2621 } 2622 2623 /// Insert \p Inst at the recorded position. 2624 void insert(Instruction *Inst) { 2625 if (HasPrevInstruction) { 2626 if (Inst->getParent()) 2627 Inst->removeFromParent(); 2628 Inst->insertAfter(Point.PrevInst); 2629 } else { 2630 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2631 if (Inst->getParent()) 2632 Inst->moveBefore(Position); 2633 else 2634 Inst->insertBefore(Position); 2635 } 2636 } 2637 }; 2638 2639 /// Move an instruction before another. 2640 class InstructionMoveBefore : public TypePromotionAction { 2641 /// Original position of the instruction. 2642 InsertionHandler Position; 2643 2644 public: 2645 /// Move \p Inst before \p Before. 
2646 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2647 : TypePromotionAction(Inst), Position(Inst) { 2648 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before 2649 << "\n"); 2650 Inst->moveBefore(Before); 2651 } 2652 2653 /// Move the instruction back to its original position. 2654 void undo() override { 2655 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2656 Position.insert(Inst); 2657 } 2658 }; 2659 2660 /// Set the operand of an instruction with a new value. 2661 class OperandSetter : public TypePromotionAction { 2662 /// Original operand of the instruction. 2663 Value *Origin; 2664 2665 /// Index of the modified instruction. 2666 unsigned Idx; 2667 2668 public: 2669 /// Set \p Idx operand of \p Inst with \p NewVal. 2670 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2671 : TypePromotionAction(Inst), Idx(Idx) { 2672 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2673 << "for:" << *Inst << "\n" 2674 << "with:" << *NewVal << "\n"); 2675 Origin = Inst->getOperand(Idx); 2676 Inst->setOperand(Idx, NewVal); 2677 } 2678 2679 /// Restore the original value of the instruction. 2680 void undo() override { 2681 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2682 << "for: " << *Inst << "\n" 2683 << "with: " << *Origin << "\n"); 2684 Inst->setOperand(Idx, Origin); 2685 } 2686 }; 2687 2688 /// Hide the operands of an instruction. 2689 /// Do as if this instruction was not using any of its operands. 2690 class OperandsHider : public TypePromotionAction { 2691 /// The list of original operands. 2692 SmallVector<Value *, 4> OriginalValues; 2693 2694 public: 2695 /// Remove \p Inst from the uses of the operands of \p Inst. 2696 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2697 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2698 unsigned NumOpnds = Inst->getNumOperands(); 2699 OriginalValues.reserve(NumOpnds); 2700 for (unsigned It = 0; It < NumOpnds; ++It) { 2701 // Save the current operand. 2702 Value *Val = Inst->getOperand(It); 2703 OriginalValues.push_back(Val); 2704 // Set a dummy one. 2705 // We could use OperandSetter here, but that would imply an overhead 2706 // that we are not willing to pay. 2707 Inst->setOperand(It, UndefValue::get(Val->getType())); 2708 } 2709 } 2710 2711 /// Restore the original list of uses. 2712 void undo() override { 2713 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2714 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2715 Inst->setOperand(It, OriginalValues[It]); 2716 } 2717 }; 2718 2719 /// Build a truncate instruction. 2720 class TruncBuilder : public TypePromotionAction { 2721 Value *Val; 2722 2723 public: 2724 /// Build a truncate instruction of \p Opnd producing a \p Ty 2725 /// result. 2726 /// trunc Opnd to Ty. 2727 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2728 IRBuilder<> Builder(Opnd); 2729 Builder.SetCurrentDebugLocation(DebugLoc()); 2730 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2731 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2732 } 2733 2734 /// Get the built value. 2735 Value *getBuiltValue() { return Val; } 2736 2737 /// Remove the built instruction. 2738 void undo() override { 2739 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2740 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2741 IVal->eraseFromParent(); 2742 } 2743 }; 2744 2745 /// Build a sign extension instruction. 
2746 class SExtBuilder : public TypePromotionAction { 2747 Value *Val; 2748 2749 public: 2750 /// Build a sign extension instruction of \p Opnd producing a \p Ty 2751 /// result. 2752 /// sext Opnd to Ty. 2753 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2754 : TypePromotionAction(InsertPt) { 2755 IRBuilder<> Builder(InsertPt); 2756 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2757 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2758 } 2759 2760 /// Get the built value. 2761 Value *getBuiltValue() { return Val; } 2762 2763 /// Remove the built instruction. 2764 void undo() override { 2765 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2766 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2767 IVal->eraseFromParent(); 2768 } 2769 }; 2770 2771 /// Build a zero extension instruction. 2772 class ZExtBuilder : public TypePromotionAction { 2773 Value *Val; 2774 2775 public: 2776 /// Build a zero extension instruction of \p Opnd producing a \p Ty 2777 /// result. 2778 /// zext Opnd to Ty. 2779 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2780 : TypePromotionAction(InsertPt) { 2781 IRBuilder<> Builder(InsertPt); 2782 Builder.SetCurrentDebugLocation(DebugLoc()); 2783 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2784 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2785 } 2786 2787 /// Get the built value. 2788 Value *getBuiltValue() { return Val; } 2789 2790 /// Remove the built instruction. 2791 void undo() override { 2792 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2793 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2794 IVal->eraseFromParent(); 2795 } 2796 }; 2797 2798 /// Mutate an instruction to another type. 2799 class TypeMutator : public TypePromotionAction { 2800 /// Record the original type. 2801 Type *OrigTy; 2802 2803 public: 2804 /// Mutate the type of \p Inst into \p NewTy. 2805 TypeMutator(Instruction *Inst, Type *NewTy) 2806 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2807 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2808 << "\n"); 2809 Inst->mutateType(NewTy); 2810 } 2811 2812 /// Mutate the instruction back to its original type. 2813 void undo() override { 2814 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2815 << "\n"); 2816 Inst->mutateType(OrigTy); 2817 } 2818 }; 2819 2820 /// Replace the uses of an instruction by another instruction. 2821 class UsesReplacer : public TypePromotionAction { 2822 /// Helper structure to keep track of the replaced uses. 2823 struct InstructionAndIdx { 2824 /// The instruction using the instruction. 2825 Instruction *Inst; 2826 2827 /// The index where this instruction is used for Inst. 2828 unsigned Idx; 2829 2830 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2831 : Inst(Inst), Idx(Idx) {} 2832 }; 2833 2834 /// Keep track of the original uses (pair Instruction, Index). 2835 SmallVector<InstructionAndIdx, 4> OriginalUses; 2836 /// Keep track of the debug users. 2837 SmallVector<DbgValueInst *, 1> DbgValues; 2838 2839 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; 2840 2841 public: 2842 /// Replace all the use of \p Inst by \p New. 2843 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2844 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2845 << "\n"); 2846 // Record the original uses. 
2847 for (Use &U : Inst->uses()) { 2848 Instruction *UserI = cast<Instruction>(U.getUser()); 2849 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2850 } 2851 // Record the debug uses separately. They are not in the instruction's 2852 // use list, but they are replaced by RAUW. 2853 findDbgValues(DbgValues, Inst); 2854 2855 // Now, we can replace the uses. 2856 Inst->replaceAllUsesWith(New); 2857 } 2858 2859 /// Reassign the original uses of Inst to Inst. 2860 void undo() override { 2861 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2862 for (InstructionAndIdx &Use : OriginalUses) 2863 Use.Inst->setOperand(Use.Idx, Inst); 2864 // RAUW has replaced all original uses with references to the new value, 2865 // including the debug uses. Since we are undoing the replacements, 2866 // the original debug uses must also be reinstated to maintain the 2867 // correctness and utility of debug value instructions. 2868 for (auto *DVI: DbgValues) { 2869 LLVMContext &Ctx = Inst->getType()->getContext(); 2870 auto *MV = MetadataAsValue::get(Ctx, ValueAsMetadata::get(Inst)); 2871 DVI->setOperand(0, MV); 2872 } 2873 } 2874 }; 2875 2876 /// Remove an instruction from the IR. 2877 class InstructionRemover : public TypePromotionAction { 2878 /// Original position of the instruction. 2879 InsertionHandler Inserter; 2880 2881 /// Helper structure to hide all the links to the instruction. In other 2882 /// words, this helps pretend the instruction was removed. 2883 OperandsHider Hider; 2884 2885 /// Keep track of the uses replaced, if any. 2886 UsesReplacer *Replacer = nullptr; 2887 2888 /// Keep track of instructions removed. 2889 SetOfInstrs &RemovedInsts; 2890 2891 public: 2892 /// Remove all references to \p Inst and optionally replace all its 2893 /// uses with New. 2894 /// \p RemovedInsts Keep track of the instructions removed by this Action. 2895 /// \pre If !Inst->use_empty(), then New != nullptr 2896 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, 2897 Value *New = nullptr) 2898 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 2899 RemovedInsts(RemovedInsts) { 2900 if (New) 2901 Replacer = new UsesReplacer(Inst, New); 2902 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 2903 RemovedInsts.insert(Inst); 2904 /// The instructions removed here will be freed after completing 2905 /// optimizeBlock() for all blocks as we need to keep track of the 2906 /// removed instructions during promotion. 2907 Inst->removeFromParent(); 2908 } 2909 2910 ~InstructionRemover() override { delete Replacer; } 2911 2912 /// Resurrect the instruction and reassign it to the proper uses if 2913 /// a new value was provided when building this action. 2914 void undo() override { 2915 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 2916 Inserter.insert(Inst); 2917 if (Replacer) 2918 Replacer->undo(); 2919 Hider.undo(); 2920 RemovedInsts.erase(Inst); 2921 } 2922 }; 2923 2924 public: 2925 /// Restoration point. 2926 /// The restoration point is a pointer to an action instead of an iterator 2927 /// because the iterator may be invalidated but not the pointer. 2928 using ConstRestorationPt = const TypePromotionAction *; 2929 2930 TypePromotionTransaction(SetOfInstrs &RemovedInsts) 2931 : RemovedInsts(RemovedInsts) {} 2932 2933 /// Advocate every change made in this transaction. Return true if any change 2934 /// happened. 2935 bool commit(); 2936 2937 /// Undo all the changes made after the given point.
2938 void rollback(ConstRestorationPt Point); 2939 2940 /// Get the current restoration point. 2941 ConstRestorationPt getRestorationPoint() const; 2942 2943 /// \name API for IR modification with state keeping to support rollback. 2944 /// @{ 2945 /// Same as Instruction::setOperand. 2946 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 2947 2948 /// Same as Instruction::eraseFromParent. 2949 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 2950 2951 /// Same as Value::replaceAllUsesWith. 2952 void replaceAllUsesWith(Instruction *Inst, Value *New); 2953 2954 /// Same as Value::mutateType. 2955 void mutateType(Instruction *Inst, Type *NewTy); 2956 2957 /// Same as IRBuilder::createTrunc. 2958 Value *createTrunc(Instruction *Opnd, Type *Ty); 2959 2960 /// Same as IRBuilder::createSExt. 2961 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 2962 2963 /// Same as IRBuilder::createZExt. 2964 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 2965 2966 /// Same as Instruction::moveBefore. 2967 void moveBefore(Instruction *Inst, Instruction *Before); 2968 /// @} 2969 2970 private: 2971 /// The ordered list of actions made so far. 2972 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2973 2974 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 2975 2976 SetOfInstrs &RemovedInsts; 2977 }; 2978 2979 } // end anonymous namespace 2980 2981 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2982 Value *NewVal) { 2983 Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>( 2984 Inst, Idx, NewVal)); 2985 } 2986 2987 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2988 Value *NewVal) { 2989 Actions.push_back( 2990 std::make_unique<TypePromotionTransaction::InstructionRemover>( 2991 Inst, RemovedInsts, NewVal)); 2992 } 2993 2994 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2995 Value *New) { 2996 Actions.push_back( 2997 std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2998 } 2999 3000 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 3001 Actions.push_back( 3002 std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 3003 } 3004 3005 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 3006 Type *Ty) { 3007 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 3008 Value *Val = Ptr->getBuiltValue(); 3009 Actions.push_back(std::move(Ptr)); 3010 return Val; 3011 } 3012 3013 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 3014 Value *Opnd, Type *Ty) { 3015 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 3016 Value *Val = Ptr->getBuiltValue(); 3017 Actions.push_back(std::move(Ptr)); 3018 return Val; 3019 } 3020 3021 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 3022 Value *Opnd, Type *Ty) { 3023 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 3024 Value *Val = Ptr->getBuiltValue(); 3025 Actions.push_back(std::move(Ptr)); 3026 return Val; 3027 } 3028 3029 void TypePromotionTransaction::moveBefore(Instruction *Inst, 3030 Instruction *Before) { 3031 Actions.push_back( 3032 std::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 3033 Inst, Before)); 3034 } 3035 3036 TypePromotionTransaction::ConstRestorationPt 3037 TypePromotionTransaction::getRestorationPoint() const { 3038 return !Actions.empty() ? 
Actions.back().get() : nullptr; 3039 } 3040 3041 bool TypePromotionTransaction::commit() { 3042 for (std::unique_ptr<TypePromotionAction> &Action : Actions) 3043 Action->commit(); 3044 bool Modified = !Actions.empty(); 3045 Actions.clear(); 3046 return Modified; 3047 } 3048 3049 void TypePromotionTransaction::rollback( 3050 TypePromotionTransaction::ConstRestorationPt Point) { 3051 while (!Actions.empty() && Point != Actions.back().get()) { 3052 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 3053 Curr->undo(); 3054 } 3055 } 3056 3057 namespace { 3058 3059 /// A helper class for matching addressing modes. 3060 /// 3061 /// This encapsulates the logic for matching the target-legal addressing modes. 3062 class AddressingModeMatcher { 3063 SmallVectorImpl<Instruction*> &AddrModeInsts; 3064 const TargetLowering &TLI; 3065 const TargetRegisterInfo &TRI; 3066 const DataLayout &DL; 3067 3068 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 3069 /// the memory instruction that we're computing this address for. 3070 Type *AccessTy; 3071 unsigned AddrSpace; 3072 Instruction *MemoryInst; 3073 3074 /// This is the addressing mode that we're building up. This is 3075 /// part of the return value of this addressing mode matching stuff. 3076 ExtAddrMode &AddrMode; 3077 3078 /// The instructions inserted by other CodeGenPrepare optimizations. 3079 const SetOfInstrs &InsertedInsts; 3080 3081 /// A map from the instructions to their type before promotion. 3082 InstrToOrigTy &PromotedInsts; 3083 3084 /// The ongoing transaction where every action should be registered. 3085 TypePromotionTransaction &TPT; 3086 3087 // A GEP which has too large offset to be folded into the addressing mode. 3088 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; 3089 3090 /// This is set to true when we should not do profitability checks. 3091 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 3092 bool IgnoreProfitability; 3093 3094 /// True if we are optimizing for size. 3095 bool OptSize; 3096 3097 ProfileSummaryInfo *PSI; 3098 BlockFrequencyInfo *BFI; 3099 3100 AddressingModeMatcher( 3101 SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, 3102 const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI, 3103 ExtAddrMode &AM, const SetOfInstrs &InsertedInsts, 3104 InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, 3105 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, 3106 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) 3107 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 3108 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 3109 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 3110 PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP), 3111 OptSize(OptSize), PSI(PSI), BFI(BFI) { 3112 IgnoreProfitability = false; 3113 } 3114 3115 public: 3116 /// Find the maximal addressing mode that a load/store of V can fold, 3117 /// give an access type of AccessTy. This returns a list of involved 3118 /// instructions in AddrModeInsts. 3119 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 3120 /// optimizations. 3121 /// \p PromotedInsts maps the instructions to their type before promotion. 3122 /// \p The ongoing transaction where every action should be registered. 
3123 static ExtAddrMode 3124 Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, 3125 SmallVectorImpl<Instruction *> &AddrModeInsts, 3126 const TargetLowering &TLI, const TargetRegisterInfo &TRI, 3127 const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, 3128 TypePromotionTransaction &TPT, 3129 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, 3130 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { 3131 ExtAddrMode Result; 3132 3133 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS, 3134 MemoryInst, Result, InsertedInsts, 3135 PromotedInsts, TPT, LargeOffsetGEP, 3136 OptSize, PSI, BFI) 3137 .matchAddr(V, 0); 3138 (void)Success; assert(Success && "Couldn't select *anything*?"); 3139 return Result; 3140 } 3141 3142 private: 3143 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 3144 bool matchAddr(Value *Addr, unsigned Depth); 3145 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, 3146 bool *MovedAway = nullptr); 3147 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 3148 ExtAddrMode &AMBefore, 3149 ExtAddrMode &AMAfter); 3150 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 3151 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 3152 Value *PromotedOperand) const; 3153 }; 3154 3155 class PhiNodeSet; 3156 3157 /// An iterator for PhiNodeSet. 3158 class PhiNodeSetIterator { 3159 PhiNodeSet * const Set; 3160 size_t CurrentIndex = 0; 3161 3162 public: 3163 /// The constructor. Start should point to either a valid element, or be equal 3164 /// to the size of the underlying SmallVector of the PhiNodeSet. 3165 PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start); 3166 PHINode * operator*() const; 3167 PhiNodeSetIterator& operator++(); 3168 bool operator==(const PhiNodeSetIterator &RHS) const; 3169 bool operator!=(const PhiNodeSetIterator &RHS) const; 3170 }; 3171 3172 /// Keeps a set of PHINodes. 3173 /// 3174 /// This is a minimal set implementation for a specific use case: 3175 /// It is very fast when there are very few elements, but also provides good 3176 /// performance when there are many. It is similar to SmallPtrSet, but also 3177 /// provides iteration by insertion order, which is deterministic and stable 3178 /// across runs. It is also similar to SmallSetVector, but provides removing 3179 /// elements in O(1) time. This is achieved by not actually removing the element 3180 /// from the underlying vector, so comes at the cost of using more memory, but 3181 /// that is fine, since PhiNodeSets are used as short lived objects. 3182 class PhiNodeSet { 3183 friend class PhiNodeSetIterator; 3184 3185 using MapType = SmallDenseMap<PHINode *, size_t, 32>; 3186 using iterator = PhiNodeSetIterator; 3187 3188 /// Keeps the elements in the order of their insertion in the underlying 3189 /// vector. To achieve constant time removal, it never deletes any element. 3190 SmallVector<PHINode *, 32> NodeList; 3191 3192 /// Keeps the elements in the underlying set implementation. This (and not the 3193 /// NodeList defined above) is the source of truth on whether an element 3194 /// is actually in the collection. 3195 MapType NodeMap; 3196 3197 /// Points to the first valid (not deleted) element when the set is not empty 3198 /// and the value is not zero. Equals to the size of the underlying vector 3199 /// when the set is empty. When the value is 0, as in the beginning, the 3200 /// first element may or may not be valid. 
3201 size_t FirstValidElement = 0; 3202 3203 public: 3204 /// Inserts a new element to the collection. 3205 /// \returns true if the element is actually added, i.e. was not in the 3206 /// collection before the operation. 3207 bool insert(PHINode *Ptr) { 3208 if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { 3209 NodeList.push_back(Ptr); 3210 return true; 3211 } 3212 return false; 3213 } 3214 3215 /// Removes the element from the collection. 3216 /// \returns whether the element is actually removed, i.e. was in the 3217 /// collection before the operation. 3218 bool erase(PHINode *Ptr) { 3219 if (NodeMap.erase(Ptr)) { 3220 SkipRemovedElements(FirstValidElement); 3221 return true; 3222 } 3223 return false; 3224 } 3225 3226 /// Removes all elements and clears the collection. 3227 void clear() { 3228 NodeMap.clear(); 3229 NodeList.clear(); 3230 FirstValidElement = 0; 3231 } 3232 3233 /// \returns an iterator that will iterate the elements in the order of 3234 /// insertion. 3235 iterator begin() { 3236 if (FirstValidElement == 0) 3237 SkipRemovedElements(FirstValidElement); 3238 return PhiNodeSetIterator(this, FirstValidElement); 3239 } 3240 3241 /// \returns an iterator that points to the end of the collection. 3242 iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } 3243 3244 /// Returns the number of elements in the collection. 3245 size_t size() const { 3246 return NodeMap.size(); 3247 } 3248 3249 /// \returns 1 if the given element is in the collection, and 0 if otherwise. 3250 size_t count(PHINode *Ptr) const { 3251 return NodeMap.count(Ptr); 3252 } 3253 3254 private: 3255 /// Updates the CurrentIndex so that it will point to a valid element. 3256 /// 3257 /// If the element of NodeList at CurrentIndex is valid, it does not 3258 /// change it. If there are no more valid elements, it updates CurrentIndex 3259 /// to point to the end of the NodeList. 3260 void SkipRemovedElements(size_t &CurrentIndex) { 3261 while (CurrentIndex < NodeList.size()) { 3262 auto it = NodeMap.find(NodeList[CurrentIndex]); 3263 // If the element has been deleted and added again later, NodeMap will 3264 // point to a different index, so CurrentIndex will still be invalid. 3265 if (it != NodeMap.end() && it->second == CurrentIndex) 3266 break; 3267 ++CurrentIndex; 3268 } 3269 } 3270 }; 3271 3272 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) 3273 : Set(Set), CurrentIndex(Start) {} 3274 3275 PHINode * PhiNodeSetIterator::operator*() const { 3276 assert(CurrentIndex < Set->NodeList.size() && 3277 "PhiNodeSet access out of range"); 3278 return Set->NodeList[CurrentIndex]; 3279 } 3280 3281 PhiNodeSetIterator& PhiNodeSetIterator::operator++() { 3282 assert(CurrentIndex < Set->NodeList.size() && 3283 "PhiNodeSet access out of range"); 3284 ++CurrentIndex; 3285 Set->SkipRemovedElements(CurrentIndex); 3286 return *this; 3287 } 3288 3289 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { 3290 return CurrentIndex == RHS.CurrentIndex; 3291 } 3292 3293 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { 3294 return !((*this) == RHS); 3295 } 3296 3297 /// Keep track of simplification of Phi nodes. 3298 /// Accept the set of all phi nodes and erase phi node from this set 3299 /// if it is simplified. 3300 class SimplificationTracker { 3301 DenseMap<Value *, Value *> Storage; 3302 const SimplifyQuery &SQ; 3303 // Tracks newly created Phi nodes. The elements are iterated by insertion 3304 // order. 
3305 PhiNodeSet AllPhiNodes; 3306 // Tracks newly created Select nodes. 3307 SmallPtrSet<SelectInst *, 32> AllSelectNodes; 3308 3309 public: 3310 SimplificationTracker(const SimplifyQuery &sq) 3311 : SQ(sq) {} 3312 3313 Value *Get(Value *V) { 3314 do { 3315 auto SV = Storage.find(V); 3316 if (SV == Storage.end()) 3317 return V; 3318 V = SV->second; 3319 } while (true); 3320 } 3321 3322 Value *Simplify(Value *Val) { 3323 SmallVector<Value *, 32> WorkList; 3324 SmallPtrSet<Value *, 32> Visited; 3325 WorkList.push_back(Val); 3326 while (!WorkList.empty()) { 3327 auto *P = WorkList.pop_back_val(); 3328 if (!Visited.insert(P).second) 3329 continue; 3330 if (auto *PI = dyn_cast<Instruction>(P)) 3331 if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { 3332 for (auto *U : PI->users()) 3333 WorkList.push_back(cast<Value>(U)); 3334 Put(PI, V); 3335 PI->replaceAllUsesWith(V); 3336 if (auto *PHI = dyn_cast<PHINode>(PI)) 3337 AllPhiNodes.erase(PHI); 3338 if (auto *Select = dyn_cast<SelectInst>(PI)) 3339 AllSelectNodes.erase(Select); 3340 PI->eraseFromParent(); 3341 } 3342 } 3343 return Get(Val); 3344 } 3345 3346 void Put(Value *From, Value *To) { 3347 Storage.insert({ From, To }); 3348 } 3349 3350 void ReplacePhi(PHINode *From, PHINode *To) { 3351 Value* OldReplacement = Get(From); 3352 while (OldReplacement != From) { 3353 From = To; 3354 To = dyn_cast<PHINode>(OldReplacement); 3355 OldReplacement = Get(From); 3356 } 3357 assert(To && Get(To) == To && "Replacement PHI node is already replaced."); 3358 Put(From, To); 3359 From->replaceAllUsesWith(To); 3360 AllPhiNodes.erase(From); 3361 From->eraseFromParent(); 3362 } 3363 3364 PhiNodeSet& newPhiNodes() { return AllPhiNodes; } 3365 3366 void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } 3367 3368 void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } 3369 3370 unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } 3371 3372 unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } 3373 3374 void destroyNewNodes(Type *CommonType) { 3375 // For safe erasing, replace the uses with dummy value first. 3376 auto *Dummy = UndefValue::get(CommonType); 3377 for (auto *I : AllPhiNodes) { 3378 I->replaceAllUsesWith(Dummy); 3379 I->eraseFromParent(); 3380 } 3381 AllPhiNodes.clear(); 3382 for (auto *I : AllSelectNodes) { 3383 I->replaceAllUsesWith(Dummy); 3384 I->eraseFromParent(); 3385 } 3386 AllSelectNodes.clear(); 3387 } 3388 }; 3389 3390 /// A helper class for combining addressing modes. 3391 class AddressingModeCombiner { 3392 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; 3393 typedef std::pair<PHINode *, PHINode *> PHIPair; 3394 3395 private: 3396 /// The addressing modes we've collected. 3397 SmallVector<ExtAddrMode, 16> AddrModes; 3398 3399 /// The field in which the AddrModes differ, when we have more than one. 3400 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; 3401 3402 /// Are the AddrModes that we have all just equal to their original values? 3403 bool AllAddrModesTrivial = true; 3404 3405 /// Common Type for all different fields in addressing modes. 3406 Type *CommonType; 3407 3408 /// SimplifyQuery for simplifyInstruction utility. 3409 const SimplifyQuery &SQ; 3410 3411 /// Original Address. 
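/// (When addressing modes actually need combining, this is a Phi or Select
/// that produces the address being matched; see InsertPlaceholders.)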
3412 Value *Original; 3413 3414 public: 3415 AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) 3416 : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} 3417 3418 /// Get the combined AddrMode 3419 const ExtAddrMode &getAddrMode() const { 3420 return AddrModes[0]; 3421 } 3422 3423 /// Add a new AddrMode if it's compatible with the AddrModes we already 3424 /// have. 3425 /// \return True iff we succeeded in doing so. 3426 bool addNewAddrMode(ExtAddrMode &NewAddrMode) { 3427 // Take note of if we have any non-trivial AddrModes, as we need to detect 3428 // when all AddrModes are trivial as then we would introduce a phi or select 3429 // which just duplicates what's already there. 3430 AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); 3431 3432 // If this is the first addrmode then everything is fine. 3433 if (AddrModes.empty()) { 3434 AddrModes.emplace_back(NewAddrMode); 3435 return true; 3436 } 3437 3438 // Figure out how different this is from the other address modes, which we 3439 // can do just by comparing against the first one given that we only care 3440 // about the cumulative difference. 3441 ExtAddrMode::FieldName ThisDifferentField = 3442 AddrModes[0].compare(NewAddrMode); 3443 if (DifferentField == ExtAddrMode::NoField) 3444 DifferentField = ThisDifferentField; 3445 else if (DifferentField != ThisDifferentField) 3446 DifferentField = ExtAddrMode::MultipleFields; 3447 3448 // If NewAddrMode differs in more than one dimension we cannot handle it. 3449 bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; 3450 3451 // If Scale Field is different then we reject. 3452 CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; 3453 3454 // We also must reject the case when base offset is different and 3455 // scale reg is not null, we cannot handle this case due to merge of 3456 // different offsets will be used as ScaleReg. 3457 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || 3458 !NewAddrMode.ScaledReg); 3459 3460 // We also must reject the case when GV is different and BaseReg installed 3461 // due to we want to use base reg as a merge of GV values. 3462 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || 3463 !NewAddrMode.HasBaseReg); 3464 3465 // Even if NewAddMode is the same we still need to collect it due to 3466 // original value is different. And later we will need all original values 3467 // as anchors during finding the common Phi node. 3468 if (CanHandle) 3469 AddrModes.emplace_back(NewAddrMode); 3470 else 3471 AddrModes.clear(); 3472 3473 return CanHandle; 3474 } 3475 3476 /// Combine the addressing modes we've collected into a single 3477 /// addressing mode. 3478 /// \return True iff we successfully combined them or we only had one so 3479 /// didn't need to combine them anyway. 3480 bool combineAddrModes() { 3481 // If we have no AddrModes then they can't be combined. 3482 if (AddrModes.size() == 0) 3483 return false; 3484 3485 // A single AddrMode can trivially be combined. 3486 if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) 3487 return true; 3488 3489 // If the AddrModes we collected are all just equal to the value they are 3490 // derived from then combining them wouldn't do anything useful. 3491 if (AllAddrModesTrivial) 3492 return false; 3493 3494 if (!addrModeCombiningAllowed()) 3495 return false; 3496 3497 // Build a map between <original value, basic block where we saw it> to 3498 // value of base register. 
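// For instance (an illustrative sketch, names made up): if the collected
// addressing modes differ only in their base register, the map built below
// could be { %p1 -> %b1, %p2 -> %b2 }, where %p1/%p2 are the original
// address values and %b1/%b2 are the differing base registers.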
3499 // Bail out if there is no common type. 3500 FoldAddrToValueMapping Map; 3501 if (!initializeMap(Map)) 3502 return false; 3503 3504 Value *CommonValue = findCommon(Map); 3505 if (CommonValue) 3506 AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); 3507 return CommonValue != nullptr; 3508 } 3509 3510 private: 3511 /// Initialize Map with anchor values. For address seen 3512 /// we set the value of different field saw in this address. 3513 /// At the same time we find a common type for different field we will 3514 /// use to create new Phi/Select nodes. Keep it in CommonType field. 3515 /// Return false if there is no common type found. 3516 bool initializeMap(FoldAddrToValueMapping &Map) { 3517 // Keep track of keys where the value is null. We will need to replace it 3518 // with constant null when we know the common type. 3519 SmallVector<Value *, 2> NullValue; 3520 Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); 3521 for (auto &AM : AddrModes) { 3522 Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); 3523 if (DV) { 3524 auto *Type = DV->getType(); 3525 if (CommonType && CommonType != Type) 3526 return false; 3527 CommonType = Type; 3528 Map[AM.OriginalValue] = DV; 3529 } else { 3530 NullValue.push_back(AM.OriginalValue); 3531 } 3532 } 3533 assert(CommonType && "At least one non-null value must be!"); 3534 for (auto *V : NullValue) 3535 Map[V] = Constant::getNullValue(CommonType); 3536 return true; 3537 } 3538 3539 /// We have mapping between value A and other value B where B was a field in 3540 /// addressing mode represented by A. Also we have an original value C 3541 /// representing an address we start with. Traversing from C through phi and 3542 /// selects we ended up with A's in a map. This utility function tries to find 3543 /// a value V which is a field in addressing mode C and traversing through phi 3544 /// nodes and selects we will end up in corresponded values B in a map. 3545 /// The utility will create a new Phi/Selects if needed. 3546 // The simple example looks as follows: 3547 // BB1: 3548 // p1 = b1 + 40 3549 // br cond BB2, BB3 3550 // BB2: 3551 // p2 = b2 + 40 3552 // br BB3 3553 // BB3: 3554 // p = phi [p1, BB1], [p2, BB2] 3555 // v = load p 3556 // Map is 3557 // p1 -> b1 3558 // p2 -> b2 3559 // Request is 3560 // p -> ? 3561 // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. 3562 Value *findCommon(FoldAddrToValueMapping &Map) { 3563 // Tracks the simplification of newly created phi nodes. The reason we use 3564 // this mapping is because we will add new created Phi nodes in AddrToBase. 3565 // Simplification of Phi nodes is recursive, so some Phi node may 3566 // be simplified after we added it to AddrToBase. In reality this 3567 // simplification is possible only if original phi/selects were not 3568 // simplified yet. 3569 // Using this mapping we can find the current value in AddrToBase. 3570 SimplificationTracker ST(SQ); 3571 3572 // First step, DFS to create PHI nodes for all intermediate blocks. 3573 // Also fill traverse order for the second step. 3574 SmallVector<Value *, 32> TraverseOrder; 3575 InsertPlaceholders(Map, TraverseOrder, ST); 3576 3577 // Second Step, fill new nodes by merged values and simplify if possible. 3578 FillPlaceholders(Map, TraverseOrder, ST); 3579 3580 if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { 3581 ST.destroyNewNodes(CommonType); 3582 return nullptr; 3583 } 3584 3585 // Now we'd like to match New Phi nodes to existed ones. 
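// For example (illustrative only): if we created
//   %sunk_phi = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
// and the block already contains an equivalent
//   %old = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
// then MatchPhiSet lets us reuse %old and erase %sunk_phi instead of keeping
// a duplicate node.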
3586 unsigned PhiNotMatchedCount = 0;
3587 if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3588 ST.destroyNewNodes(CommonType);
3589 return nullptr;
3590 }
3591
3592 auto *Result = ST.Get(Map.find(Original)->second);
3593 if (Result) {
3594 NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3595 NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3596 }
3597 return Result;
3598 }
3599
3600 /// Try to match PHI node to Candidate.
3601 /// Matcher tracks the matched Phi nodes.
3602 bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
3603 SmallSetVector<PHIPair, 8> &Matcher,
3604 PhiNodeSet &PhiNodesToMatch) {
3605 SmallVector<PHIPair, 8> WorkList;
3606 Matcher.insert({ PHI, Candidate });
3607 SmallSet<PHINode *, 8> MatchedPHIs;
3608 MatchedPHIs.insert(PHI);
3609 WorkList.push_back({ PHI, Candidate });
3610 SmallSet<PHIPair, 8> Visited;
3611 while (!WorkList.empty()) {
3612 auto Item = WorkList.pop_back_val();
3613 if (!Visited.insert(Item).second)
3614 continue;
3615 // We iterate over all incoming values of the Phi to compare them.
3616 // If the values differ, both of them are Phi nodes, the first one is a
3617 // Phi we added (subject to match), and both are in the same basic
3618 // block, then we can match our pair if the incoming values match. So we
3619 // record that these values match and add the pair to the worklist to verify that.
3620 for (auto B : Item.first->blocks()) {
3621 Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3622 Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3623 if (FirstValue == SecondValue)
3624 continue;
3625
3626 PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3627 PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3628
3629 // If one of them is not a Phi, or
3630 // the first one is not a Phi node from the set we'd like to match, or
3631 // the Phi nodes are in different basic blocks, then
3632 // we will not be able to match.
3633 if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3634 FirstPhi->getParent() != SecondPhi->getParent())
3635 return false;
3636
3637 // If we already matched them then continue.
3638 if (Matcher.count({ FirstPhi, SecondPhi }))
3639 continue;
3640 // So the values are different and do not match. So we need them to
3641 // match. (But we register no more than one match per PHI node, so that
3642 // we won't later try to replace them twice.)
3643 if (MatchedPHIs.insert(FirstPhi).second)
3644 Matcher.insert({ FirstPhi, SecondPhi });
3645 // But we must check it.
3646 WorkList.push_back({ FirstPhi, SecondPhi });
3647 }
3648 }
3649 return true;
3650 }
3651
3652 /// For the given set of PHI nodes (in the SimplificationTracker) try
3653 /// to find their equivalents.
3654 /// Returns false if this matching fails and creation of new Phi nodes is disabled.
3655 bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
3656 unsigned &PhiNotMatchedCount) {
3657 // Matched and PhiNodesToMatch iterate their elements in a deterministic
3658 // order, so the replacements (ReplacePhi) are also done in a deterministic
3659 // order.
3660 SmallSetVector<PHIPair, 8> Matched;
3661 SmallPtrSet<PHINode *, 8> WillNotMatch;
3662 PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3663 while (PhiNodesToMatch.size()) {
3664 PHINode *PHI = *PhiNodesToMatch.begin();
3665
3666 // Add ourselves; if there are no other Phi nodes in the basic block, we do not match.
3667 WillNotMatch.clear();
3668 WillNotMatch.insert(PHI);
3669
3670 // Traverse all Phis until we find an equivalent one or fail to do so.
3671 bool IsMatched = false; 3672 for (auto &P : PHI->getParent()->phis()) { 3673 if (&P == PHI) 3674 continue; 3675 if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) 3676 break; 3677 // If it does not match, collect all Phi nodes from matcher. 3678 // if we end up with no match, them all these Phi nodes will not match 3679 // later. 3680 for (auto M : Matched) 3681 WillNotMatch.insert(M.first); 3682 Matched.clear(); 3683 } 3684 if (IsMatched) { 3685 // Replace all matched values and erase them. 3686 for (auto MV : Matched) 3687 ST.ReplacePhi(MV.first, MV.second); 3688 Matched.clear(); 3689 continue; 3690 } 3691 // If we are not allowed to create new nodes then bail out. 3692 if (!AllowNewPhiNodes) 3693 return false; 3694 // Just remove all seen values in matcher. They will not match anything. 3695 PhiNotMatchedCount += WillNotMatch.size(); 3696 for (auto *P : WillNotMatch) 3697 PhiNodesToMatch.erase(P); 3698 } 3699 return true; 3700 } 3701 /// Fill the placeholders with values from predecessors and simplify them. 3702 void FillPlaceholders(FoldAddrToValueMapping &Map, 3703 SmallVectorImpl<Value *> &TraverseOrder, 3704 SimplificationTracker &ST) { 3705 while (!TraverseOrder.empty()) { 3706 Value *Current = TraverseOrder.pop_back_val(); 3707 assert(Map.find(Current) != Map.end() && "No node to fill!!!"); 3708 Value *V = Map[Current]; 3709 3710 if (SelectInst *Select = dyn_cast<SelectInst>(V)) { 3711 // CurrentValue also must be Select. 3712 auto *CurrentSelect = cast<SelectInst>(Current); 3713 auto *TrueValue = CurrentSelect->getTrueValue(); 3714 assert(Map.find(TrueValue) != Map.end() && "No True Value!"); 3715 Select->setTrueValue(ST.Get(Map[TrueValue])); 3716 auto *FalseValue = CurrentSelect->getFalseValue(); 3717 assert(Map.find(FalseValue) != Map.end() && "No False Value!"); 3718 Select->setFalseValue(ST.Get(Map[FalseValue])); 3719 } else { 3720 // Must be a Phi node then. 3721 auto *PHI = cast<PHINode>(V); 3722 // Fill the Phi node with values from predecessors. 3723 for (auto *B : predecessors(PHI->getParent())) { 3724 Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B); 3725 assert(Map.find(PV) != Map.end() && "No predecessor Value!"); 3726 PHI->addIncoming(ST.Get(Map[PV]), B); 3727 } 3728 } 3729 Map[Current] = ST.Simplify(V); 3730 } 3731 } 3732 3733 /// Starting from original value recursively iterates over def-use chain up to 3734 /// known ending values represented in a map. For each traversed phi/select 3735 /// inserts a placeholder Phi or Select. 3736 /// Reports all new created Phi/Select nodes by adding them to set. 3737 /// Also reports and order in what values have been traversed. 3738 void InsertPlaceholders(FoldAddrToValueMapping &Map, 3739 SmallVectorImpl<Value *> &TraverseOrder, 3740 SimplificationTracker &ST) { 3741 SmallVector<Value *, 32> Worklist; 3742 assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) && 3743 "Address must be a Phi or Select node"); 3744 auto *Dummy = UndefValue::get(CommonType); 3745 Worklist.push_back(Original); 3746 while (!Worklist.empty()) { 3747 Value *Current = Worklist.pop_back_val(); 3748 // if it is already visited or it is an ending value then skip it. 3749 if (Map.find(Current) != Map.end()) 3750 continue; 3751 TraverseOrder.push_back(Current); 3752 3753 // CurrentValue must be a Phi node or select. All others must be covered 3754 // by anchors. 3755 if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) { 3756 // Is it OK to get metadata from OrigSelect?! 
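// As an illustrative sketch (names are made up): for an address
//   %addr = select i1 %c, i64 %a1, i64 %a2
// we first create a placeholder
//   %addr.sunk = select i1 %c, i64 undef, i64 undef
// and FillPlaceholders later replaces the undef operands with the values
// mapped for %a1 and %a2.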
3757 // Create a Select placeholder with dummy value. 3758 SelectInst *Select = SelectInst::Create( 3759 CurrentSelect->getCondition(), Dummy, Dummy, 3760 CurrentSelect->getName(), CurrentSelect, CurrentSelect); 3761 Map[Current] = Select; 3762 ST.insertNewSelect(Select); 3763 // We are interested in True and False values. 3764 Worklist.push_back(CurrentSelect->getTrueValue()); 3765 Worklist.push_back(CurrentSelect->getFalseValue()); 3766 } else { 3767 // It must be a Phi node then. 3768 PHINode *CurrentPhi = cast<PHINode>(Current); 3769 unsigned PredCount = CurrentPhi->getNumIncomingValues(); 3770 PHINode *PHI = 3771 PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); 3772 Map[Current] = PHI; 3773 ST.insertNewPhi(PHI); 3774 append_range(Worklist, CurrentPhi->incoming_values()); 3775 } 3776 } 3777 } 3778 3779 bool addrModeCombiningAllowed() { 3780 if (DisableComplexAddrModes) 3781 return false; 3782 switch (DifferentField) { 3783 default: 3784 return false; 3785 case ExtAddrMode::BaseRegField: 3786 return AddrSinkCombineBaseReg; 3787 case ExtAddrMode::BaseGVField: 3788 return AddrSinkCombineBaseGV; 3789 case ExtAddrMode::BaseOffsField: 3790 return AddrSinkCombineBaseOffs; 3791 case ExtAddrMode::ScaledRegField: 3792 return AddrSinkCombineScaledReg; 3793 } 3794 } 3795 }; 3796 } // end anonymous namespace 3797 3798 /// Try adding ScaleReg*Scale to the current addressing mode. 3799 /// Return true and update AddrMode if this addr mode is legal for the target, 3800 /// false if not. 3801 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 3802 unsigned Depth) { 3803 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 3804 // mode. Just process that directly. 3805 if (Scale == 1) 3806 return matchAddr(ScaleReg, Depth); 3807 3808 // If the scale is 0, it takes nothing to add this. 3809 if (Scale == 0) 3810 return true; 3811 3812 // If we already have a scale of this value, we can add to it, otherwise, we 3813 // need an available scale field. 3814 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 3815 return false; 3816 3817 ExtAddrMode TestAddrMode = AddrMode; 3818 3819 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 3820 // [A+B + A*7] -> [B+A*8]. 3821 TestAddrMode.Scale += Scale; 3822 TestAddrMode.ScaledReg = ScaleReg; 3823 3824 // If the new address isn't legal, bail out. 3825 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) 3826 return false; 3827 3828 // It was legal, so commit it. 3829 AddrMode = TestAddrMode; 3830 3831 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 3832 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 3833 // X*Scale + C*Scale to addr mode. 3834 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 3835 if (isa<Instruction>(ScaleReg) && // not a constant expr. 3836 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) && 3837 CI->getValue().isSignedIntN(64)) { 3838 TestAddrMode.InBounds = false; 3839 TestAddrMode.ScaledReg = AddLHS; 3840 TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale; 3841 3842 // If this addressing mode is legal, commit it and remember that we folded 3843 // this instruction. 3844 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { 3845 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 3846 AddrMode = TestAddrMode; 3847 return true; 3848 } 3849 } 3850 3851 // Otherwise, not (x+c)*scale, just return what we have. 
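// (For reference, an illustrative example of the fold attempted above,
// assuming no scale was matched yet: with ScaleReg = (%x + 4) and Scale = 2,
// the code speculatively tries ScaledReg = %x and BaseOffs += 4 * 2, keeping
// that form only if the target reports the resulting mode as legal.)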
3852 return true; 3853 } 3854 3855 /// This is a little filter, which returns true if an addressing computation 3856 /// involving I might be folded into a load/store accessing it. 3857 /// This doesn't need to be perfect, but needs to accept at least 3858 /// the set of instructions that MatchOperationAddr can. 3859 static bool MightBeFoldableInst(Instruction *I) { 3860 switch (I->getOpcode()) { 3861 case Instruction::BitCast: 3862 case Instruction::AddrSpaceCast: 3863 // Don't touch identity bitcasts. 3864 if (I->getType() == I->getOperand(0)->getType()) 3865 return false; 3866 return I->getType()->isIntOrPtrTy(); 3867 case Instruction::PtrToInt: 3868 // PtrToInt is always a noop, as we know that the int type is pointer sized. 3869 return true; 3870 case Instruction::IntToPtr: 3871 // We know the input is intptr_t, so this is foldable. 3872 return true; 3873 case Instruction::Add: 3874 return true; 3875 case Instruction::Mul: 3876 case Instruction::Shl: 3877 // Can only handle X*C and X << C. 3878 return isa<ConstantInt>(I->getOperand(1)); 3879 case Instruction::GetElementPtr: 3880 return true; 3881 default: 3882 return false; 3883 } 3884 } 3885 3886 /// Check whether or not \p Val is a legal instruction for \p TLI. 3887 /// \note \p Val is assumed to be the product of some type promotion. 3888 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed 3889 /// to be legal, as the non-promoted value would have had the same state. 3890 static bool isPromotedInstructionLegal(const TargetLowering &TLI, 3891 const DataLayout &DL, Value *Val) { 3892 Instruction *PromotedInst = dyn_cast<Instruction>(Val); 3893 if (!PromotedInst) 3894 return false; 3895 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); 3896 // If the ISDOpcode is undefined, it was undefined before the promotion. 3897 if (!ISDOpcode) 3898 return true; 3899 // Otherwise, check if the promoted instruction is legal or not. 3900 return TLI.isOperationLegalOrCustom( 3901 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); 3902 } 3903 3904 namespace { 3905 3906 /// Hepler class to perform type promotion. 3907 class TypePromotionHelper { 3908 /// Utility function to add a promoted instruction \p ExtOpnd to 3909 /// \p PromotedInsts and record the type of extension we have seen. 3910 static void addPromotedInst(InstrToOrigTy &PromotedInsts, 3911 Instruction *ExtOpnd, 3912 bool IsSExt) { 3913 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; 3914 InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); 3915 if (It != PromotedInsts.end()) { 3916 // If the new extension is same as original, the information in 3917 // PromotedInsts[ExtOpnd] is still correct. 3918 if (It->second.getInt() == ExtTy) 3919 return; 3920 3921 // Now the new extension is different from old extension, we make 3922 // the type information invalid by setting extension type to 3923 // BothExtension. 3924 ExtTy = BothExtension; 3925 } 3926 PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); 3927 } 3928 3929 /// Utility function to query the original type of instruction \p Opnd 3930 /// with a matched extension type. If the extension doesn't match, we 3931 /// cannot use the information we had on the original type. 3932 /// BothExtension doesn't match any extension type. 3933 static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, 3934 Instruction *Opnd, 3935 bool IsSExt) { 3936 ExtType ExtTy = IsSExt ? 
SignExtension : ZeroExtension; 3937 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 3938 if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) 3939 return It->second.getPointer(); 3940 return nullptr; 3941 } 3942 3943 /// Utility function to check whether or not a sign or zero extension 3944 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 3945 /// either using the operands of \p Inst or promoting \p Inst. 3946 /// The type of the extension is defined by \p IsSExt. 3947 /// In other words, check if: 3948 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 3949 /// #1 Promotion applies: 3950 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 3951 /// #2 Operand reuses: 3952 /// ext opnd1 to ConsideredExtType. 3953 /// \p PromotedInsts maps the instructions to their type before promotion. 3954 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 3955 const InstrToOrigTy &PromotedInsts, bool IsSExt); 3956 3957 /// Utility function to determine if \p OpIdx should be promoted when 3958 /// promoting \p Inst. 3959 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 3960 return !(isa<SelectInst>(Inst) && OpIdx == 0); 3961 } 3962 3963 /// Utility function to promote the operand of \p Ext when this 3964 /// operand is a promotable trunc or sext or zext. 3965 /// \p PromotedInsts maps the instructions to their type before promotion. 3966 /// \p CreatedInstsCost[out] contains the cost of all instructions 3967 /// created to promote the operand of Ext. 3968 /// Newly added extensions are inserted in \p Exts. 3969 /// Newly added truncates are inserted in \p Truncs. 3970 /// Should never be called directly. 3971 /// \return The promoted value which is used instead of Ext. 3972 static Value *promoteOperandForTruncAndAnyExt( 3973 Instruction *Ext, TypePromotionTransaction &TPT, 3974 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3975 SmallVectorImpl<Instruction *> *Exts, 3976 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 3977 3978 /// Utility function to promote the operand of \p Ext when this 3979 /// operand is promotable and is not a supported trunc or sext. 3980 /// \p PromotedInsts maps the instructions to their type before promotion. 3981 /// \p CreatedInstsCost[out] contains the cost of all the instructions 3982 /// created to promote the operand of Ext. 3983 /// Newly added extensions are inserted in \p Exts. 3984 /// Newly added truncates are inserted in \p Truncs. 3985 /// Should never be called directly. 3986 /// \return The promoted value which is used instead of Ext. 3987 static Value *promoteOperandForOther(Instruction *Ext, 3988 TypePromotionTransaction &TPT, 3989 InstrToOrigTy &PromotedInsts, 3990 unsigned &CreatedInstsCost, 3991 SmallVectorImpl<Instruction *> *Exts, 3992 SmallVectorImpl<Instruction *> *Truncs, 3993 const TargetLowering &TLI, bool IsSExt); 3994 3995 /// \see promoteOperandForOther. 3996 static Value *signExtendOperandForOther( 3997 Instruction *Ext, TypePromotionTransaction &TPT, 3998 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 3999 SmallVectorImpl<Instruction *> *Exts, 4000 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 4001 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 4002 Exts, Truncs, TLI, true); 4003 } 4004 4005 /// \see promoteOperandForOther. 
4006 static Value *zeroExtendOperandForOther( 4007 Instruction *Ext, TypePromotionTransaction &TPT, 4008 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4009 SmallVectorImpl<Instruction *> *Exts, 4010 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 4011 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 4012 Exts, Truncs, TLI, false); 4013 } 4014 4015 public: 4016 /// Type for the utility function that promotes the operand of Ext. 4017 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, 4018 InstrToOrigTy &PromotedInsts, 4019 unsigned &CreatedInstsCost, 4020 SmallVectorImpl<Instruction *> *Exts, 4021 SmallVectorImpl<Instruction *> *Truncs, 4022 const TargetLowering &TLI); 4023 4024 /// Given a sign/zero extend instruction \p Ext, return the appropriate 4025 /// action to promote the operand of \p Ext instead of using Ext. 4026 /// \return NULL if no promotable action is possible with the current 4027 /// sign extension. 4028 /// \p InsertedInsts keeps track of all the instructions inserted by the 4029 /// other CodeGenPrepare optimizations. This information is important 4030 /// because we do not want to promote these instructions as CodeGenPrepare 4031 /// will reinsert them later. Thus creating an infinite loop: create/remove. 4032 /// \p PromotedInsts maps the instructions to their type before promotion. 4033 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, 4034 const TargetLowering &TLI, 4035 const InstrToOrigTy &PromotedInsts); 4036 }; 4037 4038 } // end anonymous namespace 4039 4040 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 4041 Type *ConsideredExtType, 4042 const InstrToOrigTy &PromotedInsts, 4043 bool IsSExt) { 4044 // The promotion helper does not know how to deal with vector types yet. 4045 // To be able to fix that, we would need to fix the places where we 4046 // statically extend, e.g., constants and such. 4047 if (Inst->getType()->isVectorTy()) 4048 return false; 4049 4050 // We can always get through zext. 4051 if (isa<ZExtInst>(Inst)) 4052 return true; 4053 4054 // sext(sext) is ok too. 4055 if (IsSExt && isa<SExtInst>(Inst)) 4056 return true; 4057 4058 // We can get through binary operator, if it is legal. In other words, the 4059 // binary operator must have a nuw or nsw flag. 4060 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 4061 if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) && 4062 ((!IsSExt && BinOp->hasNoUnsignedWrap()) || 4063 (IsSExt && BinOp->hasNoSignedWrap()))) 4064 return true; 4065 4066 // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) 4067 if ((Inst->getOpcode() == Instruction::And || 4068 Inst->getOpcode() == Instruction::Or)) 4069 return true; 4070 4071 // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) 4072 if (Inst->getOpcode() == Instruction::Xor) { 4073 const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)); 4074 // Make sure it is not a NOT. 4075 if (Cst && !Cst->getValue().isAllOnesValue()) 4076 return true; 4077 } 4078 4079 // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst)) 4080 // It may change a poisoned value into a regular value, like 4081 // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12 4082 // poisoned value regular value 4083 // It should be OK since undef covers valid value. 
4084 if (Inst->getOpcode() == Instruction::LShr && !IsSExt) 4085 return true; 4086 4087 // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) 4088 // It may change a poisoned value into a regular value, like 4089 // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 4090 // poisoned value regular value 4091 // It should be OK since undef covers valid value. 4092 if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { 4093 const auto *ExtInst = cast<const Instruction>(*Inst->user_begin()); 4094 if (ExtInst->hasOneUse()) { 4095 const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin()); 4096 if (AndInst && AndInst->getOpcode() == Instruction::And) { 4097 const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); 4098 if (Cst && 4099 Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) 4100 return true; 4101 } 4102 } 4103 } 4104 4105 // Check if we can do the following simplification. 4106 // ext(trunc(opnd)) --> ext(opnd) 4107 if (!isa<TruncInst>(Inst)) 4108 return false; 4109 4110 Value *OpndVal = Inst->getOperand(0); 4111 // Check if we can use this operand in the extension. 4112 // If the type is larger than the result type of the extension, we cannot. 4113 if (!OpndVal->getType()->isIntegerTy() || 4114 OpndVal->getType()->getIntegerBitWidth() > 4115 ConsideredExtType->getIntegerBitWidth()) 4116 return false; 4117 4118 // If the operand of the truncate is not an instruction, we will not have 4119 // any information on the dropped bits. 4120 // (Actually we could for constant but it is not worth the extra logic). 4121 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 4122 if (!Opnd) 4123 return false; 4124 4125 // Check if the source of the type is narrow enough. 4126 // I.e., check that trunc just drops extended bits of the same kind of 4127 // the extension. 4128 // #1 get the type of the operand and check the kind of the extended bits. 4129 const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); 4130 if (OpndType) 4131 ; 4132 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) 4133 OpndType = Opnd->getOperand(0)->getType(); 4134 else 4135 return false; 4136 4137 // #2 check that the truncate just drops extended bits. 4138 return Inst->getType()->getIntegerBitWidth() >= 4139 OpndType->getIntegerBitWidth(); 4140 } 4141 4142 TypePromotionHelper::Action TypePromotionHelper::getAction( 4143 Instruction *Ext, const SetOfInstrs &InsertedInsts, 4144 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 4145 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4146 "Unexpected instruction type"); 4147 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); 4148 Type *ExtTy = Ext->getType(); 4149 bool IsSExt = isa<SExtInst>(Ext); 4150 // If the operand of the extension is not an instruction, we cannot 4151 // get through. 4152 // If it, check we can get through. 4153 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) 4154 return nullptr; 4155 4156 // Do not promote if the operand has been added by codegenprepare. 4157 // Otherwise, it means we are undoing an optimization that is likely to be 4158 // redone, thus causing potential infinite loop. 4159 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) 4160 return nullptr; 4161 4162 // SExt or Trunc instructions. 4163 // Return the related handler. 
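// For instance (an illustrative case): for
//   %a = zext i8 %x to i16
//   %b = zext i16 %a to i32
// the trunc/any-ext handler below merges the two extensions so that %x is
// zero-extended to i32 directly; other instruction kinds fall through to the
// generic promotion handlers.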
4164 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || 4165 isa<ZExtInst>(ExtOpnd)) 4166 return promoteOperandForTruncAndAnyExt; 4167 4168 // Regular instruction. 4169 // Abort early if we will have to insert non-free instructions. 4170 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) 4171 return nullptr; 4172 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; 4173 } 4174 4175 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 4176 Instruction *SExt, TypePromotionTransaction &TPT, 4177 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4178 SmallVectorImpl<Instruction *> *Exts, 4179 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 4180 // By construction, the operand of SExt is an instruction. Otherwise we cannot 4181 // get through it and this method should not be called. 4182 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 4183 Value *ExtVal = SExt; 4184 bool HasMergedNonFreeExt = false; 4185 if (isa<ZExtInst>(SExtOpnd)) { 4186 // Replace s|zext(zext(opnd)) 4187 // => zext(opnd). 4188 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); 4189 Value *ZExt = 4190 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 4191 TPT.replaceAllUsesWith(SExt, ZExt); 4192 TPT.eraseInstruction(SExt); 4193 ExtVal = ZExt; 4194 } else { 4195 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) 4196 // => z|sext(opnd). 4197 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 4198 } 4199 CreatedInstsCost = 0; 4200 4201 // Remove dead code. 4202 if (SExtOpnd->use_empty()) 4203 TPT.eraseInstruction(SExtOpnd); 4204 4205 // Check if the extension is still needed. 4206 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 4207 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 4208 if (ExtInst) { 4209 if (Exts) 4210 Exts->push_back(ExtInst); 4211 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 4212 } 4213 return ExtVal; 4214 } 4215 4216 // At this point we have: ext ty opnd to ty. 4217 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 4218 Value *NextVal = ExtInst->getOperand(0); 4219 TPT.eraseInstruction(ExtInst, NextVal); 4220 return NextVal; 4221 } 4222 4223 Value *TypePromotionHelper::promoteOperandForOther( 4224 Instruction *Ext, TypePromotionTransaction &TPT, 4225 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4226 SmallVectorImpl<Instruction *> *Exts, 4227 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 4228 bool IsSExt) { 4229 // By construction, the operand of Ext is an instruction. Otherwise we cannot 4230 // get through it and this method should not be called. 4231 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 4232 CreatedInstsCost = 0; 4233 if (!ExtOpnd->hasOneUse()) { 4234 // ExtOpnd will be promoted. 4235 // All its uses, but Ext, will need to use a truncated value of the 4236 // promoted version. 4237 // Create the truncate now. 4238 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 4239 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 4240 // Insert it just after the definition. 4241 ITrunc->moveAfter(ExtOpnd); 4242 if (Truncs) 4243 Truncs->push_back(ITrunc); 4244 } 4245 4246 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 4247 // Restore the operand of Ext (which has been replaced by the previous call 4248 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 4249 TPT.setOperand(Ext, 0, ExtOpnd); 4250 } 4251 4252 // Get through the Instruction: 4253 // 1. 
Update its type. 4254 // 2. Replace the uses of Ext by Inst. 4255 // 3. Extend each operand that needs to be extended. 4256 4257 // Remember the original type of the instruction before promotion. 4258 // This is useful to know that the high bits are sign extended bits. 4259 addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); 4260 // Step #1. 4261 TPT.mutateType(ExtOpnd, Ext->getType()); 4262 // Step #2. 4263 TPT.replaceAllUsesWith(Ext, ExtOpnd); 4264 // Step #3. 4265 Instruction *ExtForOpnd = Ext; 4266 4267 LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); 4268 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 4269 ++OpIdx) { 4270 LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 4271 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 4272 !shouldExtOperand(ExtOpnd, OpIdx)) { 4273 LLVM_DEBUG(dbgs() << "No need to propagate\n"); 4274 continue; 4275 } 4276 // Check if we can statically extend the operand. 4277 Value *Opnd = ExtOpnd->getOperand(OpIdx); 4278 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 4279 LLVM_DEBUG(dbgs() << "Statically extend\n"); 4280 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 4281 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 4282 : Cst->getValue().zext(BitWidth); 4283 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 4284 continue; 4285 } 4286 // UndefValue are typed, so we have to statically sign extend them. 4287 if (isa<UndefValue>(Opnd)) { 4288 LLVM_DEBUG(dbgs() << "Statically extend\n"); 4289 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); 4290 continue; 4291 } 4292 4293 // Otherwise we have to explicitly sign extend the operand. 4294 // Check if Ext was reused to extend an operand. 4295 if (!ExtForOpnd) { 4296 // If yes, create a new one. 4297 LLVM_DEBUG(dbgs() << "More operands to ext\n"); 4298 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) 4299 : TPT.createZExt(Ext, Opnd, Ext->getType()); 4300 if (!isa<Instruction>(ValForExtOpnd)) { 4301 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); 4302 continue; 4303 } 4304 ExtForOpnd = cast<Instruction>(ValForExtOpnd); 4305 } 4306 if (Exts) 4307 Exts->push_back(ExtForOpnd); 4308 TPT.setOperand(ExtForOpnd, 0, Opnd); 4309 4310 // Move the sign extension before the insertion point. 4311 TPT.moveBefore(ExtForOpnd, ExtOpnd); 4312 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); 4313 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); 4314 // If more sext are required, new instructions will have to be created. 4315 ExtForOpnd = nullptr; 4316 } 4317 if (ExtForOpnd == Ext) { 4318 LLVM_DEBUG(dbgs() << "Extension is useless now\n"); 4319 TPT.eraseInstruction(Ext); 4320 } 4321 return ExtOpnd; 4322 } 4323 4324 /// Check whether or not promoting an instruction to a wider type is profitable. 4325 /// \p NewCost gives the cost of extension instructions created by the 4326 /// promotion. 4327 /// \p OldCost gives the cost of extension instructions before the promotion 4328 /// plus the number of instructions that have been 4329 /// matched in the addressing mode the promotion. 4330 /// \p PromotedOperand is the value that has been promoted. 4331 /// \return True if the promotion is profitable, false otherwise. 
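/// For instance (an illustrative case): if promoting an operand creates no
/// non-free extensions (NewCost == 0) while the original extension cost 1 and
/// the promotion let one extra instruction fold into the addressing mode
/// (OldCost == 2), the promotion is considered profitable.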
4332 bool AddressingModeMatcher::isPromotionProfitable( 4333 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { 4334 LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost 4335 << '\n'); 4336 // The cost of the new extensions is greater than the cost of the 4337 // old extension plus what we folded. 4338 // This is not profitable. 4339 if (NewCost > OldCost) 4340 return false; 4341 if (NewCost < OldCost) 4342 return true; 4343 // The promotion is neutral but it may help folding the sign extension in 4344 // loads for instance. 4345 // Check that we did not create an illegal instruction. 4346 return isPromotedInstructionLegal(TLI, DL, PromotedOperand); 4347 } 4348 4349 /// Given an instruction or constant expr, see if we can fold the operation 4350 /// into the addressing mode. If so, update the addressing mode and return 4351 /// true, otherwise return false without modifying AddrMode. 4352 /// If \p MovedAway is not NULL, it contains the information of whether or 4353 /// not AddrInst has to be folded into the addressing mode on success. 4354 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing 4355 /// because it has been moved away. 4356 /// Thus AddrInst must not be added in the matched instructions. 4357 /// This state can happen when AddrInst is a sext, since it may be moved away. 4358 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 4359 /// not be referenced anymore. 4360 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, 4361 unsigned Depth, 4362 bool *MovedAway) { 4363 // Avoid exponential behavior on extremely deep expression trees. 4364 if (Depth >= 5) return false; 4365 4366 // By default, all matched instructions stay in place. 4367 if (MovedAway) 4368 *MovedAway = false; 4369 4370 switch (Opcode) { 4371 case Instruction::PtrToInt: 4372 // PtrToInt is always a noop, as we know that the int type is pointer sized. 4373 return matchAddr(AddrInst->getOperand(0), Depth); 4374 case Instruction::IntToPtr: { 4375 auto AS = AddrInst->getType()->getPointerAddressSpace(); 4376 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 4377 // This inttoptr is a no-op if the integer type is pointer sized. 4378 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 4379 return matchAddr(AddrInst->getOperand(0), Depth); 4380 return false; 4381 } 4382 case Instruction::BitCast: 4383 // BitCast is always a noop, and we can handle it as long as it is 4384 // int->int or pointer->pointer (we don't want int<->fp or something). 4385 if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && 4386 // Don't touch identity bitcasts. These were probably put here by LSR, 4387 // and we don't want to mess around with them. Assume it knows what it 4388 // is doing. 4389 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 4390 return matchAddr(AddrInst->getOperand(0), Depth); 4391 return false; 4392 case Instruction::AddrSpaceCast: { 4393 unsigned SrcAS 4394 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 4395 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 4396 if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS)) 4397 return matchAddr(AddrInst->getOperand(0), Depth); 4398 return false; 4399 } 4400 case Instruction::Add: { 4401 // Check to see if we can merge in the RHS then the LHS. If so, we win. 
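// E.g. (illustrative): for "add %base, 16" we first try to fold the constant
// 16 into BaseOffs and then match %base; if that combined match fails we roll
// back to the restoration point taken below and retry the operands in the
// other order.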
4402 ExtAddrMode BackupAddrMode = AddrMode; 4403 unsigned OldSize = AddrModeInsts.size(); 4404 // Start a transaction at this point. 4405 // The LHS may match but not the RHS. 4406 // Therefore, we need a higher level restoration point to undo partially 4407 // matched operation. 4408 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4409 TPT.getRestorationPoint(); 4410 4411 AddrMode.InBounds = false; 4412 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 4413 matchAddr(AddrInst->getOperand(0), Depth+1)) 4414 return true; 4415 4416 // Restore the old addr mode info. 4417 AddrMode = BackupAddrMode; 4418 AddrModeInsts.resize(OldSize); 4419 TPT.rollback(LastKnownGood); 4420 4421 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 4422 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 4423 matchAddr(AddrInst->getOperand(1), Depth+1)) 4424 return true; 4425 4426 // Otherwise we definitely can't merge the ADD in. 4427 AddrMode = BackupAddrMode; 4428 AddrModeInsts.resize(OldSize); 4429 TPT.rollback(LastKnownGood); 4430 break; 4431 } 4432 //case Instruction::Or: 4433 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 4434 //break; 4435 case Instruction::Mul: 4436 case Instruction::Shl: { 4437 // Can only handle X*C and X << C. 4438 AddrMode.InBounds = false; 4439 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 4440 if (!RHS || RHS->getBitWidth() > 64) 4441 return false; 4442 int64_t Scale = RHS->getSExtValue(); 4443 if (Opcode == Instruction::Shl) 4444 Scale = 1LL << Scale; 4445 4446 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 4447 } 4448 case Instruction::GetElementPtr: { 4449 // Scan the GEP. We check it if it contains constant offsets and at most 4450 // one variable offset. 4451 int VariableOperand = -1; 4452 unsigned VariableScale = 0; 4453 4454 int64_t ConstantOffset = 0; 4455 gep_type_iterator GTI = gep_type_begin(AddrInst); 4456 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 4457 if (StructType *STy = GTI.getStructTypeOrNull()) { 4458 const StructLayout *SL = DL.getStructLayout(STy); 4459 unsigned Idx = 4460 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 4461 ConstantOffset += SL->getElementOffset(Idx); 4462 } else { 4463 TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType()); 4464 if (TS.isNonZero()) { 4465 // The optimisations below currently only work for fixed offsets. 4466 if (TS.isScalable()) 4467 return false; 4468 int64_t TypeSize = TS.getFixedSize(); 4469 if (ConstantInt *CI = 4470 dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 4471 const APInt &CVal = CI->getValue(); 4472 if (CVal.getMinSignedBits() <= 64) { 4473 ConstantOffset += CVal.getSExtValue() * TypeSize; 4474 continue; 4475 } 4476 } 4477 // We only allow one variable index at the moment. 4478 if (VariableOperand != -1) 4479 return false; 4480 4481 // Remember the variable index. 4482 VariableOperand = i; 4483 VariableScale = TypeSize; 4484 } 4485 } 4486 } 4487 4488 // A common case is for the GEP to only do a constant offset. In this case, 4489 // just add it to the disp field and check validity. 4490 if (VariableOperand == -1) { 4491 AddrMode.BaseOffs += ConstantOffset; 4492 if (ConstantOffset == 0 || 4493 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 4494 // Check to see if we can fold the base pointer in too. 
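// For example (illustrative): for "getelementptr inbounds i32, i32* %p, i64 3"
// the constant offset 12 has already been added to BaseOffs above, and here we
// additionally try to fold %p itself (e.g. as the base register or a global).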
4495 if (matchAddr(AddrInst->getOperand(0), Depth+1)) { 4496 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4497 AddrMode.InBounds = false; 4498 return true; 4499 } 4500 } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && 4501 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && 4502 ConstantOffset > 0) { 4503 // Record GEPs with non-zero offsets as candidates for splitting in the 4504 // event that the offset cannot fit into the r+i addressing mode. 4505 // Simple and common case that only one GEP is used in calculating the 4506 // address for the memory access. 4507 Value *Base = AddrInst->getOperand(0); 4508 auto *BaseI = dyn_cast<Instruction>(Base); 4509 auto *GEP = cast<GetElementPtrInst>(AddrInst); 4510 if (isa<Argument>(Base) || isa<GlobalValue>(Base) || 4511 (BaseI && !isa<CastInst>(BaseI) && 4512 !isa<GetElementPtrInst>(BaseI))) { 4513 // Make sure the parent block allows inserting non-PHI instructions 4514 // before the terminator. 4515 BasicBlock *Parent = 4516 BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); 4517 if (!Parent->getTerminator()->isEHPad()) 4518 LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); 4519 } 4520 } 4521 AddrMode.BaseOffs -= ConstantOffset; 4522 return false; 4523 } 4524 4525 // Save the valid addressing mode in case we can't match. 4526 ExtAddrMode BackupAddrMode = AddrMode; 4527 unsigned OldSize = AddrModeInsts.size(); 4528 4529 // See if the scale and offset amount is valid for this target. 4530 AddrMode.BaseOffs += ConstantOffset; 4531 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4532 AddrMode.InBounds = false; 4533 4534 // Match the base operand of the GEP. 4535 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 4536 // If it couldn't be matched, just stuff the value in a register. 4537 if (AddrMode.HasBaseReg) { 4538 AddrMode = BackupAddrMode; 4539 AddrModeInsts.resize(OldSize); 4540 return false; 4541 } 4542 AddrMode.HasBaseReg = true; 4543 AddrMode.BaseReg = AddrInst->getOperand(0); 4544 } 4545 4546 // Match the remaining variable portion of the GEP. 4547 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 4548 Depth)) { 4549 // If it couldn't be matched, try stuffing the base into a register 4550 // instead of matching it, and retrying the match of the scale. 4551 AddrMode = BackupAddrMode; 4552 AddrModeInsts.resize(OldSize); 4553 if (AddrMode.HasBaseReg) 4554 return false; 4555 AddrMode.HasBaseReg = true; 4556 AddrMode.BaseReg = AddrInst->getOperand(0); 4557 AddrMode.BaseOffs += ConstantOffset; 4558 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 4559 VariableScale, Depth)) { 4560 // If even that didn't work, bail. 4561 AddrMode = BackupAddrMode; 4562 AddrModeInsts.resize(OldSize); 4563 return false; 4564 } 4565 } 4566 4567 return true; 4568 } 4569 case Instruction::SExt: 4570 case Instruction::ZExt: { 4571 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 4572 if (!Ext) 4573 return false; 4574 4575 // Try to move this ext out of the way of the addressing mode. 4576 // Ask for a method for doing so. 4577 TypePromotionHelper::Action TPH = 4578 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 4579 if (!TPH) 4580 return false; 4581 4582 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4583 TPT.getRestorationPoint(); 4584 unsigned CreatedInstsCost = 0; 4585 unsigned ExtCost = !TLI.isExtFree(Ext); 4586 Value *PromotedOperand = 4587 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 4588 // SExt has been moved away. 
4589 // Thus either it will be rematched later in the recursive calls or it is 4590 // gone. Anyway, we must not fold it into the addressing mode at this point. 4591 // E.g., 4592 // op = add opnd, 1 4593 // idx = ext op 4594 // addr = gep base, idx 4595 // is now: 4596 // promotedOpnd = ext opnd <- no match here 4597 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 4598 // addr = gep base, op <- match 4599 if (MovedAway) 4600 *MovedAway = true; 4601 4602 assert(PromotedOperand && 4603 "TypePromotionHelper should have filtered out those cases"); 4604 4605 ExtAddrMode BackupAddrMode = AddrMode; 4606 unsigned OldSize = AddrModeInsts.size(); 4607 4608 if (!matchAddr(PromotedOperand, Depth) || 4609 // The total of the new cost is equal to the cost of the created 4610 // instructions. 4611 // The total of the old cost is equal to the cost of the extension plus 4612 // what we have saved in the addressing mode. 4613 !isPromotionProfitable(CreatedInstsCost, 4614 ExtCost + (AddrModeInsts.size() - OldSize), 4615 PromotedOperand)) { 4616 AddrMode = BackupAddrMode; 4617 AddrModeInsts.resize(OldSize); 4618 LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 4619 TPT.rollback(LastKnownGood); 4620 return false; 4621 } 4622 return true; 4623 } 4624 } 4625 return false; 4626 } 4627 4628 /// If we can, try to add the value of 'Addr' into the current addressing mode. 4629 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 4630 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 4631 /// for the target. 4632 /// 4633 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 4634 // Start a transaction at this point that we will rollback if the matching 4635 // fails. 4636 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4637 TPT.getRestorationPoint(); 4638 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 4639 if (CI->getValue().isSignedIntN(64)) { 4640 // Fold in immediates if legal for the target. 4641 AddrMode.BaseOffs += CI->getSExtValue(); 4642 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4643 return true; 4644 AddrMode.BaseOffs -= CI->getSExtValue(); 4645 } 4646 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 4647 // If this is a global variable, try to fold it into the addressing mode. 4648 if (!AddrMode.BaseGV) { 4649 AddrMode.BaseGV = GV; 4650 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4651 return true; 4652 AddrMode.BaseGV = nullptr; 4653 } 4654 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 4655 ExtAddrMode BackupAddrMode = AddrMode; 4656 unsigned OldSize = AddrModeInsts.size(); 4657 4658 // Check to see if it is possible to fold this operation. 4659 bool MovedAway = false; 4660 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 4661 // This instruction may have been moved away. If so, there is nothing 4662 // to check here. 4663 if (MovedAway) 4664 return true; 4665 // Okay, it's possible to fold this. Check to see if it is actually 4666 // *profitable* to do so. We use a simple cost model to avoid increasing 4667 // register pressure too much. 4668 if (I->hasOneUse() || 4669 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 4670 AddrModeInsts.push_back(I); 4671 return true; 4672 } 4673 4674 // It isn't profitable to do this, roll back. 
4675 //cerr << "NOT FOLDING: " << *I; 4676 AddrMode = BackupAddrMode; 4677 AddrModeInsts.resize(OldSize); 4678 TPT.rollback(LastKnownGood); 4679 } 4680 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 4681 if (matchOperationAddr(CE, CE->getOpcode(), Depth)) 4682 return true; 4683 TPT.rollback(LastKnownGood); 4684 } else if (isa<ConstantPointerNull>(Addr)) { 4685 // Null pointer gets folded without affecting the addressing mode. 4686 return true; 4687 } 4688 4689 // Worse case, the target should support [reg] addressing modes. :) 4690 if (!AddrMode.HasBaseReg) { 4691 AddrMode.HasBaseReg = true; 4692 AddrMode.BaseReg = Addr; 4693 // Still check for legality in case the target supports [imm] but not [i+r]. 4694 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4695 return true; 4696 AddrMode.HasBaseReg = false; 4697 AddrMode.BaseReg = nullptr; 4698 } 4699 4700 // If the base register is already taken, see if we can do [r+r]. 4701 if (AddrMode.Scale == 0) { 4702 AddrMode.Scale = 1; 4703 AddrMode.ScaledReg = Addr; 4704 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4705 return true; 4706 AddrMode.Scale = 0; 4707 AddrMode.ScaledReg = nullptr; 4708 } 4709 // Couldn't match. 4710 TPT.rollback(LastKnownGood); 4711 return false; 4712 } 4713 4714 /// Check to see if all uses of OpVal by the specified inline asm call are due 4715 /// to memory operands. If so, return true, otherwise return false. 4716 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 4717 const TargetLowering &TLI, 4718 const TargetRegisterInfo &TRI) { 4719 const Function *F = CI->getFunction(); 4720 TargetLowering::AsmOperandInfoVector TargetConstraints = 4721 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI); 4722 4723 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4724 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4725 4726 // Compute the constraint code and ConstraintType to use. 4727 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 4728 4729 // If this asm operand is our Value*, and if it isn't an indirect memory 4730 // operand, we can't fold it! 4731 if (OpInfo.CallOperandVal == OpVal && 4732 (OpInfo.ConstraintType != TargetLowering::C_Memory || 4733 !OpInfo.isIndirect)) 4734 return false; 4735 } 4736 4737 return true; 4738 } 4739 4740 // Max number of memory uses to look at before aborting the search to conserve 4741 // compile time. 4742 static constexpr int MaxMemoryUsesToScan = 20; 4743 4744 /// Recursively walk all the uses of I until we find a memory use. 4745 /// If we find an obviously non-foldable instruction, return true. 4746 /// Add the ultimately found memory instructions to MemoryUses. 4747 static bool FindAllMemoryUses( 4748 Instruction *I, 4749 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 4750 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, 4751 const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, 4752 BlockFrequencyInfo *BFI, int SeenInsts = 0) { 4753 // If we already considered this instruction, we're done. 4754 if (!ConsideredInsts.insert(I).second) 4755 return false; 4756 4757 // If this is an obviously unfoldable instruction, bail out. 4758 if (!MightBeFoldableInst(I)) 4759 return true; 4760 4761 // Loop over all the uses, recursively processing them. 4762 for (Use &U : I->uses()) { 4763 // Conservatively return true if we're seeing a large number or a deep chain 4764 // of users. 
This avoids excessive compilation times in pathological cases. 4765 if (SeenInsts++ >= MaxMemoryUsesToScan) 4766 return true; 4767 4768 Instruction *UserI = cast<Instruction>(U.getUser()); 4769 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 4770 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 4771 continue; 4772 } 4773 4774 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 4775 unsigned opNo = U.getOperandNo(); 4776 if (opNo != StoreInst::getPointerOperandIndex()) 4777 return true; // Storing addr, not into addr. 4778 MemoryUses.push_back(std::make_pair(SI, opNo)); 4779 continue; 4780 } 4781 4782 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 4783 unsigned opNo = U.getOperandNo(); 4784 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 4785 return true; // Storing addr, not into addr. 4786 MemoryUses.push_back(std::make_pair(RMW, opNo)); 4787 continue; 4788 } 4789 4790 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 4791 unsigned opNo = U.getOperandNo(); 4792 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 4793 return true; // Storing addr, not into addr. 4794 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 4795 continue; 4796 } 4797 4798 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 4799 if (CI->hasFnAttr(Attribute::Cold)) { 4800 // If this is a cold call, we can sink the addressing calculation into 4801 // the cold path. See optimizeCallInst 4802 bool OptForSize = OptSize || 4803 llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI); 4804 if (!OptForSize) 4805 continue; 4806 } 4807 4808 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand()); 4809 if (!IA) return true; 4810 4811 // If this is a memory operand, we're cool, otherwise bail out. 4812 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 4813 return true; 4814 continue; 4815 } 4816 4817 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, 4818 PSI, BFI, SeenInsts)) 4819 return true; 4820 } 4821 4822 return false; 4823 } 4824 4825 /// Return true if Val is already known to be live at the use site that we're 4826 /// folding it into. If so, there is no cost to include it in the addressing 4827 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 4828 /// instruction already. 4829 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 4830 Value *KnownLive2) { 4831 // If Val is either of the known-live values, we know it is live! 4832 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 4833 return true; 4834 4835 // All values other than instructions and arguments (e.g. constants) are live. 4836 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 4837 4838 // If Val is a constant sized alloca in the entry block, it is live, this is 4839 // true because it is just a reference to the stack/frame pointer, which is 4840 // live for the whole function. 4841 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 4842 if (AI->isStaticAlloca()) 4843 return true; 4844 4845 // Check to see if this value is already used in the memory instruction's 4846 // block. If so, it's already live into the block at the very least, so we 4847 // can reasonably fold it. 4848 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 4849 } 4850 4851 /// It is possible for the addressing mode of the machine to fold the specified 4852 /// instruction into a load or store that ultimately uses it. 4853 /// However, the specified instruction has multiple uses. 
4854 /// Given this, it may actually increase register pressure to fold it 4855 /// into the load. For example, consider this code: 4856 /// 4857 /// X = ... 4858 /// Y = X+1 4859 /// use(Y) -> nonload/store 4860 /// Z = Y+1 4861 /// load Z 4862 /// 4863 /// In this case, Y has multiple uses, and can be folded into the load of Z 4864 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 4865 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 4866 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 4867 /// number of computations either. 4868 /// 4869 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 4870 /// X was live across 'load Z' for other reasons, we actually *would* want to 4871 /// fold the addressing mode in the Z case. This would make Y die earlier. 4872 bool AddressingModeMatcher:: 4873 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 4874 ExtAddrMode &AMAfter) { 4875 if (IgnoreProfitability) return true; 4876 4877 // AMBefore is the addressing mode before this instruction was folded into it, 4878 // and AMAfter is the addressing mode after the instruction was folded. Get 4879 // the set of registers referenced by AMAfter and subtract out those 4880 // referenced by AMBefore: this is the set of values which folding in this 4881 // address extends the lifetime of. 4882 // 4883 // Note that there are only two potential values being referenced here, 4884 // BaseReg and ScaleReg (global addresses are always available, as are any 4885 // folded immediates). 4886 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 4887 4888 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 4889 // lifetime wasn't extended by adding this instruction. 4890 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4891 BaseReg = nullptr; 4892 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4893 ScaledReg = nullptr; 4894 4895 // If folding this instruction (and it's subexprs) didn't extend any live 4896 // ranges, we're ok with it. 4897 if (!BaseReg && !ScaledReg) 4898 return true; 4899 4900 // If all uses of this instruction can have the address mode sunk into them, 4901 // we can remove the addressing mode and effectively trade one live register 4902 // for another (at worst.) In this context, folding an addressing mode into 4903 // the use is just a particularly nice way of sinking it. 4904 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 4905 SmallPtrSet<Instruction*, 16> ConsideredInsts; 4906 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, 4907 PSI, BFI)) 4908 return false; // Has a non-memory, non-foldable use! 4909 4910 // Now that we know that all uses of this instruction are part of a chain of 4911 // computation involving only operations that could theoretically be folded 4912 // into a memory use, loop over each of these memory operation uses and see 4913 // if they could *actually* fold the instruction. The assumption is that 4914 // addressing modes are cheap and that duplicating the computation involved 4915 // many times is worthwhile, even on a fastpath. For sinking candidates 4916 // (i.e. cold call sites), this serves as a way to prevent excessive code 4917 // growth since most architectures have some reasonable small and fast way to 4918 // compute an effective address. 
(e.g., LEA on x86). 4919 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 4920 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 4921 Instruction *User = MemoryUses[i].first; 4922 unsigned OpNo = MemoryUses[i].second; 4923 4924 // Get the access type of this use. If the use isn't a pointer, we don't 4925 // know what it accesses. 4926 Value *Address = User->getOperand(OpNo); 4927 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); 4928 if (!AddrTy) 4929 return false; 4930 Type *AddressAccessTy = AddrTy->getElementType(); 4931 unsigned AS = AddrTy->getAddressSpace(); 4932 4933 // Do a match against the root of this address, ignoring profitability. This 4934 // will tell us if the addressing mode for the memory operation will 4935 // *actually* cover the shared instruction. 4936 ExtAddrMode Result; 4937 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, 4938 0); 4939 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4940 TPT.getRestorationPoint(); 4941 AddressingModeMatcher Matcher( 4942 MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result, 4943 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, BFI); 4944 Matcher.IgnoreProfitability = true; 4945 bool Success = Matcher.matchAddr(Address, 0); 4946 (void)Success; assert(Success && "Couldn't select *anything*?"); 4947 4948 // The match was only done to check profitability; the changes made are not 4949 // part of the original matcher. Therefore, they should be dropped, 4950 // otherwise the original matcher will not present the right state. 4951 TPT.rollback(LastKnownGood); 4952 4953 // If the match didn't cover I, then it won't be shared by it. 4954 if (!is_contained(MatchedAddrModeInsts, I)) 4955 return false; 4956 4957 MatchedAddrModeInsts.clear(); 4958 } 4959 4960 return true; 4961 } 4962 4963 /// Return true if the specified value is defined in a 4964 /// different basic block than BB. 4965 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 4966 if (Instruction *I = dyn_cast<Instruction>(V)) 4967 return I->getParent() != BB; 4968 return false; 4969 } 4970 4971 /// Sink addressing mode computation immediately before MemoryInst if doing so 4972 /// can be done without increasing register pressure. The need for the 4973 /// register pressure constraint means this can end up being an all-or-nothing 4974 /// decision for all uses of the same addressing computation. 4975 /// 4976 /// Load and Store Instructions often have addressing modes that can do 4977 /// significant amounts of computation. As such, instruction selection will try 4978 /// to get the load or store to do as much computation as possible for the 4979 /// program. The problem is that isel can only see within a single block. As 4980 /// such, we sink as much legal addressing mode work into the block as possible. 4981 /// 4982 /// This method is used to optimize both load/store and inline asms with memory 4983 /// operands. It's also used to sink addressing computations feeding into cold 4984 /// call sites into their (cold) basic block. 4985 /// 4986 /// The motivation for handling sinking into cold blocks is that doing so can 4987 /// both enable other address mode sinking (by satisfying the register pressure 4988 /// constraint above), and reduce register pressure globally (by removing the 4989 /// addressing mode computation from the fast path entirely).
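///
/// As an illustrative sketch (not taken from a particular test), given
///   %a = getelementptr i8, i8* %base, i64 %off   ; computed in another block
///   ...
///   %v = load i8, i8* %a
/// the address computation is rebuilt as a "sunkaddr" value immediately
/// before the load, so instruction selection can fold it into the load's
/// addressing mode.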
4990 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 4991 Type *AccessTy, unsigned AddrSpace) { 4992 Value *Repl = Addr; 4993 4994 // Try to collapse single-value PHI nodes. This is necessary to undo 4995 // unprofitable PRE transformations. 4996 SmallVector<Value*, 8> worklist; 4997 SmallPtrSet<Value*, 16> Visited; 4998 worklist.push_back(Addr); 4999 5000 // Use a worklist to iteratively look through PHI and select nodes, and 5001 // ensure that the addressing modes obtained from the non-PHI/select roots of 5002 // the graph are compatible. 5003 bool PhiOrSelectSeen = false; 5004 SmallVector<Instruction*, 16> AddrModeInsts; 5005 const SimplifyQuery SQ(*DL, TLInfo); 5006 AddressingModeCombiner AddrModes(SQ, Addr); 5007 TypePromotionTransaction TPT(RemovedInsts); 5008 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5009 TPT.getRestorationPoint(); 5010 while (!worklist.empty()) { 5011 Value *V = worklist.back(); 5012 worklist.pop_back(); 5013 5014 // We allow traversing cyclic Phi nodes. 5015 // In case of success after this loop, we ensure that traversing through 5016 // Phi nodes ends up with all cases computing the address in the form 5017 // BaseGV + Base + Scale * Index + Offset 5018 // where Scale and Offset are constants and BaseGV, Base and Index 5019 // are exactly the same Values in all cases. 5020 // This means that BaseGV, Scale and Offset dominate our memory instruction 5021 // and have the same value as they had in the address computation 5022 // represented as a Phi. So we can safely sink the address computation to 5023 // the memory instruction. 5023 if (!Visited.insert(V).second) 5024 continue; 5025 5026 // For a PHI node, push all of its incoming values. 5027 if (PHINode *P = dyn_cast<PHINode>(V)) { 5028 append_range(worklist, P->incoming_values()); 5029 PhiOrSelectSeen = true; 5030 continue; 5031 } 5032 // Similarly for select. 5033 if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 5034 worklist.push_back(SI->getFalseValue()); 5035 worklist.push_back(SI->getTrueValue()); 5036 PhiOrSelectSeen = true; 5037 continue; 5038 } 5039 5040 // For non-PHIs, determine the addressing mode being computed. Note that 5041 // the result may differ depending on what other uses our candidate 5042 // addressing instructions might have. 5043 AddrModeInsts.clear(); 5044 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, 5045 0); 5046 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 5047 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI, 5048 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, 5049 BFI.get()); 5050 5051 GetElementPtrInst *GEP = LargeOffsetGEP.first; 5052 if (GEP && !NewGEPBases.count(GEP)) { 5053 // If splitting the underlying data structure can reduce the offset of a 5054 // GEP, collect the GEP. Skip the GEPs that are the new bases of 5055 // previously split data structures. 5056 LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP); 5057 if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end()) 5058 LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size(); 5059 } 5060 5061 NewAddrMode.OriginalValue = V; 5062 if (!AddrModes.addNewAddrMode(NewAddrMode)) 5063 break; 5064 } 5065 5066 // Try to combine the AddrModes we've collected. If we couldn't collect any, 5067 // or we have multiple but either couldn't combine them or combining them 5068 // wouldn't do anything useful, bail out now.
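  // Combining is what allows, for example, two matched modes that differ only
  // in the base register (one per incoming value of a phi) to be merged by
  // introducing a new phi over the base registers and using it as the common
  // base. This is an illustrative description; see AddressingModeCombiner for
  // the exact rules.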
5069 if (!AddrModes.combineAddrModes()) { 5070 TPT.rollback(LastKnownGood); 5071 return false; 5072 } 5073 bool Modified = TPT.commit(); 5074 5075 // Get the combined AddrMode (or the only AddrMode, if we only had one). 5076 ExtAddrMode AddrMode = AddrModes.getAddrMode(); 5077 5078 // If all the instructions matched are already in this BB, don't do anything. 5079 // If we saw a Phi node then it is definitely not local, and if we saw a select 5080 // then we want to push the address calculation past it even if it's already 5081 // in this BB. 5082 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { 5083 return IsNonLocalValue(V, MemoryInst->getParent()); 5084 })) { 5085 LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode 5086 << "\n"); 5087 return Modified; 5088 } 5089 5090 // Insert this computation right after this user. Since our caller is 5091 // scanning from the top of the BB to the bottom, any reuse of the expression 5092 // is guaranteed to happen later. 5093 IRBuilder<> Builder(MemoryInst); 5094 5095 // Now that we have determined the addressing expression we want to use and 5096 // know that we have to sink it into this block, check to see if we have 5097 // already done this for some other load/store instr in this block. If so, 5098 // reuse the computation. Before attempting reuse, check if the address is 5099 // still valid, as it may have been erased. 5100 5101 WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; 5102 5103 Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; 5104 if (SunkAddr) { 5105 LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode 5106 << " for " << *MemoryInst << "\n"); 5107 if (SunkAddr->getType() != Addr->getType()) 5108 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 5109 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() && 5110 SubtargetInfo->addrSinkUsingGEPs())) { 5111 // By default, we use the GEP-based method when AA is used later. This 5112 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 5113 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 5114 << " for " << *MemoryInst << "\n"); 5115 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 5116 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 5117 5118 // First, find the pointer. 5119 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 5120 ResultPtr = AddrMode.BaseReg; 5121 AddrMode.BaseReg = nullptr; 5122 } 5123 5124 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 5125 // We can't add more than one pointer together, nor can we scale a 5126 // pointer (both of which seem meaningless). 5127 if (ResultPtr || AddrMode.Scale != 1) 5128 return Modified; 5129 5130 ResultPtr = AddrMode.ScaledReg; 5131 AddrMode.Scale = 0; 5132 } 5133 5134 // It is only safe to sign extend the BaseReg if we know that the math 5135 // required to create it did not overflow before we extend it. Since 5136 // the original IR value was tossed in favor of a constant back when 5137 // the AddrMode was created, we need to bail out gracefully if widths 5138 // do not match instead of extending it. 5139 // 5140 // (See below for code to add the scale.)
5141 if (AddrMode.Scale) { 5142 Type *ScaledRegTy = AddrMode.ScaledReg->getType(); 5143 if (cast<IntegerType>(IntPtrTy)->getBitWidth() > 5144 cast<IntegerType>(ScaledRegTy)->getBitWidth()) 5145 return Modified; 5146 } 5147 5148 if (AddrMode.BaseGV) { 5149 if (ResultPtr) 5150 return Modified; 5151 5152 ResultPtr = AddrMode.BaseGV; 5153 } 5154 5155 // If the real base value actually came from an inttoptr, then the matcher 5156 // will look through it and provide only the integer value. In that case, 5157 // use it here. 5158 if (!DL->isNonIntegralPointerType(Addr->getType())) { 5159 if (!ResultPtr && AddrMode.BaseReg) { 5160 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), 5161 "sunkaddr"); 5162 AddrMode.BaseReg = nullptr; 5163 } else if (!ResultPtr && AddrMode.Scale == 1) { 5164 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), 5165 "sunkaddr"); 5166 AddrMode.Scale = 0; 5167 } 5168 } 5169 5170 if (!ResultPtr && 5171 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 5172 SunkAddr = Constant::getNullValue(Addr->getType()); 5173 } else if (!ResultPtr) { 5174 return Modified; 5175 } else { 5176 Type *I8PtrTy = 5177 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 5178 Type *I8Ty = Builder.getInt8Ty(); 5179 5180 // Start with the base register. Do this first so that subsequent address 5181 // matching finds it last, which will prevent it from trying to match it 5182 // as the scaled value in case it happens to be a mul. That would be 5183 // problematic if we've sunk a different mul for the scale, because then 5184 // we'd end up sinking both muls. 5185 if (AddrMode.BaseReg) { 5186 Value *V = AddrMode.BaseReg; 5187 if (V->getType() != IntPtrTy) 5188 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 5189 5190 ResultIndex = V; 5191 } 5192 5193 // Add the scale value. 5194 if (AddrMode.Scale) { 5195 Value *V = AddrMode.ScaledReg; 5196 if (V->getType() == IntPtrTy) { 5197 // done. 5198 } else { 5199 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < 5200 cast<IntegerType>(V->getType())->getBitWidth() && 5201 "We can't transform if ScaledReg is too narrow"); 5202 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 5203 } 5204 5205 if (AddrMode.Scale != 1) 5206 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 5207 "sunkaddr"); 5208 if (ResultIndex) 5209 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 5210 else 5211 ResultIndex = V; 5212 } 5213 5214 // Add in the Base Offset if present. 5215 if (AddrMode.BaseOffs) { 5216 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 5217 if (ResultIndex) { 5218 // We need to add this separately from the scale above to help with 5219 // SDAG consecutive load/store merging. 5220 if (ResultPtr->getType() != I8PtrTy) 5221 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 5222 ResultPtr = 5223 AddrMode.InBounds 5224 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 5225 "sunkaddr") 5226 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 5227 } 5228 5229 ResultIndex = V; 5230 } 5231 5232 if (!ResultIndex) { 5233 SunkAddr = ResultPtr; 5234 } else { 5235 if (ResultPtr->getType() != I8PtrTy) 5236 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 5237 SunkAddr = 5238 AddrMode.InBounds 5239 ? 
Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 5240 "sunkaddr") 5241 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 5242 } 5243 5244 if (SunkAddr->getType() != Addr->getType()) 5245 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 5246 } 5247 } else { 5248 // We'd require a ptrtoint/inttoptr down the line, which we can't do for 5249 // non-integral pointers, so in that case bail out now. 5250 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; 5251 Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; 5252 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); 5253 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); 5254 if (DL->isNonIntegralPointerType(Addr->getType()) || 5255 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || 5256 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || 5257 (AddrMode.BaseGV && 5258 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) 5259 return Modified; 5260 5261 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 5262 << " for " << *MemoryInst << "\n"); 5263 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 5264 Value *Result = nullptr; 5265 5266 // Start with the base register. Do this first so that subsequent address 5267 // matching finds it last, which will prevent it from trying to match it 5268 // as the scaled value in case it happens to be a mul. That would be 5269 // problematic if we've sunk a different mul for the scale, because then 5270 // we'd end up sinking both muls. 5271 if (AddrMode.BaseReg) { 5272 Value *V = AddrMode.BaseReg; 5273 if (V->getType()->isPointerTy()) 5274 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 5275 if (V->getType() != IntPtrTy) 5276 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 5277 Result = V; 5278 } 5279 5280 // Add the scale value. 5281 if (AddrMode.Scale) { 5282 Value *V = AddrMode.ScaledReg; 5283 if (V->getType() == IntPtrTy) { 5284 // done. 5285 } else if (V->getType()->isPointerTy()) { 5286 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 5287 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 5288 cast<IntegerType>(V->getType())->getBitWidth()) { 5289 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 5290 } else { 5291 // It is only safe to sign extend the BaseReg if we know that the math 5292 // required to create it did not overflow before we extend it. Since 5293 // the original IR value was tossed in favor of a constant back when 5294 // the AddrMode was created we need to bail out gracefully if widths 5295 // do not match instead of extending it. 5296 Instruction *I = dyn_cast_or_null<Instruction>(Result); 5297 if (I && (Result != AddrMode.BaseReg)) 5298 I->eraseFromParent(); 5299 return Modified; 5300 } 5301 if (AddrMode.Scale != 1) 5302 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 5303 "sunkaddr"); 5304 if (Result) 5305 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5306 else 5307 Result = V; 5308 } 5309 5310 // Add in the BaseGV if present. 5311 if (AddrMode.BaseGV) { 5312 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 5313 if (Result) 5314 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5315 else 5316 Result = V; 5317 } 5318 5319 // Add in the Base Offset if present. 
5320 if (AddrMode.BaseOffs) { 5321 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 5322 if (Result) 5323 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5324 else 5325 Result = V; 5326 } 5327 5328 if (!Result) 5329 SunkAddr = Constant::getNullValue(Addr->getType()); 5330 else 5331 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 5332 } 5333 5334 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 5335 // Store the newly computed address into the cache. In the case we reused a 5336 // value, this should be idempotent. 5337 SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); 5338 5339 // If we have no uses, recursively delete the value and all dead instructions 5340 // using it. 5341 if (Repl->use_empty()) { 5342 resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() { 5343 RecursivelyDeleteTriviallyDeadInstructions( 5344 Repl, TLInfo, nullptr, 5345 [&](Value *V) { removeAllAssertingVHReferences(V); }); 5346 }); 5347 } 5348 ++NumMemoryInsts; 5349 return true; 5350 } 5351 5352 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find 5353 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can 5354 /// only handle a 2 operand GEP in the same basic block or a splat constant 5355 /// vector. The 2 operands to the GEP must have a scalar pointer and a vector 5356 /// index. 5357 /// 5358 /// If the existing GEP has a vector base pointer that is splat, we can look 5359 /// through the splat to find the scalar pointer. If we can't find a scalar 5360 /// pointer there's nothing we can do. 5361 /// 5362 /// If we have a GEP with more than 2 indices where the middle indices are all 5363 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands. 5364 /// 5365 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP 5366 /// followed by a GEP with an all zeroes vector index. This will enable 5367 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a 5368 /// zero index. 5369 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst, 5370 Value *Ptr) { 5371 Value *NewAddr; 5372 5373 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { 5374 // Don't optimize GEPs that don't have indices. 5375 if (!GEP->hasIndices()) 5376 return false; 5377 5378 // If the GEP and the gather/scatter aren't in the same BB, don't optimize. 5379 // FIXME: We should support this by sinking the GEP. 5380 if (MemoryInst->getParent() != GEP->getParent()) 5381 return false; 5382 5383 SmallVector<Value *, 2> Ops(GEP->operands()); 5384 5385 bool RewriteGEP = false; 5386 5387 if (Ops[0]->getType()->isVectorTy()) { 5388 Ops[0] = getSplatValue(Ops[0]); 5389 if (!Ops[0]) 5390 return false; 5391 RewriteGEP = true; 5392 } 5393 5394 unsigned FinalIndex = Ops.size() - 1; 5395 5396 // Ensure all but the last index is 0. 5397 // FIXME: This isn't strictly required. All that's required is that they are 5398 // all scalars or splats. 5399 for (unsigned i = 1; i < FinalIndex; ++i) { 5400 auto *C = dyn_cast<Constant>(Ops[i]); 5401 if (!C) 5402 return false; 5403 if (isa<VectorType>(C->getType())) 5404 C = C->getSplatValue(); 5405 auto *CI = dyn_cast_or_null<ConstantInt>(C); 5406 if (!CI || !CI->isZero()) 5407 return false; 5408 // Scalarize the index if needed. 5409 Ops[i] = CI; 5410 } 5411 5412 // Try to scalarize the final index. 
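    // For example (illustrative only), a splat final index such as
    //   <4 x i64> <i64 3, i64 3, i64 3, i64 3>
    // is replaced by the scalar 3, so the scalar-GEP plus zero-vector-index
    // form below can be used.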
5413 if (Ops[FinalIndex]->getType()->isVectorTy()) { 5414 if (Value *V = getSplatValue(Ops[FinalIndex])) { 5415 auto *C = dyn_cast<ConstantInt>(V); 5416 // Don't scalarize an all-zeros vector. 5417 if (!C || !C->isZero()) { 5418 Ops[FinalIndex] = V; 5419 RewriteGEP = true; 5420 } 5421 } 5422 } 5423 5424 // If we made any changes or we have extra operands, we need to generate 5425 // new instructions. 5426 if (!RewriteGEP && Ops.size() == 2) 5427 return false; 5428 5429 auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); 5430 5431 IRBuilder<> Builder(MemoryInst); 5432 5433 Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType()); 5434 5435 // If the final index isn't a vector, emit a scalar GEP containing all ops 5436 // and a vector GEP with an all-zeroes final index. 5437 if (!Ops[FinalIndex]->getType()->isVectorTy()) { 5438 NewAddr = Builder.CreateGEP(Ops[0], makeArrayRef(Ops).drop_front()); 5439 auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); 5440 NewAddr = Builder.CreateGEP(NewAddr, Constant::getNullValue(IndexTy)); 5441 } else { 5442 Value *Base = Ops[0]; 5443 Value *Index = Ops[FinalIndex]; 5444 5445 // Create a scalar GEP if there are more than 2 operands. 5446 if (Ops.size() != 2) { 5447 // Replace the last index with 0. 5448 Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy); 5449 Base = Builder.CreateGEP(Base, makeArrayRef(Ops).drop_front()); 5450 } 5451 5452 // Now create the GEP with scalar pointer and vector index. 5453 NewAddr = Builder.CreateGEP(Base, Index); 5454 } 5455 } else if (!isa<Constant>(Ptr)) { 5456 // Not a GEP, but maybe it's a splat and we can create a GEP to enable 5457 // SelectionDAGBuilder to use it as a uniform base. 5458 Value *V = getSplatValue(Ptr); 5459 if (!V) 5460 return false; 5461 5462 auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); 5463 5464 IRBuilder<> Builder(MemoryInst); 5465 5466 // Emit a vector GEP with a scalar pointer and an all-0s vector index. 5467 Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType()); 5468 auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); 5469 NewAddr = Builder.CreateGEP(V, Constant::getNullValue(IndexTy)); 5470 } else { 5471 // Constant; SelectionDAGBuilder knows to check if it's a splat. 5472 return false; 5473 } 5474 5475 MemoryInst->replaceUsesOfWith(Ptr, NewAddr); 5476 5477 // If we have no uses, recursively delete the value and all dead instructions 5478 // using it. 5479 if (Ptr->use_empty()) 5480 RecursivelyDeleteTriviallyDeadInstructions( 5481 Ptr, TLInfo, nullptr, 5482 [&](Value *V) { removeAllAssertingVHReferences(V); }); 5483 5484 return true; 5485 } 5486 5487 /// If there are any memory operands, use OptimizeMemoryInst to sink their 5488 /// address computation into the block when possible / profitable. 5489 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 5490 bool MadeChange = false; 5491 5492 const TargetRegisterInfo *TRI = 5493 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); 5494 TargetLowering::AsmOperandInfoVector TargetConstraints = 5495 TLI->ParseConstraints(*DL, TRI, *CS); 5496 unsigned ArgNo = 0; 5497 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 5498 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 5499 5500 // Compute the constraint code and ConstraintType to use.
5501 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 5502 5503 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 5504 OpInfo.isIndirect) { 5505 Value *OpVal = CS->getArgOperand(ArgNo++); 5506 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 5507 } else if (OpInfo.Type == InlineAsm::isInput) 5508 ArgNo++; 5509 } 5510 5511 return MadeChange; 5512 } 5513 5514 /// Check if all the uses of \p Val are equivalent (or free) zero or 5515 /// sign extensions. 5516 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 5517 assert(!Val->use_empty() && "Input must have at least one use"); 5518 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 5519 bool IsSExt = isa<SExtInst>(FirstUser); 5520 Type *ExtTy = FirstUser->getType(); 5521 for (const User *U : Val->users()) { 5522 const Instruction *UI = cast<Instruction>(U); 5523 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 5524 return false; 5525 Type *CurTy = UI->getType(); 5526 // Same input and output types: Same instruction after CSE. 5527 if (CurTy == ExtTy) 5528 continue; 5529 5530 // If IsSExt is true, we are in this situation: 5531 // a = Val 5532 // b = sext ty1 a to ty2 5533 // c = sext ty1 a to ty3 5534 // Assuming ty2 is shorter than ty3, this could be turned into: 5535 // a = Val 5536 // b = sext ty1 a to ty2 5537 // c = sext ty2 b to ty3 5538 // However, the last sext is not free. 5539 if (IsSExt) 5540 return false; 5541 5542 // This is a ZExt, maybe this is free to extend from one type to another. 5543 // In that case, we would not account for a different use. 5544 Type *NarrowTy; 5545 Type *LargeTy; 5546 if (ExtTy->getScalarType()->getIntegerBitWidth() > 5547 CurTy->getScalarType()->getIntegerBitWidth()) { 5548 NarrowTy = CurTy; 5549 LargeTy = ExtTy; 5550 } else { 5551 NarrowTy = ExtTy; 5552 LargeTy = CurTy; 5553 } 5554 5555 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 5556 return false; 5557 } 5558 // All uses are the same or can be derived from one another for free. 5559 return true; 5560 } 5561 5562 /// Try to speculatively promote extensions in \p Exts and continue 5563 /// promoting through newly promoted operands recursively as far as doing so is 5564 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 5565 /// When some promotion happened, \p TPT contains the proper state to revert 5566 /// them. 5567 /// 5568 /// \return true if some promotion happened, false otherwise. 5569 bool CodeGenPrepare::tryToPromoteExts( 5570 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 5571 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 5572 unsigned CreatedInstsCost) { 5573 bool Promoted = false; 5574 5575 // Iterate over all the extensions to try to promote them. 5576 for (auto *I : Exts) { 5577 // Early check if we directly have ext(load). 5578 if (isa<LoadInst>(I->getOperand(0))) { 5579 ProfitablyMovedExts.push_back(I); 5580 continue; 5581 } 5582 5583 // Check whether or not we want to do any promotion. The reason we have 5584 // this check inside the for loop is to catch the case where an extension 5585 // is directly fed by a load because in such case the extension can be moved 5586 // up without any promotion on its operands. 5587 if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion) 5588 return false; 5589 5590 // Get the action to perform the promotion. 5591 TypePromotionHelper::Action TPH = 5592 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 5593 // Check if we can promote. 
5594 if (!TPH) { 5595 // Save the current extension as we cannot move up through its operand. 5596 ProfitablyMovedExts.push_back(I); 5597 continue; 5598 } 5599 5600 // Save the current state. 5601 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5602 TPT.getRestorationPoint(); 5603 SmallVector<Instruction *, 4> NewExts; 5604 unsigned NewCreatedInstsCost = 0; 5605 unsigned ExtCost = !TLI->isExtFree(I); 5606 // Promote. 5607 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, 5608 &NewExts, nullptr, *TLI); 5609 assert(PromotedVal && 5610 "TypePromotionHelper should have filtered out those cases"); 5611 5612 // We would be able to merge only one extension in a load. 5613 // Therefore, if we have more than 1 new extension we heuristically 5614 // cut this search path, because it means we degrade the code quality. 5615 // With exactly 2, the transformation is neutral, because we will merge 5616 // one extension but leave one. However, we optimistically keep going, 5617 // because the new extension may be removed too. 5618 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; 5619 // FIXME: It would be possible to propagate a negative value instead of 5620 // conservatively ceiling it to 0. 5621 TotalCreatedInstsCost = 5622 std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); 5623 if (!StressExtLdPromotion && 5624 (TotalCreatedInstsCost > 1 || 5625 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { 5626 // This promotion is not profitable, rollback to the previous state, and 5627 // save the current extension in ProfitablyMovedExts as the latest 5628 // speculative promotion turned out to be unprofitable. 5629 TPT.rollback(LastKnownGood); 5630 ProfitablyMovedExts.push_back(I); 5631 continue; 5632 } 5633 // Continue promoting NewExts as far as doing so is profitable. 5634 SmallVector<Instruction *, 2> NewlyMovedExts; 5635 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); 5636 bool NewPromoted = false; 5637 for (auto *ExtInst : NewlyMovedExts) { 5638 Instruction *MovedExt = cast<Instruction>(ExtInst); 5639 Value *ExtOperand = MovedExt->getOperand(0); 5640 // If we have reached to a load, we need this extra profitability check 5641 // as it could potentially be merged into an ext(load). 5642 if (isa<LoadInst>(ExtOperand) && 5643 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || 5644 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) 5645 continue; 5646 5647 ProfitablyMovedExts.push_back(MovedExt); 5648 NewPromoted = true; 5649 } 5650 5651 // If none of speculative promotions for NewExts is profitable, rollback 5652 // and save the current extension (I) as the last profitable extension. 5653 if (!NewPromoted) { 5654 TPT.rollback(LastKnownGood); 5655 ProfitablyMovedExts.push_back(I); 5656 continue; 5657 } 5658 // The promotion is profitable. 5659 Promoted = true; 5660 } 5661 return Promoted; 5662 } 5663 5664 /// Merging redundant sexts when one is dominating the other. 
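/// For example (illustrative IR), given two extensions of the same value where
/// one dominates the other:
///   %s1 = sext i32 %x to i64
///   ...
///   %s2 = sext i32 %x to i64
/// the uses of %s2 are rewritten to %s1 and %s2 is removed.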
5665 bool CodeGenPrepare::mergeSExts(Function &F) { 5666 bool Changed = false; 5667 for (auto &Entry : ValToSExtendedUses) { 5668 SExts &Insts = Entry.second; 5669 SExts CurPts; 5670 for (Instruction *Inst : Insts) { 5671 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || 5672 Inst->getOperand(0) != Entry.first) 5673 continue; 5674 bool inserted = false; 5675 for (auto &Pt : CurPts) { 5676 if (getDT(F).dominates(Inst, Pt)) { 5677 Pt->replaceAllUsesWith(Inst); 5678 RemovedInsts.insert(Pt); 5679 Pt->removeFromParent(); 5680 Pt = Inst; 5681 inserted = true; 5682 Changed = true; 5683 break; 5684 } 5685 if (!getDT(F).dominates(Pt, Inst)) 5686 // Give up if we need to merge in a common dominator as the 5687 // experiments show it is not profitable. 5688 continue; 5689 Inst->replaceAllUsesWith(Pt); 5690 RemovedInsts.insert(Inst); 5691 Inst->removeFromParent(); 5692 inserted = true; 5693 Changed = true; 5694 break; 5695 } 5696 if (!inserted) 5697 CurPts.push_back(Inst); 5698 } 5699 } 5700 return Changed; 5701 } 5702 5703 // Splitting large data structures so that the GEPs accessing them can have 5704 // smaller offsets so that they can be sunk to the same blocks as their users. 5705 // For example, a large struct starting from %base is split into two parts 5706 // where the second part starts from %new_base. 5707 // 5708 // Before: 5709 // BB0: 5710 // %base = 5711 // 5712 // BB1: 5713 // %gep0 = gep %base, off0 5714 // %gep1 = gep %base, off1 5715 // %gep2 = gep %base, off2 5716 // 5717 // BB2: 5718 // %load1 = load %gep0 5719 // %load2 = load %gep1 5720 // %load3 = load %gep2 5721 // 5722 // After: 5723 // BB0: 5724 // %base = 5725 // %new_base = gep %base, off0 5726 // 5727 // BB1: 5728 // %new_gep0 = %new_base 5729 // %new_gep1 = gep %new_base, off1 - off0 5730 // %new_gep2 = gep %new_base, off2 - off0 5731 // 5732 // BB2: 5733 // %load1 = load i32, i32* %new_gep0 5734 // %load2 = load i32, i32* %new_gep1 5735 // %load3 = load i32, i32* %new_gep2 5736 // 5737 // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because 5738 // their offsets are smaller enough to fit into the addressing mode. 5739 bool CodeGenPrepare::splitLargeGEPOffsets() { 5740 bool Changed = false; 5741 for (auto &Entry : LargeOffsetGEPMap) { 5742 Value *OldBase = Entry.first; 5743 SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> 5744 &LargeOffsetGEPs = Entry.second; 5745 auto compareGEPOffset = 5746 [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, 5747 const std::pair<GetElementPtrInst *, int64_t> &RHS) { 5748 if (LHS.first == RHS.first) 5749 return false; 5750 if (LHS.second != RHS.second) 5751 return LHS.second < RHS.second; 5752 return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; 5753 }; 5754 // Sorting all the GEPs of the same data structures based on the offsets. 5755 llvm::sort(LargeOffsetGEPs, compareGEPOffset); 5756 LargeOffsetGEPs.erase( 5757 std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()), 5758 LargeOffsetGEPs.end()); 5759 // Skip if all the GEPs have the same offsets. 
5760 if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second) 5761 continue; 5762 GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first; 5763 int64_t BaseOffset = LargeOffsetGEPs.begin()->second; 5764 Value *NewBaseGEP = nullptr; 5765 5766 auto *LargeOffsetGEP = LargeOffsetGEPs.begin(); 5767 while (LargeOffsetGEP != LargeOffsetGEPs.end()) { 5768 GetElementPtrInst *GEP = LargeOffsetGEP->first; 5769 int64_t Offset = LargeOffsetGEP->second; 5770 if (Offset != BaseOffset) { 5771 TargetLowering::AddrMode AddrMode; 5772 AddrMode.BaseOffs = Offset - BaseOffset; 5773 // The result type of the GEP might not be the type of the memory 5774 // access. 5775 if (!TLI->isLegalAddressingMode(*DL, AddrMode, 5776 GEP->getResultElementType(), 5777 GEP->getAddressSpace())) { 5778 // We need to create a new base if the offset to the current base is 5779 // too large to fit into the addressing mode. So, a very large struct 5780 // may be split into several parts. 5781 BaseGEP = GEP; 5782 BaseOffset = Offset; 5783 NewBaseGEP = nullptr; 5784 } 5785 } 5786 5787 // Generate a new GEP to replace the current one. 5788 LLVMContext &Ctx = GEP->getContext(); 5789 Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); 5790 Type *I8PtrTy = 5791 Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace()); 5792 Type *I8Ty = Type::getInt8Ty(Ctx); 5793 5794 if (!NewBaseGEP) { 5795 // Create a new base if we don't have one yet. Find the insertion 5796 // pointer for the new base first. 5797 BasicBlock::iterator NewBaseInsertPt; 5798 BasicBlock *NewBaseInsertBB; 5799 if (auto *BaseI = dyn_cast<Instruction>(OldBase)) { 5800 // If the base of the struct is an instruction, the new base will be 5801 // inserted close to it. 5802 NewBaseInsertBB = BaseI->getParent(); 5803 if (isa<PHINode>(BaseI)) 5804 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5805 else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) { 5806 NewBaseInsertBB = 5807 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest()); 5808 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5809 } else 5810 NewBaseInsertPt = std::next(BaseI->getIterator()); 5811 } else { 5812 // If the current base is an argument or global value, the new base 5813 // will be inserted to the entry block. 5814 NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock(); 5815 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); 5816 } 5817 IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt); 5818 // Create a new base. 5819 Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset); 5820 NewBaseGEP = OldBase; 5821 if (NewBaseGEP->getType() != I8PtrTy) 5822 NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy); 5823 NewBaseGEP = 5824 NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep"); 5825 NewGEPBases.insert(NewBaseGEP); 5826 } 5827 5828 IRBuilder<> Builder(GEP); 5829 Value *NewGEP = NewBaseGEP; 5830 if (Offset == BaseOffset) { 5831 if (GEP->getType() != I8PtrTy) 5832 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5833 } else { 5834 // Calculate the new offset for the new GEP. 
5835 Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset); 5836 NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index); 5837 5838 if (GEP->getType() != I8PtrTy) 5839 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); 5840 } 5841 GEP->replaceAllUsesWith(NewGEP); 5842 LargeOffsetGEPID.erase(GEP); 5843 LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP); 5844 GEP->eraseFromParent(); 5845 Changed = true; 5846 } 5847 } 5848 return Changed; 5849 } 5850 5851 bool CodeGenPrepare::optimizePhiType( 5852 PHINode *I, SmallPtrSetImpl<PHINode *> &Visited, 5853 SmallPtrSetImpl<Instruction *> &DeletedInstrs) { 5854 // We are looking for a collection of interconnected phi nodes that together 5855 // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts 5856 // are of the same type. Convert the whole set of nodes to the type of the 5857 // bitcast. 5858 Type *PhiTy = I->getType(); 5859 Type *ConvertTy = nullptr; 5860 if (Visited.count(I) || 5861 (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy())) 5862 return false; 5863 5864 SmallVector<Instruction *, 4> Worklist; 5865 Worklist.push_back(cast<Instruction>(I)); 5866 SmallPtrSet<PHINode *, 4> PhiNodes; 5867 PhiNodes.insert(I); 5868 Visited.insert(I); 5869 SmallPtrSet<Instruction *, 4> Defs; 5870 SmallPtrSet<Instruction *, 4> Uses; 5871 // This works by adding extra bitcasts between load/stores and removing 5872 // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi)), 5873 // we can get into the situation where we remove a bitcast in one iteration 5874 // just to add it again in the next. We need to ensure that at least one 5875 // bitcast we remove is anchored to something that will not change back. 5876 bool AnyAnchored = false; 5877 5878 while (!Worklist.empty()) { 5879 Instruction *II = Worklist.pop_back_val(); 5880 5881 if (auto *Phi = dyn_cast<PHINode>(II)) { 5882 // Handle Defs, which might also be PHIs. 5883 for (Value *V : Phi->incoming_values()) { 5884 if (auto *OpPhi = dyn_cast<PHINode>(V)) { 5885 if (!PhiNodes.count(OpPhi)) { 5886 if (Visited.count(OpPhi)) 5887 return false; 5888 PhiNodes.insert(OpPhi); 5889 Visited.insert(OpPhi); 5890 Worklist.push_back(OpPhi); 5891 } 5892 } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) { 5893 if (!OpLoad->isSimple()) 5894 return false; 5895 if (!Defs.count(OpLoad)) { 5896 Defs.insert(OpLoad); 5897 Worklist.push_back(OpLoad); 5898 } 5899 } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) { 5900 if (!Defs.count(OpEx)) { 5901 Defs.insert(OpEx); 5902 Worklist.push_back(OpEx); 5903 } 5904 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { 5905 if (!ConvertTy) 5906 ConvertTy = OpBC->getOperand(0)->getType(); 5907 if (OpBC->getOperand(0)->getType() != ConvertTy) 5908 return false; 5909 if (!Defs.count(OpBC)) { 5910 Defs.insert(OpBC); 5911 Worklist.push_back(OpBC); 5912 AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) && 5913 !isa<ExtractElementInst>(OpBC->getOperand(0)); 5914 } 5915 } else if (!isa<UndefValue>(V)) { 5916 return false; 5917 } 5918 } 5919 } 5920 5921 // Handle uses, which might also be phis. 5922 for (User *V : II->users()) { 5923 if (auto *OpPhi = dyn_cast<PHINode>(V)) { 5924 if (!PhiNodes.count(OpPhi)) { 5925 if (Visited.count(OpPhi)) 5926 return false; 5927 PhiNodes.insert(OpPhi); 5928 Visited.insert(OpPhi); 5929 Worklist.push_back(OpPhi); 5930 } 5931 } else if (auto *OpStore = dyn_cast<StoreInst>(V)) { 5932 if (!OpStore->isSimple() || OpStore->getOperand(0) != II) 5933 return false; 5934 Uses.insert(OpStore); 5935 }
else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { 5936 if (!ConvertTy) 5937 ConvertTy = OpBC->getType(); 5938 if (OpBC->getType() != ConvertTy) 5939 return false; 5940 Uses.insert(OpBC); 5941 AnyAnchored |= 5942 any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); }); 5943 } else { 5944 return false; 5945 } 5946 } 5947 } 5948 5949 if (!ConvertTy || !AnyAnchored || !TLI->shouldConvertPhiType(PhiTy, ConvertTy)) 5950 return false; 5951 5952 LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to " 5953 << *ConvertTy << "\n"); 5954 5955 // Create all the new phi nodes of the new type, and bitcast any loads to the 5956 // correct type. 5957 ValueToValueMap ValMap; 5958 ValMap[UndefValue::get(PhiTy)] = UndefValue::get(ConvertTy); 5959 for (Instruction *D : Defs) { 5960 if (isa<BitCastInst>(D)) { 5961 ValMap[D] = D->getOperand(0); 5962 DeletedInstrs.insert(D); 5963 } else { 5964 ValMap[D] = 5965 new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode()); 5966 } 5967 } 5968 for (PHINode *Phi : PhiNodes) 5969 ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(), 5970 Phi->getName() + ".tc", Phi); 5971 // Pipe together all the PhiNodes. 5972 for (PHINode *Phi : PhiNodes) { 5973 PHINode *NewPhi = cast<PHINode>(ValMap[Phi]); 5974 for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++) 5975 NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)], 5976 Phi->getIncomingBlock(i)); 5977 Visited.insert(NewPhi); 5978 } 5979 // And finally pipe up the stores and bitcasts 5980 for (Instruction *U : Uses) { 5981 if (isa<BitCastInst>(U)) { 5982 DeletedInstrs.insert(U); 5983 U->replaceAllUsesWith(ValMap[U->getOperand(0)]); 5984 } else { 5985 U->setOperand(0, 5986 new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U)); 5987 } 5988 } 5989 5990 // Save the removed phis to be deleted later. 5991 for (PHINode *Phi : PhiNodes) 5992 DeletedInstrs.insert(Phi); 5993 return true; 5994 } 5995 5996 bool CodeGenPrepare::optimizePhiTypes(Function &F) { 5997 if (!OptimizePhiTypes) 5998 return false; 5999 6000 bool Changed = false; 6001 SmallPtrSet<PHINode *, 4> Visited; 6002 SmallPtrSet<Instruction *, 4> DeletedInstrs; 6003 6004 // Attempt to optimize all the phis in the functions to the correct type. 6005 for (auto &BB : F) 6006 for (auto &Phi : BB.phis()) 6007 Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs); 6008 6009 // Remove any old phi's that have been converted. 6010 for (auto *I : DeletedInstrs) { 6011 I->replaceAllUsesWith(UndefValue::get(I->getType())); 6012 I->eraseFromParent(); 6013 } 6014 6015 return Changed; 6016 } 6017 6018 /// Return true, if an ext(load) can be formed from an extension in 6019 /// \p MovedExts. 6020 bool CodeGenPrepare::canFormExtLd( 6021 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, 6022 Instruction *&Inst, bool HasPromoted) { 6023 for (auto *MovedExtInst : MovedExts) { 6024 if (isa<LoadInst>(MovedExtInst->getOperand(0))) { 6025 LI = cast<LoadInst>(MovedExtInst->getOperand(0)); 6026 Inst = MovedExtInst; 6027 break; 6028 } 6029 } 6030 if (!LI) 6031 return false; 6032 6033 // If they're already in the same block, there's nothing to do. 6034 // Make the cheap checks first if we did not promote. 6035 // If we promoted, we need to check if it is indeed profitable. 
6036   if (!HasPromoted && LI->getParent() == Inst->getParent())
6037     return false;
6038
6039   return TLI->isExtLoad(LI, Inst, *DL);
6040 }
6041
6042 /// Move a zext or sext fed by a load into the same basic block as the load,
6043 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
6044 /// extend into the load.
6045 ///
6046 /// E.g.,
6047 /// \code
6048 /// %ld = load i32* %addr
6049 /// %add = add nuw i32 %ld, 4
6050 /// %zext = zext i32 %add to i64
6051 /// \endcode
6052 /// =>
6053 /// \code
6054 /// %ld = load i32* %addr
6055 /// %zext = zext i32 %ld to i64
6056 /// %add = add nuw i64 %zext, 4
6057 /// \endcode
6058 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
6059 /// allows us to match zext(load i32*) to i64.
6060 ///
6061 /// Also, try to promote the computations used to obtain a sign extended
6062 /// value used in memory accesses.
6063 /// E.g.,
6064 /// \code
6065 /// a = add nsw i32 b, 3
6066 /// d = sext i32 a to i64
6067 /// e = getelementptr ..., i64 d
6068 /// \endcode
6069 /// =>
6070 /// \code
6071 /// f = sext i32 b to i64
6072 /// a = add nsw i64 f, 3
6073 /// e = getelementptr ..., i64 a
6074 /// \endcode
6075 ///
6076 /// \p Inst [in/out] the extension; it may be modified during the process if
6077 /// some promotions apply.
6078 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
6079   bool AllowPromotionWithoutCommonHeader = false;
6080   /// See if it is an interesting sext operation for the address type
6081   /// promotion before trying to promote it, e.g., the ones with the right
6082   /// type and used in memory accesses.
6083   bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
6084       *Inst, AllowPromotionWithoutCommonHeader);
6085   TypePromotionTransaction TPT(RemovedInsts);
6086   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6087       TPT.getRestorationPoint();
6088   SmallVector<Instruction *, 1> Exts;
6089   SmallVector<Instruction *, 2> SpeculativelyMovedExts;
6090   Exts.push_back(Inst);
6091
6092   bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6093
6094   // Look for a load being extended.
6095   LoadInst *LI = nullptr;
6096   Instruction *ExtFedByLoad;
6097
6098   // Try to promote a chain of computation if it allows us to form an extended
6099   // load.
6100   if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
6101     assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6102     TPT.commit();
6103     // Move the extend into the same block as the load.
6104     ExtFedByLoad->moveAfter(LI);
6105     ++NumExtsMoved;
6106     Inst = ExtFedByLoad;
6107     return true;
6108   }
6109
6110   // Continue promoting SExts if the target considers it worthwhile.
6111   if (ATPConsiderable &&
6112       performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
6113                                   HasPromoted, TPT, SpeculativelyMovedExts))
6114     return true;
6115
6116   TPT.rollback(LastKnownGood);
6117   return false;
6118 }
6119
6120 // Perform address type promotion if doing so is profitable.
6121 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
6122 // instructions that sign extended the same initial value. However, if
6123 // AllowPromotionWithoutCommonHeader == true, we expect promoting the
6124 // extension to be profitable on its own.
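//
// As a rough, illustrative sketch of the "common header" case (assumed IR,
// not taken from an existing test case):
//
//   %h  = load i32, i32* %p       ; HeadOfChain
//   %a1 = add nsw i32 %h, 1
//   %s1 = sext i32 %a1 to i64     ; first chain seen: recorded in
//                                 ; SeenChainsForSExt and deferred
//   ...
//   %a2 = add nsw i32 %h, 2
//   %s2 = sext i32 %a2 to i64     ; second chain with the same header:
//                                 ; both chains are now promoted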
6125 bool CodeGenPrepare::performAddressTypePromotion( 6126 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 6127 bool HasPromoted, TypePromotionTransaction &TPT, 6128 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 6129 bool Promoted = false; 6130 SmallPtrSet<Instruction *, 1> UnhandledExts; 6131 bool AllSeenFirst = true; 6132 for (auto *I : SpeculativelyMovedExts) { 6133 Value *HeadOfChain = I->getOperand(0); 6134 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 6135 SeenChainsForSExt.find(HeadOfChain); 6136 // If there is an unhandled SExt which has the same header, try to promote 6137 // it as well. 6138 if (AlreadySeen != SeenChainsForSExt.end()) { 6139 if (AlreadySeen->second != nullptr) 6140 UnhandledExts.insert(AlreadySeen->second); 6141 AllSeenFirst = false; 6142 } 6143 } 6144 6145 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 6146 SpeculativelyMovedExts.size() == 1)) { 6147 TPT.commit(); 6148 if (HasPromoted) 6149 Promoted = true; 6150 for (auto *I : SpeculativelyMovedExts) { 6151 Value *HeadOfChain = I->getOperand(0); 6152 SeenChainsForSExt[HeadOfChain] = nullptr; 6153 ValToSExtendedUses[HeadOfChain].push_back(I); 6154 } 6155 // Update Inst as promotion happen. 6156 Inst = SpeculativelyMovedExts.pop_back_val(); 6157 } else { 6158 // This is the first chain visited from the header, keep the current chain 6159 // as unhandled. Defer to promote this until we encounter another SExt 6160 // chain derived from the same header. 6161 for (auto *I : SpeculativelyMovedExts) { 6162 Value *HeadOfChain = I->getOperand(0); 6163 SeenChainsForSExt[HeadOfChain] = Inst; 6164 } 6165 return false; 6166 } 6167 6168 if (!AllSeenFirst && !UnhandledExts.empty()) 6169 for (auto *VisitedSExt : UnhandledExts) { 6170 if (RemovedInsts.count(VisitedSExt)) 6171 continue; 6172 TypePromotionTransaction TPT(RemovedInsts); 6173 SmallVector<Instruction *, 1> Exts; 6174 SmallVector<Instruction *, 2> Chains; 6175 Exts.push_back(VisitedSExt); 6176 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 6177 TPT.commit(); 6178 if (HasPromoted) 6179 Promoted = true; 6180 for (auto *I : Chains) { 6181 Value *HeadOfChain = I->getOperand(0); 6182 // Mark this as handled. 6183 SeenChainsForSExt[HeadOfChain] = nullptr; 6184 ValToSExtendedUses[HeadOfChain].push_back(I); 6185 } 6186 } 6187 return Promoted; 6188 } 6189 6190 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 6191 BasicBlock *DefBB = I->getParent(); 6192 6193 // If the result of a {s|z}ext and its source are both live out, rewrite all 6194 // other uses of the source with result of extension. 6195 Value *Src = I->getOperand(0); 6196 if (Src->hasOneUse()) 6197 return false; 6198 6199 // Only do this xform if truncating is free. 6200 if (!TLI->isTruncateFree(I->getType(), Src->getType())) 6201 return false; 6202 6203 // Only safe to perform the optimization if the source is also defined in 6204 // this block. 6205 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 6206 return false; 6207 6208 bool DefIsLiveOut = false; 6209 for (User *U : I->users()) { 6210 Instruction *UI = cast<Instruction>(U); 6211 6212 // Figure out which BB this ext is used in. 6213 BasicBlock *UserBB = UI->getParent(); 6214 if (UserBB == DefBB) continue; 6215 DefIsLiveOut = true; 6216 break; 6217 } 6218 if (!DefIsLiveOut) 6219 return false; 6220 6221 // Make sure none of the uses are PHI nodes. 
6222 for (User *U : Src->users()) { 6223 Instruction *UI = cast<Instruction>(U); 6224 BasicBlock *UserBB = UI->getParent(); 6225 if (UserBB == DefBB) continue; 6226 // Be conservative. We don't want this xform to end up introducing 6227 // reloads just before load / store instructions. 6228 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 6229 return false; 6230 } 6231 6232 // InsertedTruncs - Only insert one trunc in each block once. 6233 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 6234 6235 bool MadeChange = false; 6236 for (Use &U : Src->uses()) { 6237 Instruction *User = cast<Instruction>(U.getUser()); 6238 6239 // Figure out which BB this ext is used in. 6240 BasicBlock *UserBB = User->getParent(); 6241 if (UserBB == DefBB) continue; 6242 6243 // Both src and def are live in this block. Rewrite the use. 6244 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 6245 6246 if (!InsertedTrunc) { 6247 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 6248 assert(InsertPt != UserBB->end()); 6249 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 6250 InsertedInsts.insert(InsertedTrunc); 6251 } 6252 6253 // Replace a use of the {s|z}ext source with a use of the result. 6254 U = InsertedTrunc; 6255 ++NumExtUses; 6256 MadeChange = true; 6257 } 6258 6259 return MadeChange; 6260 } 6261 6262 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 6263 // just after the load if the target can fold this into one extload instruction, 6264 // with the hope of eliminating some of the other later "and" instructions using 6265 // the loaded value. "and"s that are made trivially redundant by the insertion 6266 // of the new "and" are removed by this function, while others (e.g. those whose 6267 // path from the load goes through a phi) are left for isel to potentially 6268 // remove. 6269 // 6270 // For example: 6271 // 6272 // b0: 6273 // x = load i32 6274 // ... 6275 // b1: 6276 // y = and x, 0xff 6277 // z = use y 6278 // 6279 // becomes: 6280 // 6281 // b0: 6282 // x = load i32 6283 // x' = and x, 0xff 6284 // ... 6285 // b1: 6286 // z = use x' 6287 // 6288 // whereas: 6289 // 6290 // b0: 6291 // x1 = load i32 6292 // ... 6293 // b1: 6294 // x2 = load i32 6295 // ... 6296 // b2: 6297 // x = phi x1, x2 6298 // y = and x, 0xff 6299 // 6300 // becomes (after a call to optimizeLoadExt for each load): 6301 // 6302 // b0: 6303 // x1 = load i32 6304 // x1' = and x1, 0xff 6305 // ... 6306 // b1: 6307 // x2 = load i32 6308 // x2' = and x2, 0xff 6309 // ... 6310 // b2: 6311 // x = phi x1', x2' 6312 // y = and x, 0xff 6313 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 6314 if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) 6315 return false; 6316 6317 // Skip loads we've already transformed. 6318 if (Load->hasOneUse() && 6319 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 6320 return false; 6321 6322 // Look at all uses of Load, looking through phis, to determine how many bits 6323 // of the loaded value are needed. 
6324 SmallVector<Instruction *, 8> WorkList; 6325 SmallPtrSet<Instruction *, 16> Visited; 6326 SmallVector<Instruction *, 8> AndsToMaybeRemove; 6327 for (auto *U : Load->users()) 6328 WorkList.push_back(cast<Instruction>(U)); 6329 6330 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 6331 unsigned BitWidth = LoadResultVT.getSizeInBits(); 6332 APInt DemandBits(BitWidth, 0); 6333 APInt WidestAndBits(BitWidth, 0); 6334 6335 while (!WorkList.empty()) { 6336 Instruction *I = WorkList.back(); 6337 WorkList.pop_back(); 6338 6339 // Break use-def graph loops. 6340 if (!Visited.insert(I).second) 6341 continue; 6342 6343 // For a PHI node, push all of its users. 6344 if (auto *Phi = dyn_cast<PHINode>(I)) { 6345 for (auto *U : Phi->users()) 6346 WorkList.push_back(cast<Instruction>(U)); 6347 continue; 6348 } 6349 6350 switch (I->getOpcode()) { 6351 case Instruction::And: { 6352 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 6353 if (!AndC) 6354 return false; 6355 APInt AndBits = AndC->getValue(); 6356 DemandBits |= AndBits; 6357 // Keep track of the widest and mask we see. 6358 if (AndBits.ugt(WidestAndBits)) 6359 WidestAndBits = AndBits; 6360 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 6361 AndsToMaybeRemove.push_back(I); 6362 break; 6363 } 6364 6365 case Instruction::Shl: { 6366 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 6367 if (!ShlC) 6368 return false; 6369 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 6370 DemandBits.setLowBits(BitWidth - ShiftAmt); 6371 break; 6372 } 6373 6374 case Instruction::Trunc: { 6375 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 6376 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 6377 DemandBits.setLowBits(TruncBitWidth); 6378 break; 6379 } 6380 6381 default: 6382 return false; 6383 } 6384 } 6385 6386 uint32_t ActiveBits = DemandBits.getActiveBits(); 6387 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 6388 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 6389 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 6390 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 6391 // followed by an AND. 6392 // TODO: Look into removing this restriction by fixing backends to either 6393 // return false for isLoadExtLegal for i1 or have them select this pattern to 6394 // a single instruction. 6395 // 6396 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 6397 // mask, since these are the only ands that will be removed by isel. 6398 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 6399 WidestAndBits != DemandBits) 6400 return false; 6401 6402 LLVMContext &Ctx = Load->getType()->getContext(); 6403 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 6404 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 6405 6406 // Reject cases that won't be matched as extloads. 6407 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 6408 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 6409 return false; 6410 6411 IRBuilder<> Builder(Load->getNextNode()); 6412 auto *NewAnd = cast<Instruction>( 6413 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 6414 // Mark this instruction as "inserted by CGP", so that other 6415 // optimizations don't touch it. 6416 InsertedInsts.insert(NewAnd); 6417 6418 // Replace all uses of load with new and (except for the use of load in the 6419 // new and itself). 
6420 Load->replaceAllUsesWith(NewAnd); 6421 NewAnd->setOperand(0, Load); 6422 6423 // Remove any and instructions that are now redundant. 6424 for (auto *And : AndsToMaybeRemove) 6425 // Check that the and mask is the same as the one we decided to put on the 6426 // new and. 6427 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 6428 And->replaceAllUsesWith(NewAnd); 6429 if (&*CurInstIterator == And) 6430 CurInstIterator = std::next(And->getIterator()); 6431 And->eraseFromParent(); 6432 ++NumAndUses; 6433 } 6434 6435 ++NumAndsAdded; 6436 return true; 6437 } 6438 6439 /// Check if V (an operand of a select instruction) is an expensive instruction 6440 /// that is only used once. 6441 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 6442 auto *I = dyn_cast<Instruction>(V); 6443 // If it's safe to speculatively execute, then it should not have side 6444 // effects; therefore, it's safe to sink and possibly *not* execute. 6445 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 6446 TTI->getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency) >= 6447 TargetTransformInfo::TCC_Expensive; 6448 } 6449 6450 /// Returns true if a SelectInst should be turned into an explicit branch. 6451 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 6452 const TargetLowering *TLI, 6453 SelectInst *SI) { 6454 // If even a predictable select is cheap, then a branch can't be cheaper. 6455 if (!TLI->isPredictableSelectExpensive()) 6456 return false; 6457 6458 // FIXME: This should use the same heuristics as IfConversion to determine 6459 // whether a select is better represented as a branch. 6460 6461 // If metadata tells us that the select condition is obviously predictable, 6462 // then we want to replace the select with a branch. 6463 uint64_t TrueWeight, FalseWeight; 6464 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 6465 uint64_t Max = std::max(TrueWeight, FalseWeight); 6466 uint64_t Sum = TrueWeight + FalseWeight; 6467 if (Sum != 0) { 6468 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 6469 if (Probability > TLI->getPredictableBranchThreshold()) 6470 return true; 6471 } 6472 } 6473 6474 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 6475 6476 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 6477 // comparison condition. If the compare has more than one use, there's 6478 // probably another cmov or setcc around, so it's not worth emitting a branch. 6479 if (!Cmp || !Cmp->hasOneUse()) 6480 return false; 6481 6482 // If either operand of the select is expensive and only needed on one side 6483 // of the select, we should form a branch. 6484 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 6485 sinkSelectOperand(TTI, SI->getFalseValue())) 6486 return true; 6487 6488 return false; 6489 } 6490 6491 /// If \p isTrue is true, return the true value of \p SI, otherwise return 6492 /// false value of \p SI. If the true/false value of \p SI is defined by any 6493 /// select instructions in \p Selects, look through the defining select 6494 /// instruction until the true/false value is not defined in \p Selects. 
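///
/// For example (an assumed, illustrative snippet), with
///   %s1 = select i1 %c, i32 %a, i32 %b
///   %s2 = select i1 %c, i32 %s1, i32 %d
/// and Selects = {%s1, %s2}, the true value of %s2 is found by looking
/// through %s1 and is %a, while its false value is %d.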
6495 static Value *getTrueOrFalseValue( 6496 SelectInst *SI, bool isTrue, 6497 const SmallPtrSet<const Instruction *, 2> &Selects) { 6498 Value *V = nullptr; 6499 6500 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 6501 DefSI = dyn_cast<SelectInst>(V)) { 6502 assert(DefSI->getCondition() == SI->getCondition() && 6503 "The condition of DefSI does not match with SI"); 6504 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); 6505 } 6506 6507 assert(V && "Failed to get select true/false value"); 6508 return V; 6509 } 6510 6511 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) { 6512 assert(Shift->isShift() && "Expected a shift"); 6513 6514 // If this is (1) a vector shift, (2) shifts by scalars are cheaper than 6515 // general vector shifts, and (3) the shift amount is a select-of-splatted 6516 // values, hoist the shifts before the select: 6517 // shift Op0, (select Cond, TVal, FVal) --> 6518 // select Cond, (shift Op0, TVal), (shift Op0, FVal) 6519 // 6520 // This is inverting a generic IR transform when we know that the cost of a 6521 // general vector shift is more than the cost of 2 shift-by-scalars. 6522 // We can't do this effectively in SDAG because we may not be able to 6523 // determine if the select operands are splats from within a basic block. 6524 Type *Ty = Shift->getType(); 6525 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) 6526 return false; 6527 Value *Cond, *TVal, *FVal; 6528 if (!match(Shift->getOperand(1), 6529 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 6530 return false; 6531 if (!isSplatValue(TVal) || !isSplatValue(FVal)) 6532 return false; 6533 6534 IRBuilder<> Builder(Shift); 6535 BinaryOperator::BinaryOps Opcode = Shift->getOpcode(); 6536 Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal); 6537 Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal); 6538 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); 6539 Shift->replaceAllUsesWith(NewSel); 6540 Shift->eraseFromParent(); 6541 return true; 6542 } 6543 6544 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) { 6545 Intrinsic::ID Opcode = Fsh->getIntrinsicID(); 6546 assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) && 6547 "Expected a funnel shift"); 6548 6549 // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper 6550 // than general vector shifts, and (3) the shift amount is select-of-splatted 6551 // values, hoist the funnel shifts before the select: 6552 // fsh Op0, Op1, (select Cond, TVal, FVal) --> 6553 // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal) 6554 // 6555 // This is inverting a generic IR transform when we know that the cost of a 6556 // general vector shift is more than the cost of 2 shift-by-scalars. 6557 // We can't do this effectively in SDAG because we may not be able to 6558 // determine if the select operands are splats from within a basic block. 
6559 Type *Ty = Fsh->getType(); 6560 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) 6561 return false; 6562 Value *Cond, *TVal, *FVal; 6563 if (!match(Fsh->getOperand(2), 6564 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 6565 return false; 6566 if (!isSplatValue(TVal) || !isSplatValue(FVal)) 6567 return false; 6568 6569 IRBuilder<> Builder(Fsh); 6570 Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1); 6571 Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, TVal }); 6572 Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, FVal }); 6573 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); 6574 Fsh->replaceAllUsesWith(NewSel); 6575 Fsh->eraseFromParent(); 6576 return true; 6577 } 6578 6579 /// If we have a SelectInst that will likely profit from branch prediction, 6580 /// turn it into a branch. 6581 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 6582 if (DisableSelectToBranch) 6583 return false; 6584 6585 // Find all consecutive select instructions that share the same condition. 6586 SmallVector<SelectInst *, 2> ASI; 6587 ASI.push_back(SI); 6588 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 6589 It != SI->getParent()->end(); ++It) { 6590 SelectInst *I = dyn_cast<SelectInst>(&*It); 6591 if (I && SI->getCondition() == I->getCondition()) { 6592 ASI.push_back(I); 6593 } else { 6594 break; 6595 } 6596 } 6597 6598 SelectInst *LastSI = ASI.back(); 6599 // Increment the current iterator to skip all the rest of select instructions 6600 // because they will be either "not lowered" or "all lowered" to branch. 6601 CurInstIterator = std::next(LastSI->getIterator()); 6602 6603 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 6604 6605 // Can we convert the 'select' to CF ? 6606 if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) 6607 return false; 6608 6609 TargetLowering::SelectSupportKind SelectKind; 6610 if (VectorCond) 6611 SelectKind = TargetLowering::VectorMaskSelect; 6612 else if (SI->getType()->isVectorTy()) 6613 SelectKind = TargetLowering::ScalarCondVectorVal; 6614 else 6615 SelectKind = TargetLowering::ScalarValSelect; 6616 6617 if (TLI->isSelectSupported(SelectKind) && 6618 (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize || 6619 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))) 6620 return false; 6621 6622 // The DominatorTree needs to be rebuilt by any consumers after this 6623 // transformation. We simply reset here rather than setting the ModifiedDT 6624 // flag to avoid restarting the function walk in runOnFunction for each 6625 // select optimized. 6626 DT.reset(); 6627 6628 // Transform a sequence like this: 6629 // start: 6630 // %cmp = cmp uge i32 %a, %b 6631 // %sel = select i1 %cmp, i32 %c, i32 %d 6632 // 6633 // Into: 6634 // start: 6635 // %cmp = cmp uge i32 %a, %b 6636 // %cmp.frozen = freeze %cmp 6637 // br i1 %cmp.frozen, label %select.true, label %select.false 6638 // select.true: 6639 // br label %select.end 6640 // select.false: 6641 // br label %select.end 6642 // select.end: 6643 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 6644 // 6645 // %cmp should be frozen, otherwise it may introduce undefined behavior. 6646 // In addition, we may sink instructions that produce %c or %d from 6647 // the entry block into the destination(s) of the new branch. 6648 // If the true or false blocks do not contain a sunken instruction, that 6649 // block and its branch may be optimized away. 
In that case, one side of the 6650 // first branch will point directly to select.end, and the corresponding PHI 6651 // predecessor block will be the start block. 6652 6653 // First, we split the block containing the select into 2 blocks. 6654 BasicBlock *StartBlock = SI->getParent(); 6655 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 6656 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 6657 BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency()); 6658 6659 // Delete the unconditional branch that was just created by the split. 6660 StartBlock->getTerminator()->eraseFromParent(); 6661 6662 // These are the new basic blocks for the conditional branch. 6663 // At least one will become an actual new basic block. 6664 BasicBlock *TrueBlock = nullptr; 6665 BasicBlock *FalseBlock = nullptr; 6666 BranchInst *TrueBranch = nullptr; 6667 BranchInst *FalseBranch = nullptr; 6668 6669 // Sink expensive instructions into the conditional blocks to avoid executing 6670 // them speculatively. 6671 for (SelectInst *SI : ASI) { 6672 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 6673 if (TrueBlock == nullptr) { 6674 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 6675 EndBlock->getParent(), EndBlock); 6676 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 6677 TrueBranch->setDebugLoc(SI->getDebugLoc()); 6678 } 6679 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 6680 TrueInst->moveBefore(TrueBranch); 6681 } 6682 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 6683 if (FalseBlock == nullptr) { 6684 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 6685 EndBlock->getParent(), EndBlock); 6686 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6687 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6688 } 6689 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 6690 FalseInst->moveBefore(FalseBranch); 6691 } 6692 } 6693 6694 // If there was nothing to sink, then arbitrarily choose the 'false' side 6695 // for a new input value to the PHI. 6696 if (TrueBlock == FalseBlock) { 6697 assert(TrueBlock == nullptr && 6698 "Unexpected basic block transform while optimizing select"); 6699 6700 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 6701 EndBlock->getParent(), EndBlock); 6702 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6703 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6704 } 6705 6706 // Insert the real conditional branch based on the original condition. 6707 // If we did not create a new block for one of the 'true' or 'false' paths 6708 // of the condition, it means that side of the branch goes to the end block 6709 // directly and the path originates from the start block from the point of 6710 // view of the new PHI. 6711 BasicBlock *TT, *FT; 6712 if (TrueBlock == nullptr) { 6713 TT = EndBlock; 6714 FT = FalseBlock; 6715 TrueBlock = StartBlock; 6716 } else if (FalseBlock == nullptr) { 6717 TT = TrueBlock; 6718 FT = EndBlock; 6719 FalseBlock = StartBlock; 6720 } else { 6721 TT = TrueBlock; 6722 FT = FalseBlock; 6723 } 6724 IRBuilder<> IB(SI); 6725 auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen"); 6726 IB.CreateCondBr(CondFr, TT, FT, SI); 6727 6728 SmallPtrSet<const Instruction *, 2> INS; 6729 INS.insert(ASI.begin(), ASI.end()); 6730 // Use reverse iterator because later select may use the value of the 6731 // earlier select, and we need to propagate value through earlier select 6732 // to get the PHI operand. 
6733   for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
6734     SelectInst *SI = *It;
6735     // The select itself is replaced with a PHI Node.
6736     PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
6737     PN->takeName(SI);
6738     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
6739     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
6740     PN->setDebugLoc(SI->getDebugLoc());
6741
6742     SI->replaceAllUsesWith(PN);
6743     SI->eraseFromParent();
6744     INS.erase(SI);
6745     ++NumSelectsExpanded;
6746   }
6747
6748   // Instruct OptimizeBlock to skip to the next block.
6749   CurInstIterator = StartBlock->end();
6750   return true;
6751 }
6752
6753 /// Some targets only accept certain types for splat inputs. For example, a
6754 /// VDUP in MVE takes a GPR (integer) register, and the instructions that
6755 /// incorporate a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
6756 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
6757   // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
6758   if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
6759                             m_Undef(), m_ZeroMask())))
6760     return false;
6761   Type *NewType = TLI->shouldConvertSplatType(SVI);
6762   if (!NewType)
6763     return false;
6764
6765   auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
6766   assert(!NewType->isVectorTy() && "Expected a scalar type!");
6767   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
6768          "Expected a type of the same size!");
6769   auto *NewVecType =
6770       FixedVectorType::get(NewType, SVIVecType->getNumElements());
6771
6772   // Create a bitcast (shuffle (insert (bitcast(..))))
6773   IRBuilder<> Builder(SVI->getContext());
6774   Builder.SetInsertPoint(SVI);
6775   Value *BC1 = Builder.CreateBitCast(
6776       cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
6777   Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
6778   Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
6779
6780   SVI->replaceAllUsesWith(BC2);
6781   RecursivelyDeleteTriviallyDeadInstructions(
6782       SVI, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); });
6783
6784   // Also hoist the bitcast up to its operand if they are not in the same
6785   // block.
6786   if (auto *BCI = dyn_cast<Instruction>(BC1))
6787     if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
6788       if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
6789           !Op->isTerminator() && !Op->isEHPad())
6790         BCI->moveAfter(Op);
6791
6792   return true;
6793 }
6794
6795 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
6796   // If the operands of I can be folded into a target instruction together with
6797   // I, duplicate and sink them.
6798   SmallVector<Use *, 4> OpsToSink;
6799   if (!TLI->shouldSinkOperands(I, OpsToSink))
6800     return false;
6801
6802   // OpsToSink can contain multiple uses in a use chain (e.g.
6803   // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
6804   // uses must come first, so we process the ops in reverse order so as to not
6805   // create invalid IR.
6806 BasicBlock *TargetBB = I->getParent(); 6807 bool Changed = false; 6808 SmallVector<Use *, 4> ToReplace; 6809 for (Use *U : reverse(OpsToSink)) { 6810 auto *UI = cast<Instruction>(U->get()); 6811 if (UI->getParent() == TargetBB || isa<PHINode>(UI)) 6812 continue; 6813 ToReplace.push_back(U); 6814 } 6815 6816 SetVector<Instruction *> MaybeDead; 6817 DenseMap<Instruction *, Instruction *> NewInstructions; 6818 Instruction *InsertPoint = I; 6819 for (Use *U : ToReplace) { 6820 auto *UI = cast<Instruction>(U->get()); 6821 Instruction *NI = UI->clone(); 6822 NewInstructions[UI] = NI; 6823 MaybeDead.insert(UI); 6824 LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n"); 6825 NI->insertBefore(InsertPoint); 6826 InsertPoint = NI; 6827 InsertedInsts.insert(NI); 6828 6829 // Update the use for the new instruction, making sure that we update the 6830 // sunk instruction uses, if it is part of a chain that has already been 6831 // sunk. 6832 Instruction *OldI = cast<Instruction>(U->getUser()); 6833 if (NewInstructions.count(OldI)) 6834 NewInstructions[OldI]->setOperand(U->getOperandNo(), NI); 6835 else 6836 U->set(NI); 6837 Changed = true; 6838 } 6839 6840 // Remove instructions that are dead after sinking. 6841 for (auto *I : MaybeDead) { 6842 if (!I->hasNUsesOrMore(1)) { 6843 LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n"); 6844 I->eraseFromParent(); 6845 } 6846 } 6847 6848 return Changed; 6849 } 6850 6851 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 6852 Value *Cond = SI->getCondition(); 6853 Type *OldType = Cond->getType(); 6854 LLVMContext &Context = Cond->getContext(); 6855 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 6856 unsigned RegWidth = RegType.getSizeInBits(); 6857 6858 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 6859 return false; 6860 6861 // If the register width is greater than the type width, expand the condition 6862 // of the switch instruction and each case constant to the width of the 6863 // register. By widening the type of the switch condition, subsequent 6864 // comparisons (for case comparisons) will not need to be extended to the 6865 // preferred register width, so we will potentially eliminate N-1 extends, 6866 // where N is the number of cases in the switch. 6867 auto *NewType = Type::getIntNTy(Context, RegWidth); 6868 6869 // Zero-extend the switch condition and case constants unless the switch 6870 // condition is a function argument that is already being sign-extended. 6871 // In that case, we can avoid an unnecessary mask/extension by sign-extending 6872 // everything instead. 6873 Instruction::CastOps ExtType = Instruction::ZExt; 6874 if (auto *Arg = dyn_cast<Argument>(Cond)) 6875 if (Arg->hasSExtAttr()) 6876 ExtType = Instruction::SExt; 6877 6878 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 6879 ExtInst->insertBefore(SI); 6880 ExtInst->setDebugLoc(SI->getDebugLoc()); 6881 SI->setCondition(ExtInst); 6882 for (auto Case : SI->cases()) { 6883 APInt NarrowConst = Case.getCaseValue()->getValue(); 6884 APInt WideConst = (ExtType == Instruction::ZExt) ? 6885 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); 6886 Case.setValue(ConstantInt::get(Context, WideConst)); 6887 } 6888 6889 return true; 6890 } 6891 6892 6893 namespace { 6894 6895 /// Helper class to promote a scalar operation to a vector one. 6896 /// This class is used to move downward extractelement transition. 
6897 /// E.g., 6898 /// a = vector_op <2 x i32> 6899 /// b = extractelement <2 x i32> a, i32 0 6900 /// c = scalar_op b 6901 /// store c 6902 /// 6903 /// => 6904 /// a = vector_op <2 x i32> 6905 /// c = vector_op a (equivalent to scalar_op on the related lane) 6906 /// * d = extractelement <2 x i32> c, i32 0 6907 /// * store d 6908 /// Assuming both extractelement and store can be combine, we get rid of the 6909 /// transition. 6910 class VectorPromoteHelper { 6911 /// DataLayout associated with the current module. 6912 const DataLayout &DL; 6913 6914 /// Used to perform some checks on the legality of vector operations. 6915 const TargetLowering &TLI; 6916 6917 /// Used to estimated the cost of the promoted chain. 6918 const TargetTransformInfo &TTI; 6919 6920 /// The transition being moved downwards. 6921 Instruction *Transition; 6922 6923 /// The sequence of instructions to be promoted. 6924 SmallVector<Instruction *, 4> InstsToBePromoted; 6925 6926 /// Cost of combining a store and an extract. 6927 unsigned StoreExtractCombineCost; 6928 6929 /// Instruction that will be combined with the transition. 6930 Instruction *CombineInst = nullptr; 6931 6932 /// The instruction that represents the current end of the transition. 6933 /// Since we are faking the promotion until we reach the end of the chain 6934 /// of computation, we need a way to get the current end of the transition. 6935 Instruction *getEndOfTransition() const { 6936 if (InstsToBePromoted.empty()) 6937 return Transition; 6938 return InstsToBePromoted.back(); 6939 } 6940 6941 /// Return the index of the original value in the transition. 6942 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 6943 /// c, is at index 0. 6944 unsigned getTransitionOriginalValueIdx() const { 6945 assert(isa<ExtractElementInst>(Transition) && 6946 "Other kind of transitions are not supported yet"); 6947 return 0; 6948 } 6949 6950 /// Return the index of the index in the transition. 6951 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 6952 /// is at index 1. 6953 unsigned getTransitionIdx() const { 6954 assert(isa<ExtractElementInst>(Transition) && 6955 "Other kind of transitions are not supported yet"); 6956 return 1; 6957 } 6958 6959 /// Get the type of the transition. 6960 /// This is the type of the original value. 6961 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 6962 /// transition is <2 x i32>. 6963 Type *getTransitionType() const { 6964 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 6965 } 6966 6967 /// Promote \p ToBePromoted by moving \p Def downward through. 6968 /// I.e., we have the following sequence: 6969 /// Def = Transition <ty1> a to <ty2> 6970 /// b = ToBePromoted <ty2> Def, ... 6971 /// => 6972 /// b = ToBePromoted <ty1> a, ... 6973 /// Def = Transition <ty1> ToBePromoted to <ty2> 6974 void promoteImpl(Instruction *ToBePromoted); 6975 6976 /// Check whether or not it is profitable to promote all the 6977 /// instructions enqueued to be promoted. 6978 bool isProfitableToPromote() { 6979 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 6980 unsigned Index = isa<ConstantInt>(ValIdx) 6981 ? cast<ConstantInt>(ValIdx)->getZExtValue() 6982 : -1; 6983 Type *PromotedType = getTransitionType(); 6984 6985 StoreInst *ST = cast<StoreInst>(CombineInst); 6986 unsigned AS = ST->getPointerAddressSpace(); 6987 // Check if this store is supported. 
6988 if (!TLI.allowsMisalignedMemoryAccesses( 6989 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 6990 ST->getAlign())) { 6991 // If this is not supported, there is no way we can combine 6992 // the extract with the store. 6993 return false; 6994 } 6995 6996 // The scalar chain of computation has to pay for the transition 6997 // scalar to vector. 6998 // The vector chain has to account for the combining cost. 6999 InstructionCost ScalarCost = 7000 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 7001 InstructionCost VectorCost = StoreExtractCombineCost; 7002 enum TargetTransformInfo::TargetCostKind CostKind = 7003 TargetTransformInfo::TCK_RecipThroughput; 7004 for (const auto &Inst : InstsToBePromoted) { 7005 // Compute the cost. 7006 // By construction, all instructions being promoted are arithmetic ones. 7007 // Moreover, one argument is a constant that can be viewed as a splat 7008 // constant. 7009 Value *Arg0 = Inst->getOperand(0); 7010 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 7011 isa<ConstantFP>(Arg0); 7012 TargetTransformInfo::OperandValueKind Arg0OVK = 7013 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 7014 : TargetTransformInfo::OK_AnyValue; 7015 TargetTransformInfo::OperandValueKind Arg1OVK = 7016 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 7017 : TargetTransformInfo::OK_AnyValue; 7018 ScalarCost += TTI.getArithmeticInstrCost( 7019 Inst->getOpcode(), Inst->getType(), CostKind, Arg0OVK, Arg1OVK); 7020 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 7021 CostKind, 7022 Arg0OVK, Arg1OVK); 7023 } 7024 LLVM_DEBUG( 7025 dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 7026 << ScalarCost << "\nVector: " << VectorCost << '\n'); 7027 return ScalarCost > VectorCost; 7028 } 7029 7030 /// Generate a constant vector with \p Val with the same 7031 /// number of elements as the transition. 7032 /// \p UseSplat defines whether or not \p Val should be replicated 7033 /// across the whole vector. 7034 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 7035 /// otherwise we generate a vector with as many undef as possible: 7036 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 7037 /// used at the index of the extract. 7038 Value *getConstantVector(Constant *Val, bool UseSplat) const { 7039 unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); 7040 if (!UseSplat) { 7041 // If we cannot determine where the constant must be, we have to 7042 // use a splat constant. 
7043 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 7044 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 7045 ExtractIdx = CstVal->getSExtValue(); 7046 else 7047 UseSplat = true; 7048 } 7049 7050 ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount(); 7051 if (UseSplat) 7052 return ConstantVector::getSplat(EC, Val); 7053 7054 if (!EC.isScalable()) { 7055 SmallVector<Constant *, 4> ConstVec; 7056 UndefValue *UndefVal = UndefValue::get(Val->getType()); 7057 for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) { 7058 if (Idx == ExtractIdx) 7059 ConstVec.push_back(Val); 7060 else 7061 ConstVec.push_back(UndefVal); 7062 } 7063 return ConstantVector::get(ConstVec); 7064 } else 7065 llvm_unreachable( 7066 "Generate scalable vector for non-splat is unimplemented"); 7067 } 7068 7069 /// Check if promoting to a vector type an operand at \p OperandIdx 7070 /// in \p Use can trigger undefined behavior. 7071 static bool canCauseUndefinedBehavior(const Instruction *Use, 7072 unsigned OperandIdx) { 7073 // This is not safe to introduce undef when the operand is on 7074 // the right hand side of a division-like instruction. 7075 if (OperandIdx != 1) 7076 return false; 7077 switch (Use->getOpcode()) { 7078 default: 7079 return false; 7080 case Instruction::SDiv: 7081 case Instruction::UDiv: 7082 case Instruction::SRem: 7083 case Instruction::URem: 7084 return true; 7085 case Instruction::FDiv: 7086 case Instruction::FRem: 7087 return !Use->hasNoNaNs(); 7088 } 7089 llvm_unreachable(nullptr); 7090 } 7091 7092 public: 7093 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 7094 const TargetTransformInfo &TTI, Instruction *Transition, 7095 unsigned CombineCost) 7096 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 7097 StoreExtractCombineCost(CombineCost) { 7098 assert(Transition && "Do not know how to promote null"); 7099 } 7100 7101 /// Check if we can promote \p ToBePromoted to \p Type. 7102 bool canPromote(const Instruction *ToBePromoted) const { 7103 // We could support CastInst too. 7104 return isa<BinaryOperator>(ToBePromoted); 7105 } 7106 7107 /// Check if it is profitable to promote \p ToBePromoted 7108 /// by moving downward the transition through. 7109 bool shouldPromote(const Instruction *ToBePromoted) const { 7110 // Promote only if all the operands can be statically expanded. 7111 // Indeed, we do not want to introduce any new kind of transitions. 7112 for (const Use &U : ToBePromoted->operands()) { 7113 const Value *Val = U.get(); 7114 if (Val == getEndOfTransition()) { 7115 // If the use is a division and the transition is on the rhs, 7116 // we cannot promote the operation, otherwise we may create a 7117 // division by zero. 7118 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 7119 return false; 7120 continue; 7121 } 7122 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 7123 !isa<ConstantFP>(Val)) 7124 return false; 7125 } 7126 // Check that the resulting operation is legal. 7127 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 7128 if (!ISDOpcode) 7129 return false; 7130 return StressStoreExtract || 7131 TLI.isOperationLegalOrCustom( 7132 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); 7133 } 7134 7135 /// Check whether or not \p Use can be combined 7136 /// with the transition. 7137 /// I.e., is it possible to do Use(Transition) => AnotherUse? 
7138   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
7139
7140   /// Record \p ToBePromoted as part of the chain to be promoted.
7141   void enqueueForPromotion(Instruction *ToBePromoted) {
7142     InstsToBePromoted.push_back(ToBePromoted);
7143   }
7144
7145   /// Set the instruction that will be combined with the transition.
7146   void recordCombineInstruction(Instruction *ToBeCombined) {
7147     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
7148     CombineInst = ToBeCombined;
7149   }
7150
7151   /// Promote all the instructions enqueued for promotion if it is
7152   /// profitable.
7153   /// \return True if the promotion happened, false otherwise.
7154   bool promote() {
7155     // Check if there is something to promote.
7156     // Right now, if we do not have anything to combine with,
7157     // we assume the promotion is not profitable.
7158     if (InstsToBePromoted.empty() || !CombineInst)
7159       return false;
7160
7161     // Check cost.
7162     if (!StressStoreExtract && !isProfitableToPromote())
7163       return false;
7164
7165     // Promote.
7166     for (auto &ToBePromoted : InstsToBePromoted)
7167       promoteImpl(ToBePromoted);
7168     InstsToBePromoted.clear();
7169     return true;
7170   }
7171 };
7172
7173 } // end anonymous namespace
7174
7175 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
7176   // At this point, we know that all the operands of ToBePromoted but Def
7177   // can be statically promoted.
7178   // For Def, we need to use its parameter in ToBePromoted:
7179   // b = ToBePromoted ty1 a
7180   // Def = Transition ty1 b to ty2
7181   // Move the transition down.
7182   // 1. Replace all uses of the promoted operation by the transition.
7183   //    = ... b => = ... Def.
7184   assert(ToBePromoted->getType() == Transition->getType() &&
7185          "The type of the result of the transition does not match "
7186          "the final type");
7187   ToBePromoted->replaceAllUsesWith(Transition);
7188   // 2. Update the type of the uses.
7189   //    b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
7190   Type *TransitionTy = getTransitionType();
7191   ToBePromoted->mutateType(TransitionTy);
7192   // 3. Update all the operands of the promoted operation with promoted
7193   //    operands.
7194   //    b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
7195   for (Use &U : ToBePromoted->operands()) {
7196     Value *Val = U.get();
7197     Value *NewVal = nullptr;
7198     if (Val == Transition)
7199       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
7200     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
7201              isa<ConstantFP>(Val)) {
7202       // Use a splat constant if it is not safe to use undef.
7203       NewVal = getConstantVector(
7204           cast<Constant>(Val),
7205           isa<UndefValue>(Val) ||
7206               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
7207     } else
7208       llvm_unreachable("Did you modify shouldPromote and forget to update "
7209                        "this?");
7210     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
7211   }
7212   Transition->moveAfter(ToBePromoted);
7213   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
7214 }
7215
7216 /// Some targets can do store(extractelement) with one instruction.
7217 /// Try to push the extractelement towards the stores when the target
7218 /// has this feature and this is profitable.
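///
/// As a rough sketch of the profitable case (assumed IR, for illustration
/// only):
///   %e = extractelement <2 x i32> %v, i32 0
///   %a = add i32 %e, 7
///   store i32 %a, i32* %p
/// may become, when the target can combine the extract with the store:
///   %a = add <2 x i32> %v, <i32 7, i32 undef>
///   %e = extractelement <2 x i32> %a, i32 0
///   store i32 %e, i32* %p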
7219 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { 7220 unsigned CombineCost = std::numeric_limits<unsigned>::max(); 7221 if (DisableStoreExtract || 7222 (!StressStoreExtract && 7223 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 7224 Inst->getOperand(1), CombineCost))) 7225 return false; 7226 7227 // At this point we know that Inst is a vector to scalar transition. 7228 // Try to move it down the def-use chain, until: 7229 // - We can combine the transition with its single use 7230 // => we got rid of the transition. 7231 // - We escape the current basic block 7232 // => we would need to check that we are moving it at a cheaper place and 7233 // we do not do that for now. 7234 BasicBlock *Parent = Inst->getParent(); 7235 LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 7236 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); 7237 // If the transition has more than one use, assume this is not going to be 7238 // beneficial. 7239 while (Inst->hasOneUse()) { 7240 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 7241 LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 7242 7243 if (ToBePromoted->getParent() != Parent) { 7244 LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" 7245 << ToBePromoted->getParent()->getName() 7246 << ") than the transition (" << Parent->getName() 7247 << ").\n"); 7248 return false; 7249 } 7250 7251 if (VPH.canCombine(ToBePromoted)) { 7252 LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' 7253 << "will be combined with: " << *ToBePromoted << '\n'); 7254 VPH.recordCombineInstruction(ToBePromoted); 7255 bool Changed = VPH.promote(); 7256 NumStoreExtractExposed += Changed; 7257 return Changed; 7258 } 7259 7260 LLVM_DEBUG(dbgs() << "Try promoting.\n"); 7261 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 7262 return false; 7263 7264 LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 7265 7266 VPH.enqueueForPromotion(ToBePromoted); 7267 Inst = ToBePromoted; 7268 } 7269 return false; 7270 } 7271 7272 /// For the instruction sequence of store below, F and I values 7273 /// are bundled together as an i64 value before being stored into memory. 7274 /// Sometimes it is more efficient to generate separate stores for F and I, 7275 /// which can remove the bitwise instructions or sink them to colder places. 7276 /// 7277 /// (store (or (zext (bitcast F to i32) to i64), 7278 /// (shl (zext I to i64), 32)), addr) --> 7279 /// (store F, addr) and (store I, addr+4) 7280 /// 7281 /// Similarly, splitting for other merged store can also be beneficial, like: 7282 /// For pair of {i32, i32}, i64 store --> two i32 stores. 7283 /// For pair of {i32, i16}, i64 store --> two i32 stores. 7284 /// For pair of {i16, i16}, i32 store --> two i16 stores. 7285 /// For pair of {i16, i8}, i32 store --> two i16 stores. 7286 /// For pair of {i8, i8}, i16 store --> two i8 stores. 7287 /// 7288 /// We allow each target to determine specifically which kind of splitting is 7289 /// supported. 7290 /// 7291 /// The store patterns are commonly seen from the simple code snippet below 7292 /// if only std::make_pair(...) is sroa transformed before inlined into hoo. 7293 /// void goo(const std::pair<int, float> &); 7294 /// hoo() { 7295 /// ... 7296 /// goo(std::make_pair(tmp, ftmp)); 7297 /// ... 
7298 /// } 7299 /// 7300 /// Although we already have similar splitting in DAG Combine, we duplicate 7301 /// it in CodeGenPrepare to catch the case in which pattern is across 7302 /// multiple BBs. The logic in DAG Combine is kept to catch case generated 7303 /// during code expansion. 7304 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, 7305 const TargetLowering &TLI) { 7306 // Handle simple but common cases only. 7307 Type *StoreType = SI.getValueOperand()->getType(); 7308 7309 // The code below assumes shifting a value by <number of bits>, 7310 // whereas scalable vectors would have to be shifted by 7311 // <2log(vscale) + number of bits> in order to store the 7312 // low/high parts. Bailing out for now. 7313 if (isa<ScalableVectorType>(StoreType)) 7314 return false; 7315 7316 if (!DL.typeSizeEqualsStoreSize(StoreType) || 7317 DL.getTypeSizeInBits(StoreType) == 0) 7318 return false; 7319 7320 unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2; 7321 Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize); 7322 if (!DL.typeSizeEqualsStoreSize(SplitStoreType)) 7323 return false; 7324 7325 // Don't split the store if it is volatile. 7326 if (SI.isVolatile()) 7327 return false; 7328 7329 // Match the following patterns: 7330 // (store (or (zext LValue to i64), 7331 // (shl (zext HValue to i64), 32)), HalfValBitSize) 7332 // or 7333 // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize) 7334 // (zext LValue to i64), 7335 // Expect both operands of OR and the first operand of SHL have only 7336 // one use. 7337 Value *LValue, *HValue; 7338 if (!match(SI.getValueOperand(), 7339 m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), 7340 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), 7341 m_SpecificInt(HalfValBitSize)))))) 7342 return false; 7343 7344 // Check LValue and HValue are int with size less or equal than 32. 7345 if (!LValue->getType()->isIntegerTy() || 7346 DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || 7347 !HValue->getType()->isIntegerTy() || 7348 DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) 7349 return false; 7350 7351 // If LValue/HValue is a bitcast instruction, use the EVT before bitcast 7352 // as the input of target query. 7353 auto *LBC = dyn_cast<BitCastInst>(LValue); 7354 auto *HBC = dyn_cast<BitCastInst>(HValue); 7355 EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) 7356 : EVT::getEVT(LValue->getType()); 7357 EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) 7358 : EVT::getEVT(HValue->getType()); 7359 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) 7360 return false; 7361 7362 // Start to split store. 7363 IRBuilder<> Builder(SI.getContext()); 7364 Builder.SetInsertPoint(&SI); 7365 7366 // If LValue/HValue is a bitcast in another BB, create a new one in current 7367 // BB so it may be merged with the splitted stores by dag combiner. 
7368 if (LBC && LBC->getParent() != SI.getParent()) 7369 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); 7370 if (HBC && HBC->getParent() != SI.getParent()) 7371 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); 7372 7373 bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); 7374 auto CreateSplitStore = [&](Value *V, bool Upper) { 7375 V = Builder.CreateZExtOrBitCast(V, SplitStoreType); 7376 Value *Addr = Builder.CreateBitCast( 7377 SI.getOperand(1), 7378 SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); 7379 Align Alignment = SI.getAlign(); 7380 const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper); 7381 if (IsOffsetStore) { 7382 Addr = Builder.CreateGEP( 7383 SplitStoreType, Addr, 7384 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); 7385 7386 // When splitting the store in half, naturally one half will retain the 7387 // alignment of the original wider store, regardless of whether it was 7388 // over-aligned or not, while the other will require adjustment. 7389 Alignment = commonAlignment(Alignment, HalfValBitSize / 8); 7390 } 7391 Builder.CreateAlignedStore(V, Addr, Alignment); 7392 }; 7393 7394 CreateSplitStore(LValue, false); 7395 CreateSplitStore(HValue, true); 7396 7397 // Delete the old store. 7398 SI.eraseFromParent(); 7399 return true; 7400 } 7401 7402 // Return true if the GEP has two operands, the first operand is of a sequential 7403 // type, and the second operand is a constant. 7404 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { 7405 gep_type_iterator I = gep_type_begin(*GEP); 7406 return GEP->getNumOperands() == 2 && 7407 I.isSequential() && 7408 isa<ConstantInt>(GEP->getOperand(1)); 7409 } 7410 7411 // Try unmerging GEPs to reduce liveness interference (register pressure) across 7412 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, 7413 // reducing liveness interference across those edges benefits global register 7414 // allocation. Currently handles only certain cases. 7415 // 7416 // For example, unmerge %GEPI and %UGEPI as below. 7417 // 7418 // ---------- BEFORE ---------- 7419 // SrcBlock: 7420 // ... 7421 // %GEPIOp = ... 7422 // ... 7423 // %GEPI = gep %GEPIOp, Idx 7424 // ... 7425 // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] 7426 // (* %GEPI is alive on the indirectbr edges due to other uses ahead) 7427 // (* %GEPIOp is alive on the indirectbr edges only because of it's used by 7428 // %UGEPI) 7429 // 7430 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) 7431 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) 7432 // ... 7433 // 7434 // DstBi: 7435 // ... 7436 // %UGEPI = gep %GEPIOp, UIdx 7437 // ... 7438 // --------------------------- 7439 // 7440 // ---------- AFTER ---------- 7441 // SrcBlock: 7442 // ... (same as above) 7443 // (* %GEPI is still alive on the indirectbr edges) 7444 // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the 7445 // unmerging) 7446 // ... 7447 // 7448 // DstBi: 7449 // ... 7450 // %UGEPI = gep %GEPI, (UIdx-Idx) 7451 // ... 7452 // --------------------------- 7453 // 7454 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is 7455 // no longer alive on them. 
7456 //
7457 // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
7458 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as
7459 // not to disable further simplifications and optimizations as a result of GEP
7460 // merging.
7461 //
7462 // Note this unmerging may increase the length of the data-flow critical path
7463 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
7464 // between the register pressure and the length of the data-flow critical
7465 // path. Restricting this to the uncommon IndirectBr case would minimize the
7466 // impact of a potentially longer critical path, if any, and the impact on
7467 // compile time.
7468 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
7469                                              const TargetTransformInfo *TTI) {
7470   BasicBlock *SrcBlock = GEPI->getParent();
7471   // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
7472   // (non-IndirectBr) cases exit early here.
7473   if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
7474     return false;
7475   // Check that GEPI is a simple gep with a single constant index.
7476   if (!GEPSequentialConstIndexed(GEPI))
7477     return false;
7478   ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
7479   // Check that GEPI is a cheap one.
7480   if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
7481                          TargetTransformInfo::TCK_SizeAndLatency)
7482       > TargetTransformInfo::TCC_Basic)
7483     return false;
7484   Value *GEPIOp = GEPI->getOperand(0);
7485   // Check that GEPIOp is an instruction that's also defined in SrcBlock.
7486   if (!isa<Instruction>(GEPIOp))
7487     return false;
7488   auto *GEPIOpI = cast<Instruction>(GEPIOp);
7489   if (GEPIOpI->getParent() != SrcBlock)
7490     return false;
7491   // Check that GEPI is used outside the block, meaning it's alive on the
7492   // IndirectBr edge(s).
7493   if (find_if(GEPI->users(), [&](User *Usr) {
7494         if (auto *I = dyn_cast<Instruction>(Usr)) {
7495           if (I->getParent() != SrcBlock) {
7496             return true;
7497           }
7498         }
7499         return false;
7500       }) == GEPI->users().end())
7501     return false;
7502   // The second elements of the GEP chains to be unmerged.
7503   std::vector<GetElementPtrInst *> UGEPIs;
7504   // Check each user of GEPIOp to see if unmerging would make GEPIOp not alive
7505   // on IndirectBr edges.
7506   for (User *Usr : GEPIOp->users()) {
7507     if (Usr == GEPI) continue;
7508     // Check if Usr is an Instruction. If not, give up.
7509     if (!isa<Instruction>(Usr))
7510       return false;
7511     auto *UI = cast<Instruction>(Usr);
7512     // If Usr is in the same block as GEPIOp, that is fine; skip it.
7513     if (UI->getParent() == SrcBlock)
7514       continue;
7515     // Check if Usr is a GEP. If not, give up.
7516     if (!isa<GetElementPtrInst>(Usr))
7517       return false;
7518     auto *UGEPI = cast<GetElementPtrInst>(Usr);
7519     // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
7520     // the pointer operand to it. If so, record it in the vector. If not, give
7521     // up.
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
                         TargetTransformInfo::TCK_SizeAndLatency)
      > TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEPI is used outside the block, meaning it's alive on the
  // IndirectBr edge(s).
  if (find_if(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock) {
            return true;
          }
        }
        return false;
      }) == GEPI->users().end())
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to see whether unmerging would make GEPIOp no
  // longer alive on the IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI) continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that is fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
    // the pointer operand to it. If so, record it in the vector. If not, give
    // up.
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
                           TargetTransformInfo::TCK_SizeAndLatency)
        > TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.size() == 0)
    return false;
  // Check the materializing cost of (UIdx-Idx).
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    unsigned ImmCost =
        TTI->getIntImmCost(NewIdx, GEPIIdx->getType(),
                           TargetTransformInfo::TCK_SizeAndLatency);
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx =
        ConstantInt::get(GEPIIdx->getType(),
                         UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on IndirectBr edges).
  assert(find_if(GEPIOp->users(), [&](User *Usr) {
           return cast<Instruction>(Usr)->getParent() != SrcBlock;
         }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
  return true;
}
bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  // TODO: Move into the switch on opcode below here.
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
      LargeOffsetGEPMap.erase(P);
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
          TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (auto *Cmp = dyn_cast<CmpInst>(I))
    if (optimizeCmp(Cmp, ModifiedDT))
      return true;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    bool Modified = optimizeLoadExt(LI);
    unsigned AS = LI->getPointerAddressSpace();
    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
    return Modified;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    unsigned AS = SI->getPointerAddressSpace();
    return optimizeMemoryInst(I, SI->getOperand(1),
                              SI->getOperand(0)->getType(), AS);
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  // TODO: Move this into the switch on opcode - it handles shifts already.
  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (CI && TLI->hasExtractBitsInsn())
      if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
        return true;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      NC->setDebugLoc(GEPI->getDebugLoc());
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
    return false;
  }

  if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    // freeze(icmp a, const) -> icmp (freeze a), const
    // This helps generate efficient conditional jumps.
    Instruction *CmpI = nullptr;
    if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
      CmpI = II;
    else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
      CmpI = F->getFastMathFlags().none() ? F : nullptr;

    if (CmpI && CmpI->hasOneUse()) {
      auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
      bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
                    isa<ConstantPointerNull>(Op0);
      bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
                    isa<ConstantPointerNull>(Op1);
      if (Const0 || Const1) {
        if (!Const0 || !Const1) {
          auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
          F->takeName(FI);
          CmpI->setOperand(Const0 ? 1 : 0, F);
        }
        FI->replaceAllUsesWith(CmpI);
        FI->eraseFromParent();
        return true;
      }
    }
    return false;
  }

  if (tryToSinkFreeOperands(I))
    return true;

  switch (I->getOpcode()) {
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return optimizeShiftInst(cast<BinaryOperator>(I));
  case Instruction::Call:
    return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
  case Instruction::Select:
    return optimizeSelectInst(cast<SelectInst>(I));
  case Instruction::ShuffleVector:
    return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
  case Instruction::Switch:
    return optimizeSwitchInst(cast<SwitchInst>(I));
  case Instruction::ExtractElement:
    return optimizeExtractElementInst(cast<ExtractElementInst>(I));
  }

  return false;
}
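// Illustrative sketch of the TypeExpandInteger case handled in optimizeInst
// above (target and types assumed: i128 needs integer expansion on a typical
// 64-bit target; names are made up). For
//
//   entry:
//     %e = zext i64 %x to i128
//     br i1 %c, label %use1, label %use2
//
// where %e is used in both %use1 and %use2, SinkCast places a copy of the zext
// in each using block so SelectionDAG sees the extension next to its use,
// rather than keeping the expanded i128 value live across the branch.
// Extensions whose result does fit in one register take the
// optimizeExt/optimizeExtUses path instead.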
/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
bool CodeGenPrepare::makeBitReverse(Instruction &I) {
  if (!I.getType()->isIntegerTy() ||
      !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
                                     TLI->getValueType(*DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(
      &I, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); });
  return true;
}
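// Illustrative sketch of what makeBitReverse above rewrites (value width and
// names assumed): when the chain of shifts, masks and ORs rooted at a final
// `or` computes a bit reversal, e.g. the usual swap-halves / swap-nibbles /
// swap-pairs / swap-bits sequence ending in
//
//   %rev = or i32 %odd, %even
//
// the root is replaced by
//
//   %rev = call i32 @llvm.bitreverse.i32(i32 %x)
//
// provided the target reports BITREVERSE as legal or custom for the type, and
// the now-dead shift/mask instructions are deleted afterwards.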
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I)) {
        MadeBitReverse = MadeChange = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);

  return MadeChange;
}

// Some CGP optimizations may move or alter what's computed in a block. Check
// whether a dbg.value intrinsic could be pointed at a more appropriate operand.
bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
  assert(isa<DbgValueInst>(I));
  DbgValueInst &DVI = *cast<DbgValueInst>(I);

  // Does this dbg.value refer to a sunk address calculation?
  Value *Location = DVI.getVariableLocation();
  WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  if (SunkAddr) {
    // Point dbg.value at locally computed address, which should give the best
    // opportunity to be accurately lowered. This update may change the type of
    // pointer being referred to; however this makes no difference to debugging
    // information, and we can't generate bitcasts that may affect codegen.
    DVI.setOperand(0, MetadataAsValue::get(DVI.getContext(),
                                           ValueAsMetadata::get(SunkAddr)));
    return true;
  }
  return false;
}

// A llvm.dbg.value may be using a value before its definition, due to
// optimizations in this pass and others. Scan for such dbg.values, and rescue
// them by moving the dbg.value to immediately after the value definition.
// FIXME: Ideally this should never be necessary, and this has the potential
// to re-order dbg.value intrinsics.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  DominatorTree DT(F);

  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI)
        continue;

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());

      if (!VI || VI->isTerminator())
        continue;

      // If VI is a phi in a block with an EHPad terminator, we can't insert
      // after it.
      if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
        continue;

      // If the defining instruction dominates the dbg.value, we do not need
      // to move the dbg.value.
      if (DT.dominates(VI, DVI))
        continue;

      LLVM_DEBUG(dbgs() << "Moving Debug Value before:\n"
                        << *DVI << ' ' << *VI);
      DVI->removeFromParent();
      if (isa<PHINode>(VI))
        DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
      else
        DVI->insertAfter(VI);
      MadeChange = true;
      ++NumDbgValueMoved;
    }
  }
  return MadeChange;
}

/// Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}
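// Worked example for scaleWeights (values assumed for illustration): with
// NewTrue = 6,000,000,000 and NewFalse = 2,000,000,000, NewMax is
// 6,000,000,000, so Scale = 6,000,000,000 / 4,294,967,295 + 1 = 2. The weights
// become 3,000,000,000 and 1,000,000,000, which both fit in uint32_t while
// preserving the 3:1 ratio. Weights that already fit get Scale = 1 and are
// left unchanged.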
/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    Instruction *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(),
               m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    // The merging of mostly empty BBs can cause a degenerate branch.
    if (TBB == FBB)
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp,
              m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
                                        m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    auto IsGoodCond = [](Value *Cond) {
      return match(
          Cond,
          m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
                                           m_LogicalOr(m_Value(), m_Value()))));
    };
    if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block to use the first condition directly in
    // the branch instruction, and remove the no-longer-needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes from
    // the newly generated BB (TmpBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions now
    // target the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    TBB->replacePhiUsesWith(&BB, TmpBB);

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      //   BB1:
      //     jmp_if_X TBB
      //     jmp TmpBB
      //   TmpBB:
      //     jmp_if_Y TBB
      //     jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals to TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      //   BB1:
      //     jmp_if_X TmpBB
      //     jmp FBB
      //   TmpBB:
      //     jmp_if_Y TBB
      //     jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight));
      }
    }

    ModifiedDT = true;
    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
               TmpBB->dump());
  }
  return MadeChange;
}
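// Worked example of the Or-case weight update in splitBranchCondition above
// (numbers assumed for illustration): suppose the original branch weights were
// A = 30 (TrueBB) and B = 10 (FalseBB), i.e. the or-condition was taken 75% of
// the time. Br1 then gets weights {A, A + 2B} = {30, 50} and Br2 gets
// {A, 2B} = {30, 20}. Chaining the probabilities:
//   P(TrueBB) = 30/80 + (50/80 * 30/50) = 0.375 + 0.375 = 0.75,
// which matches the original TrueProb, as the constraint above requires.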