//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden, cl::init(false),
    cl::ZeroOrMore,
    cl::desc("In profiling modes like SampleFDO, if a function has no "
             "profile, we cannot tell for sure that it is cold, because it "
             "may have been added recently and never sampled. With this flag "
             "enabled, the compiler puts such profile-unknown functions into "
             "a special section, so the runtime system can choose to handle "
             "them differently than the .text section, e.g. to save RAM."));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool> OptimizePhiTypes(
    "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
    cl::desc("Enable converting phi types in CodeGenPrepare"));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<
      AssertingVH<Value>,
      SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of new GEP base after splitting the GEPs having large offset.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map serial numbers to Large offset GEPs.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }

  void removeAllAssertingVHReferences(Value *V);
  bool eliminateAssumptions(Function &F);
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                          Type *AccessTy, unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, bool &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);

  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
                                   Value *Arg1, CmpInst *Cmp,
                                   Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  void verifyBFIUpdates(Function &F);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  OptSize = F.hasOptSize();
  if (ProfileGuidedSectionPrefix) {
    // The hot attribute overrides profile-count-based hotness, while
    // profile-count-based hotness overrides the cold attribute.
    // This is a conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or (2) the
    // function has the cold attribute.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
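  // Roughly (a sketch, assuming the target reports a 64-bit -> 32-bit bypass
  // width), a division such as
  //   %r = udiv i64 %a, %b
  // is rewritten by bypassSlowDivision into a runtime check on the high bits
  // of %a and %b that selects either a cheap 32-bit udiv (with the result
  // zero-extended) or the original 64-bit udiv.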
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      // F.hasOptSize is already checked in the outer if statement.
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Get rid of @llvm.assume builtins before attempting to eliminate empty
  // blocks, since there might be blocks that only contain @llvm.assume calls
  // (plus arguments that we can get rid of).
  EverMadeChange |= eliminateAssumptions(F);

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  bool ModifiedDT = false;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= SplitIndirectBrCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    DT.reset();
    for (Function::iterator I = F.begin(); I != F.end();) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  NewGEPBases.clear();
  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);

#ifndef NDEBUG
  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);
#endif

  return EverMadeChange;
}

bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() != Intrinsic::assume)
          continue;
        MadeChange = true;
        Value *Operand = II->getOperand(0);
        II->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}

/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  const auto &I =
      llvm::find_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
  if (I == GEPVector.end())
    return;

  GEPVector.erase(I);
  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}

// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB);
      Preds.insert(SinglePred);
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
      for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
        if (DestBB == CBI->getSuccessor(i))
          return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
                 isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In this case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
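/// A minimal illustration (hypothetical IR): a block of the form
///   bb:
///     %p = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
/// is removed by rewiring %a and %b directly to %dest and folding %p's
/// incoming values into the PHIs of %dest.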
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of a derived pointer is defined after
  // the relocation of its base pointer. If we find a relocation that shares
  // RelocatedBase's base but is defined earlier than RelocatedBase, we move
  // RelocatedBase right before that relocation. We consider only relocations
  // in the same basic block as RelocatedBase; relocations from other basic
  // blocks are skipped by the optimization and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto *RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast no matter whether there is already one or not. In this way, we
    // can handle all cases, and the extra bitcast should be optimized away in
    // later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement =
        Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
                          makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// Sink the specified cast instruction into its user blocks.
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock *, CastInst *> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
                                      CI->getType(), "", &*InsertPt);
      InsertedCast->setDebugLoc(CI->getDebugLoc());
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    salvageDebugInfo(*CI);
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  // than sinking only nop casts, but is helpful on some platforms.
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
    if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
                                 ASC->getDestAddressSpace()))
      return false;
  }

  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT))
    return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// If given \p PN is an inductive variable with value IVInc coming from the
/// backedge, and on each iteration it gets increased by Step, return pair
/// <IVInc, Step>. Otherwise, return None.
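/// A minimal illustration (hypothetical IR): for a loop such as
///   loop:
///     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
///     %iv.next = add i64 %iv, 1
///     br i1 %cond, label %loop, label %exit
/// this would return <%iv.next, i64 1> when given %iv.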
static Optional<std::pair<Instruction *, Constant *>>
getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
  const Loop *L = LI->getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
    return None;
  auto *IVInc =
      dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
  if (!IVInc)
    return None;
  Constant *Step = nullptr;
  if (match(IVInc, m_Sub(m_Specific(PN), m_Constant(Step))))
    return std::make_pair(IVInc, ConstantExpr::getNeg(Step));
  if (match(IVInc, m_Add(m_Specific(PN), m_Constant(Step))))
    return std::make_pair(IVInc, Step);
  if (match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
                       m_Specific(PN), m_Constant(Step)))))
    return std::make_pair(IVInc, ConstantExpr::getNeg(Step));
  if (match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
                       m_Specific(PN), m_Constant(Step)))))
    return std::make_pair(IVInc, Step);
  return None;
}

static bool isIVIncrement(const BinaryOperator *BO, const LoopInfo *LI) {
  auto *PN = dyn_cast<PHINode>(BO->getOperand(0));
  if (!PN || LI->getLoopFor(BO->getParent()) != LI->getLoopFor(PN->getParent()))
    return false;
  if (auto IVInc = getIVIncrement(PN, LI))
    return IVInc->first == BO;
  return false;
}

bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
                                                 Value *Arg0, Value *Arg1,
                                                 CmpInst *Cmp,
                                                 Intrinsic::ID IID) {
  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
    if (!isIVIncrement(BO, LI))
      return false;
    const Loop *L = LI->getLoopFor(BO->getParent());
    assert(L && "L should not be null after isIVIncrement()");
    // The IV increment may have users other than the IV. We do not want to
    // make dominance queries to analyze the legality of moving it towards the
    // cmp, so just check that there are no other users.
    if (!BO->hasOneUse())
      return false;
    // Do not risk moving the increment into a child loop.
    if (LI->getLoopFor(Cmp->getParent()) != L)
      return false;
    // Ultimately, the insertion point must dominate the latch. This should be
    // a cheap check because no CFG changes & dom tree recomputation happens
    // during the transform.
    Function *F = BO->getParent()->getParent();
    return getDT(*F).dominates(Cmp->getParent(), L->getLoopLatch());
  };
  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
    // We used to use a dominator tree here to allow multi-block optimization.
    // But that was problematic because:
    // 1. It could cause a perf regression by hoisting the math op into the
    //    critical path.
    // 2. It could cause a perf regression by creating a value that was live
    //    across multiple blocks and increasing register pressure.
    // 3. Use of a dominator tree could cause large compile-time regression.
    //    This is because we recompute the DT on every change in the main CGP
    //    run-loop. The recomputing is probably unnecessary in many cases, so
    //    if that was fixed, using a DT here would be ok.
    //
    // There is one important particular case we still want to handle: if BO is
    // the IV increment.
    // Important properties that make it profitable:
    // - We can speculate the IV increment anywhere in the loop (as long as the
    //   indvar Phi is its only user);
    // - Upon computing Cmp, we effectively compute something equivalent to the
    //   IV increment (despite it looking different in the IR). So moving it up
    //   to the cmp point does not really increase register pressure.
    return false;
  }

  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
  if (BO->getOpcode() == Instruction::Add &&
      IID == Intrinsic::usub_with_overflow) {
    assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
    Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
  }

  // Insert at the first instruction of the pair.
  Instruction *InsertPt = nullptr;
  for (Instruction &Iter : *Cmp->getParent()) {
    // If BO is an XOR, it is not guaranteed that it comes after both inputs to
    // the overflow intrinsic are defined.
    if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
      InsertPt = &Iter;
      break;
    }
  }
  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");

  IRBuilder<> Builder(InsertPt);
  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
  if (BO->getOpcode() != Instruction::Xor) {
    Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
    BO->replaceAllUsesWith(Math);
  } else
    assert(BO->hasOneUse() &&
           "Patterns with XOr should use the BO only in the compare");
  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
  Cmp->replaceAllUsesWith(OV);
  Cmp->eraseFromParent();
  BO->eraseFromParent();
  return true;
}

/// Match special-case patterns that check for unsigned add overflow.
static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
                                                   BinaryOperator *&Add) {
  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);

  // We are not expecting non-canonical/degenerate code. Just bail out.
  if (isa<Constant>(A))
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
    B = ConstantInt::get(B->getType(), 1);
  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
    B = ConstantInt::get(B->getType(), -1);
  else
    return false;

  // Check the users of the variable operand of the compare looking for an add
  // with the adjusted constant.
  for (User *U : A->users()) {
    if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
      Add = cast<BinaryOperator>(U);
      return true;
    }
  }
  return false;
}

/// Try to combine the compare into a call to the llvm.uadd.with.overflow
/// intrinsic. Return true if any changes were made.
bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
                                               bool &ModifiedDT) {
  Value *A, *B;
  BinaryOperator *Add;
  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
    if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
      return false;
    // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1461 A = Add->getOperand(0); 1462 B = Add->getOperand(1); 1463 } 1464 1465 if (!TLI->shouldFormOverflowOp(ISD::UADDO, 1466 TLI->getValueType(*DL, Add->getType()), 1467 Add->hasNUsesOrMore(2))) 1468 return false; 1469 1470 // We don't want to move around uses of condition values this late, so we 1471 // check if it is legal to create the call to the intrinsic in the basic 1472 // block containing the icmp. 1473 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) 1474 return false; 1475 1476 if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp, 1477 Intrinsic::uadd_with_overflow)) 1478 return false; 1479 1480 // Reset callers - do not crash by iterating over a dead instruction. 1481 ModifiedDT = true; 1482 return true; 1483 } 1484 1485 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp, 1486 bool &ModifiedDT) { 1487 // We are not expecting non-canonical/degenerate code. Just bail out. 1488 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); 1489 if (isa<Constant>(A) && isa<Constant>(B)) 1490 return false; 1491 1492 // Convert (A u> B) to (A u< B) to simplify pattern matching. 1493 ICmpInst::Predicate Pred = Cmp->getPredicate(); 1494 if (Pred == ICmpInst::ICMP_UGT) { 1495 std::swap(A, B); 1496 Pred = ICmpInst::ICMP_ULT; 1497 } 1498 // Convert special-case: (A == 0) is the same as (A u< 1). 1499 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { 1500 B = ConstantInt::get(B->getType(), 1); 1501 Pred = ICmpInst::ICMP_ULT; 1502 } 1503 // Convert special-case: (A != 0) is the same as (0 u< A). 1504 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { 1505 std::swap(A, B); 1506 Pred = ICmpInst::ICMP_ULT; 1507 } 1508 if (Pred != ICmpInst::ICMP_ULT) 1509 return false; 1510 1511 // Walk the users of a variable operand of a compare looking for a subtract or 1512 // add with that same operand. Also match the 2nd operand of the compare to 1513 // the add/sub, but that may be a negated constant operand of an add. 1514 Value *CmpVariableOperand = isa<Constant>(A) ? B : A; 1515 BinaryOperator *Sub = nullptr; 1516 for (User *U : CmpVariableOperand->users()) { 1517 // A - B, A u< B --> usubo(A, B) 1518 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { 1519 Sub = cast<BinaryOperator>(U); 1520 break; 1521 } 1522 1523 // A + (-C), A u< C (canonicalized form of (sub A, C)) 1524 const APInt *CmpC, *AddC; 1525 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && 1526 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { 1527 Sub = cast<BinaryOperator>(U); 1528 break; 1529 } 1530 } 1531 if (!Sub) 1532 return false; 1533 1534 if (!TLI->shouldFormOverflowOp(ISD::USUBO, 1535 TLI->getValueType(*DL, Sub->getType()), 1536 Sub->hasNUsesOrMore(2))) 1537 return false; 1538 1539 if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1), 1540 Cmp, Intrinsic::usub_with_overflow)) 1541 return false; 1542 1543 // Reset callers - do not crash by iterating over a dead instruction. 1544 ModifiedDT = true; 1545 return true; 1546 } 1547 1548 /// Sink the given CmpInst into user blocks to reduce the number of virtual 1549 /// registers that must be created and coalesced. This is a clear win except on 1550 /// targets with multiple condition code registers (PowerPC), where it might 1551 /// lose; some adjustment may be wanted there. 1552 /// 1553 /// Return true if any changes are made. 
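///
/// For example (illustrative IR, not from a test case):
///   BB0:  %cmp = icmp eq i32 %a, %b    ; used by branches in BB1 and BB2
/// is rewritten so that BB1 and BB2 each get their own "icmp eq i32 %a, %b",
/// and the original compare is erased once it has no remaining uses.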
static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
  if (TLI.hasMultipleConditionRegisters())
    return false;

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();
    BasicBlock *DefBB = Cmp->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
                          Cmp->getOperand(0), Cmp->getOperand(1), "",
                          &*InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (Cmp->use_empty()) {
    Cmp->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// For a pattern like:
///
///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
///   ...
/// DomBB:
///   ...
///   br DomCond, TrueBB, CmpBB
/// CmpBB: (with DomBB being the single predecessor)
///   ...
///   Cmp = icmp eq CmpOp0, CmpOp1
///   ...
///
/// this would use two comparisons on targets where the lowering of icmp
/// sgt/slt differs from the lowering of icmp eq (PowerPC). This function tries
/// to convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0,
/// CmpOp1'. After that, DomCond and Cmp can share the same comparison,
/// eliminating one comparison.
///
/// Return true if any changes are made.
static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
                                       const TargetLowering &TLI) {
  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  // If icmp eq has users other than BranchInst and SelectInst, converting it to
  // icmp slt/sgt would introduce more redundant LLVM IR.
  for (User *U : Cmp->users()) {
    if (isa<BranchInst>(U))
      continue;
    if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
      continue;
    return false;
  }

  // This is a cheap/incomplete check for dominance - just match a single
  // predecessor with a conditional branch.
1654 BasicBlock *CmpBB = Cmp->getParent(); 1655 BasicBlock *DomBB = CmpBB->getSinglePredecessor(); 1656 if (!DomBB) 1657 return false; 1658 1659 // We want to ensure that the only way control gets to the comparison of 1660 // interest is that a less/greater than comparison on the same operands is 1661 // false. 1662 Value *DomCond; 1663 BasicBlock *TrueBB, *FalseBB; 1664 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB))) 1665 return false; 1666 if (CmpBB != FalseBB) 1667 return false; 1668 1669 Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1); 1670 ICmpInst::Predicate DomPred; 1671 if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1)))) 1672 return false; 1673 if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT) 1674 return false; 1675 1676 // Convert the equality comparison to the opposite of the dominating 1677 // comparison and swap the direction for all branch/select users. 1678 // We have conceptually converted: 1679 // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>; 1680 // to 1681 // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>; 1682 // And similarly for branches. 1683 for (User *U : Cmp->users()) { 1684 if (auto *BI = dyn_cast<BranchInst>(U)) { 1685 assert(BI->isConditional() && "Must be conditional"); 1686 BI->swapSuccessors(); 1687 continue; 1688 } 1689 if (auto *SI = dyn_cast<SelectInst>(U)) { 1690 // Swap operands 1691 SI->swapValues(); 1692 SI->swapProfMetadata(); 1693 continue; 1694 } 1695 llvm_unreachable("Must be a branch or a select"); 1696 } 1697 Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred)); 1698 return true; 1699 } 1700 1701 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) { 1702 if (sinkCmpExpression(Cmp, *TLI)) 1703 return true; 1704 1705 if (combineToUAddWithOverflow(Cmp, ModifiedDT)) 1706 return true; 1707 1708 if (combineToUSubWithOverflow(Cmp, ModifiedDT)) 1709 return true; 1710 1711 if (foldICmpWithDominatingICmp(Cmp, *TLI)) 1712 return true; 1713 1714 return false; 1715 } 1716 1717 /// Duplicate and sink the given 'and' instruction into user blocks where it is 1718 /// used in a compare to allow isel to generate better code for targets where 1719 /// this operation can be combined. 1720 /// 1721 /// Return true if any changes are made. 1722 static bool sinkAndCmp0Expression(Instruction *AndI, 1723 const TargetLowering &TLI, 1724 SetOfInstrs &InsertedInsts) { 1725 // Double-check that we're not trying to optimize an instruction that was 1726 // already optimized by some other part of this pass. 1727 assert(!InsertedInsts.count(AndI) && 1728 "Attempting to optimize already optimized and instruction"); 1729 (void) InsertedInsts; 1730 1731 // Nothing to do for single use in same basic block. 1732 if (AndI->hasOneUse() && 1733 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) 1734 return false; 1735 1736 // Try to avoid cases where sinking/duplicating is likely to increase register 1737 // pressure. 1738 if (!isa<ConstantInt>(AndI->getOperand(0)) && 1739 !isa<ConstantInt>(AndI->getOperand(1)) && 1740 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) 1741 return false; 1742 1743 for (auto *U : AndI->users()) { 1744 Instruction *User = cast<Instruction>(U); 1745 1746 // Only sink 'and' feeding icmp with 0. 
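    // For illustration (hypothetical IR): a user such as
    //   %m = and i32 %x, 255
    //   %c = icmp eq i32 %m, 0
    // is acceptable; any user that is not an icmp against zero disqualifies
    // the whole 'and' from being sunk.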
1747 if (!isa<ICmpInst>(User)) 1748 return false; 1749 1750 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); 1751 if (!CmpC || !CmpC->isZero()) 1752 return false; 1753 } 1754 1755 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) 1756 return false; 1757 1758 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); 1759 LLVM_DEBUG(AndI->getParent()->dump()); 1760 1761 // Push the 'and' into the same block as the icmp 0. There should only be 1762 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any 1763 // others, so we don't need to keep track of which BBs we insert into. 1764 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); 1765 UI != E; ) { 1766 Use &TheUse = UI.getUse(); 1767 Instruction *User = cast<Instruction>(*UI); 1768 1769 // Preincrement use iterator so we don't invalidate it. 1770 ++UI; 1771 1772 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); 1773 1774 // Keep the 'and' in the same place if the use is already in the same block. 1775 Instruction *InsertPt = 1776 User->getParent() == AndI->getParent() ? AndI : User; 1777 Instruction *InsertedAnd = 1778 BinaryOperator::Create(Instruction::And, AndI->getOperand(0), 1779 AndI->getOperand(1), "", InsertPt); 1780 // Propagate the debug info. 1781 InsertedAnd->setDebugLoc(AndI->getDebugLoc()); 1782 1783 // Replace a use of the 'and' with a use of the new 'and'. 1784 TheUse = InsertedAnd; 1785 ++NumAndUses; 1786 LLVM_DEBUG(User->getParent()->dump()); 1787 } 1788 1789 // We removed all uses, nuke the and. 1790 AndI->eraseFromParent(); 1791 return true; 1792 } 1793 1794 /// Check if the candidates could be combined with a shift instruction, which 1795 /// includes: 1796 /// 1. Truncate instruction 1797 /// 2. And instruction and the imm is a mask of the low bits: 1798 /// imm & (imm+1) == 0 1799 static bool isExtractBitsCandidateUse(Instruction *User) { 1800 if (!isa<TruncInst>(User)) { 1801 if (User->getOpcode() != Instruction::And || 1802 !isa<ConstantInt>(User->getOperand(1))) 1803 return false; 1804 1805 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 1806 1807 if ((Cimm & (Cimm + 1)).getBoolValue()) 1808 return false; 1809 } 1810 return true; 1811 } 1812 1813 /// Sink both shift and truncate instruction to the use of truncate's BB. 1814 static bool 1815 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 1816 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 1817 const TargetLowering &TLI, const DataLayout &DL) { 1818 BasicBlock *UserBB = User->getParent(); 1819 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 1820 auto *TruncI = cast<TruncInst>(User); 1821 bool MadeChange = false; 1822 1823 for (Value::user_iterator TruncUI = TruncI->user_begin(), 1824 TruncE = TruncI->user_end(); 1825 TruncUI != TruncE;) { 1826 1827 Use &TruncTheUse = TruncUI.getUse(); 1828 Instruction *TruncUser = cast<Instruction>(*TruncUI); 1829 // Preincrement use iterator so we don't invalidate it. 1830 1831 ++TruncUI; 1832 1833 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 1834 if (!ISDOpcode) 1835 continue; 1836 1837 // If the use is actually a legal node, there will not be an 1838 // implicit truncate. 1839 // FIXME: always querying the result type is just an 1840 // approximation; some nodes' legality is determined by the 1841 // operand or other means. There's no good way to find out though. 
1842 if (TLI.isOperationLegalOrCustom( 1843 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 1844 continue; 1845 1846 // Don't bother for PHI nodes. 1847 if (isa<PHINode>(TruncUser)) 1848 continue; 1849 1850 BasicBlock *TruncUserBB = TruncUser->getParent(); 1851 1852 if (UserBB == TruncUserBB) 1853 continue; 1854 1855 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 1856 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 1857 1858 if (!InsertedShift && !InsertedTrunc) { 1859 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 1860 assert(InsertPt != TruncUserBB->end()); 1861 // Sink the shift 1862 if (ShiftI->getOpcode() == Instruction::AShr) 1863 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1864 "", &*InsertPt); 1865 else 1866 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1867 "", &*InsertPt); 1868 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1869 1870 // Sink the trunc 1871 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 1872 TruncInsertPt++; 1873 assert(TruncInsertPt != TruncUserBB->end()); 1874 1875 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 1876 TruncI->getType(), "", &*TruncInsertPt); 1877 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); 1878 1879 MadeChange = true; 1880 1881 TruncTheUse = InsertedTrunc; 1882 } 1883 } 1884 return MadeChange; 1885 } 1886 1887 /// Sink the shift *right* instruction into user blocks if the uses could 1888 /// potentially be combined with this shift instruction and generate BitExtract 1889 /// instruction. It will only be applied if the architecture supports BitExtract 1890 /// instruction. Here is an example: 1891 /// BB1: 1892 /// %x.extract.shift = lshr i64 %arg1, 32 1893 /// BB2: 1894 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 1895 /// ==> 1896 /// 1897 /// BB2: 1898 /// %x.extract.shift.1 = lshr i64 %arg1, 32 1899 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 1900 /// 1901 /// CodeGen will recognize the pattern in BB2 and generate BitExtract 1902 /// instruction. 1903 /// Return true if any changes are made. 1904 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, 1905 const TargetLowering &TLI, 1906 const DataLayout &DL) { 1907 BasicBlock *DefBB = ShiftI->getParent(); 1908 1909 /// Only insert instructions in each block once. 1910 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; 1911 1912 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); 1913 1914 bool MadeChange = false; 1915 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); 1916 UI != E;) { 1917 Use &TheUse = UI.getUse(); 1918 Instruction *User = cast<Instruction>(*UI); 1919 // Preincrement use iterator so we don't invalidate it. 1920 ++UI; 1921 1922 // Don't bother for PHI nodes. 1923 if (isa<PHINode>(User)) 1924 continue; 1925 1926 if (!isExtractBitsCandidateUse(User)) 1927 continue; 1928 1929 BasicBlock *UserBB = User->getParent(); 1930 1931 if (UserBB == DefBB) { 1932 // If the shift and truncate instruction are in the same BB. The use of 1933 // the truncate(TruncUse) may still introduce another truncate if not 1934 // legal. In this case, we would like to sink both shift and truncate 1935 // instruction to the BB of TruncUse. 
1936 // for example: 1937 // BB1: 1938 // i64 shift.result = lshr i64 opnd, imm 1939 // trunc.result = trunc shift.result to i16 1940 // 1941 // BB2: 1942 // ----> We will have an implicit truncate here if the architecture does 1943 // not have i16 compare. 1944 // cmp i16 trunc.result, opnd2 1945 // 1946 if (isa<TruncInst>(User) && shiftIsLegal 1947 // If the type of the truncate is legal, no truncate will be 1948 // introduced in other basic blocks. 1949 && 1950 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1951 MadeChange = 1952 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1953 1954 continue; 1955 } 1956 // If we have already inserted a shift into this block, use it. 1957 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1958 1959 if (!InsertedShift) { 1960 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1961 assert(InsertPt != UserBB->end()); 1962 1963 if (ShiftI->getOpcode() == Instruction::AShr) 1964 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1965 "", &*InsertPt); 1966 else 1967 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1968 "", &*InsertPt); 1969 InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); 1970 1971 MadeChange = true; 1972 } 1973 1974 // Replace a use of the shift with a use of the new shift. 1975 TheUse = InsertedShift; 1976 } 1977 1978 // If we removed all uses, or there are none, nuke the shift. 1979 if (ShiftI->use_empty()) { 1980 salvageDebugInfo(*ShiftI); 1981 ShiftI->eraseFromParent(); 1982 MadeChange = true; 1983 } 1984 1985 return MadeChange; 1986 } 1987 1988 /// If counting leading or trailing zeros is an expensive operation and a zero 1989 /// input is defined, add a check for zero to avoid calling the intrinsic. 1990 /// 1991 /// We want to transform: 1992 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1993 /// 1994 /// into: 1995 /// entry: 1996 /// %cmpz = icmp eq i64 %A, 0 1997 /// br i1 %cmpz, label %cond.end, label %cond.false 1998 /// cond.false: 1999 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 2000 /// br label %cond.end 2001 /// cond.end: 2002 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 2003 /// 2004 /// If the transform is performed, return true and set ModifiedDT to true. 2005 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 2006 const TargetLowering *TLI, 2007 const DataLayout *DL, 2008 bool &ModifiedDT) { 2009 // If a zero input is undefined, it doesn't make sense to despeculate that. 2010 if (match(CountZeros->getOperand(1), m_One())) 2011 return false; 2012 2013 // If it's cheap to speculate, there's nothing to do. 2014 auto IntrinsicID = CountZeros->getIntrinsicID(); 2015 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 2016 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 2017 return false; 2018 2019 // Only handle legal scalar cases. Anything else requires too much work. 2020 Type *Ty = CountZeros->getType(); 2021 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 2022 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) 2023 return false; 2024 2025 // The intrinsic will be sunk behind a compare against zero and branch. 2026 BasicBlock *StartBlock = CountZeros->getParent(); 2027 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 2028 2029 // Create another block after the count zero intrinsic. 
// A PHI will be added in this block to select the result of the intrinsic
// or the bit-width constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(&EndBlock->front());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  CountZeros->replaceAllUsesWith(PN);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = true;
  return true;
}

bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (CI->isInlineAsm()) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize, PrefAlign;
  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->arg_operands()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
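      // For example (hypothetical): an i8 alloca of 64 bytes passed at
      // constant offset 16, with PrefAlign = 16 and MinSize = 32, qualifies:
      // the offset is 16-aligned and 64 - 16 >= 32.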
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getIndexSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if ((Offset2 & (PrefAlign-1)) != 0)
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(Align(PrefAlign));
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >=
              MinSize + Offset2)
        GV->setAlignment(MaybeAlign(PrefAlign));
    }
    // If this is a memcpy (or similar) then we may be able to improve the
    // alignment.
    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
      Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
      MaybeAlign MIDestAlign = MI->getDestAlign();
      if (!MIDestAlign || DestAlign > *MIDestAlign)
        MI->setDestAlignment(DestAlign);
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        MaybeAlign MTISrcAlign = MTI->getSourceAlign();
        Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
        if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
          MTI->setSourceAlignment(SrcAlign);
      }
    }
  }

  // If we have a cold call site, try to sink addressing computation into the
  // cold block. This interacts with our handling for loads and stores to
  // ensure that we can fold all uses of a potential addressing computation
  // into their uses. TODO: generalize this to work over profiling data.
  if (CI->hasFnAttr(Attribute::Cold) &&
      !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
    for (auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
    }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::assume:
      llvm_unreachable("llvm.assume should have been removed already");
    case Intrinsic::experimental_widenable_condition: {
      // Give up on future widening opportunities so that we can fold away dead
      // paths and merge blocks before going into block-local instruction
      // selection.
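      // For illustration (hypothetical IR): a guard such as
      //   %wc = call i1 @llvm.experimental.widenable.condition()
      //   br i1 %wc, label %guarded, label %deopt
      // ends up branching on the constant 'true' after the replacement below,
      // so later CFG cleanup can remove the %deopt path.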
2156 if (II->use_empty()) { 2157 II->eraseFromParent(); 2158 return true; 2159 } 2160 Constant *RetVal = ConstantInt::getTrue(II->getContext()); 2161 resetIteratorIfInvalidatedWhileCalling(BB, [&]() { 2162 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 2163 }); 2164 return true; 2165 } 2166 case Intrinsic::objectsize: 2167 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 2168 case Intrinsic::is_constant: 2169 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 2170 case Intrinsic::aarch64_stlxr: 2171 case Intrinsic::aarch64_stxr: { 2172 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 2173 if (!ExtVal || !ExtVal->hasOneUse() || 2174 ExtVal->getParent() == CI->getParent()) 2175 return false; 2176 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 2177 ExtVal->moveBefore(CI); 2178 // Mark this instruction as "inserted by CGP", so that other 2179 // optimizations don't touch it. 2180 InsertedInsts.insert(ExtVal); 2181 return true; 2182 } 2183 2184 case Intrinsic::launder_invariant_group: 2185 case Intrinsic::strip_invariant_group: { 2186 Value *ArgVal = II->getArgOperand(0); 2187 auto it = LargeOffsetGEPMap.find(II); 2188 if (it != LargeOffsetGEPMap.end()) { 2189 // Merge entries in LargeOffsetGEPMap to reflect the RAUW. 2190 // Make sure not to have to deal with iterator invalidation 2191 // after possibly adding ArgVal to LargeOffsetGEPMap. 2192 auto GEPs = std::move(it->second); 2193 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); 2194 LargeOffsetGEPMap.erase(II); 2195 } 2196 2197 II->replaceAllUsesWith(ArgVal); 2198 II->eraseFromParent(); 2199 return true; 2200 } 2201 case Intrinsic::cttz: 2202 case Intrinsic::ctlz: 2203 // If counting zeros is expensive, try to avoid it. 2204 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 2205 case Intrinsic::fshl: 2206 case Intrinsic::fshr: 2207 return optimizeFunnelShift(II); 2208 case Intrinsic::dbg_value: 2209 return fixupDbgValue(II); 2210 case Intrinsic::vscale: { 2211 // If datalayout has no special restrictions on vector data layout, 2212 // replace `llvm.vscale` by an equivalent constant expression 2213 // to benefit from cheap constant propagation. 2214 Type *ScalableVectorTy = 2215 VectorType::get(Type::getInt8Ty(II->getContext()), 1, true); 2216 if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) { 2217 auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo()); 2218 auto *One = ConstantInt::getSigned(II->getType(), 1); 2219 auto *CGep = 2220 ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One); 2221 II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType())); 2222 II->eraseFromParent(); 2223 return true; 2224 } 2225 break; 2226 } 2227 case Intrinsic::masked_gather: 2228 return optimizeGatherScatterInst(II, II->getArgOperand(0)); 2229 case Intrinsic::masked_scatter: 2230 return optimizeGatherScatterInst(II, II->getArgOperand(1)); 2231 } 2232 2233 SmallVector<Value *, 2> PtrOps; 2234 Type *AccessTy; 2235 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) 2236 while (!PtrOps.empty()) { 2237 Value *PtrVal = PtrOps.pop_back_val(); 2238 unsigned AS = PtrVal->getType()->getPointerAddressSpace(); 2239 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) 2240 return true; 2241 } 2242 } 2243 2244 // From here on out we're working with named functions. 2245 if (!CI->getCalledFunction()) return false; 2246 2247 // Lower all default uses of _chk calls. 
This is very similar 2248 // to what InstCombineCalls does, but here we are only lowering calls 2249 // to fortified library functions (e.g. __memcpy_chk) that have the default 2250 // "don't know" as the objectsize. Anything else should be left alone. 2251 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 2252 IRBuilder<> Builder(CI); 2253 if (Value *V = Simplifier.optimizeCall(CI, Builder)) { 2254 CI->replaceAllUsesWith(V); 2255 CI->eraseFromParent(); 2256 return true; 2257 } 2258 2259 return false; 2260 } 2261 2262 /// Look for opportunities to duplicate return instructions to the predecessor 2263 /// to enable tail call optimizations. The case it is currently looking for is: 2264 /// @code 2265 /// bb0: 2266 /// %tmp0 = tail call i32 @f0() 2267 /// br label %return 2268 /// bb1: 2269 /// %tmp1 = tail call i32 @f1() 2270 /// br label %return 2271 /// bb2: 2272 /// %tmp2 = tail call i32 @f2() 2273 /// br label %return 2274 /// return: 2275 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 2276 /// ret i32 %retval 2277 /// @endcode 2278 /// 2279 /// => 2280 /// 2281 /// @code 2282 /// bb0: 2283 /// %tmp0 = tail call i32 @f0() 2284 /// ret i32 %tmp0 2285 /// bb1: 2286 /// %tmp1 = tail call i32 @f1() 2287 /// ret i32 %tmp1 2288 /// bb2: 2289 /// %tmp2 = tail call i32 @f2() 2290 /// ret i32 %tmp2 2291 /// @endcode 2292 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) { 2293 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); 2294 if (!RetI) 2295 return false; 2296 2297 PHINode *PN = nullptr; 2298 ExtractValueInst *EVI = nullptr; 2299 BitCastInst *BCI = nullptr; 2300 Value *V = RetI->getReturnValue(); 2301 if (V) { 2302 BCI = dyn_cast<BitCastInst>(V); 2303 if (BCI) 2304 V = BCI->getOperand(0); 2305 2306 EVI = dyn_cast<ExtractValueInst>(V); 2307 if (EVI) { 2308 V = EVI->getOperand(0); 2309 if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; })) 2310 return false; 2311 } 2312 2313 PN = dyn_cast<PHINode>(V); 2314 if (!PN) 2315 return false; 2316 } 2317 2318 if (PN && PN->getParent() != BB) 2319 return false; 2320 2321 auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) { 2322 const BitCastInst *BC = dyn_cast<BitCastInst>(Inst); 2323 if (BC && BC->hasOneUse()) 2324 Inst = BC->user_back(); 2325 2326 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) 2327 return II->getIntrinsicID() == Intrinsic::lifetime_end; 2328 return false; 2329 }; 2330 2331 // Make sure there are no instructions between the first instruction 2332 // and return. 2333 const Instruction *BI = BB->getFirstNonPHI(); 2334 // Skip over debug and the bitcast. 2335 while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI || 2336 isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI)) 2337 BI = BI->getNextNode(); 2338 if (BI != RetI) 2339 return false; 2340 2341 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 2342 /// call. 2343 const Function *F = BB->getParent(); 2344 SmallVector<BasicBlock*, 4> TailCallBBs; 2345 if (PN) { 2346 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 2347 // Look through bitcasts. 2348 Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts(); 2349 CallInst *CI = dyn_cast<CallInst>(IncomingVal); 2350 BasicBlock *PredBB = PN->getIncomingBlock(I); 2351 // Make sure the phi value is indeed produced by the tail call. 
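      // For example (hypothetical): %tmp0 = tail call i32 @f0() feeding
      // "%retval = phi i32 [ %tmp0, %bb0 ], ..." qualifies only if the call
      // lives in %bb0, the phi is its sole user, and the target-specific
      // checks below allow emitting it as a tail call.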
2352 if (CI && CI->hasOneUse() && CI->getParent() == PredBB && 2353 TLI->mayBeEmittedAsTailCall(CI) && 2354 attributesPermitTailCall(F, CI, RetI, *TLI)) 2355 TailCallBBs.push_back(PredBB); 2356 } 2357 } else { 2358 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 2359 for (BasicBlock *Pred : predecessors(BB)) { 2360 if (!VisitedBBs.insert(Pred).second) 2361 continue; 2362 if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) { 2363 CallInst *CI = dyn_cast<CallInst>(I); 2364 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && 2365 attributesPermitTailCall(F, CI, RetI, *TLI)) 2366 TailCallBBs.push_back(Pred); 2367 } 2368 } 2369 } 2370 2371 bool Changed = false; 2372 for (auto const &TailCallBB : TailCallBBs) { 2373 // Make sure the call instruction is followed by an unconditional branch to 2374 // the return block. 2375 BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator()); 2376 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 2377 continue; 2378 2379 // Duplicate the return into TailCallBB. 2380 (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB); 2381 assert(!VerifyBFIUpdates || 2382 BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB)); 2383 BFI->setBlockFreq( 2384 BB, 2385 (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency()); 2386 ModifiedDT = Changed = true; 2387 ++NumRetsDup; 2388 } 2389 2390 // If we eliminated all predecessors of the block, delete the block now. 2391 if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) 2392 BB->eraseFromParent(); 2393 2394 return Changed; 2395 } 2396 2397 //===----------------------------------------------------------------------===// 2398 // Memory Optimization 2399 //===----------------------------------------------------------------------===// 2400 2401 namespace { 2402 2403 /// This is an extended version of TargetLowering::AddrMode 2404 /// which holds actual Value*'s for register values. 2405 struct ExtAddrMode : public TargetLowering::AddrMode { 2406 Value *BaseReg = nullptr; 2407 Value *ScaledReg = nullptr; 2408 Value *OriginalValue = nullptr; 2409 bool InBounds = true; 2410 2411 enum FieldName { 2412 NoField = 0x00, 2413 BaseRegField = 0x01, 2414 BaseGVField = 0x02, 2415 BaseOffsField = 0x04, 2416 ScaledRegField = 0x08, 2417 ScaleField = 0x10, 2418 MultipleFields = 0xff 2419 }; 2420 2421 2422 ExtAddrMode() = default; 2423 2424 void print(raw_ostream &OS) const; 2425 void dump() const; 2426 2427 FieldName compare(const ExtAddrMode &other) { 2428 // First check that the types are the same on each field, as differing types 2429 // is something we can't cope with later on. 2430 if (BaseReg && other.BaseReg && 2431 BaseReg->getType() != other.BaseReg->getType()) 2432 return MultipleFields; 2433 if (BaseGV && other.BaseGV && 2434 BaseGV->getType() != other.BaseGV->getType()) 2435 return MultipleFields; 2436 if (ScaledReg && other.ScaledReg && 2437 ScaledReg->getType() != other.ScaledReg->getType()) 2438 return MultipleFields; 2439 2440 // Conservatively reject 'inbounds' mismatches. 2441 if (InBounds != other.InBounds) 2442 return MultipleFields; 2443 2444 // Check each field to see if it differs. 
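    // For example (hypothetical): two modes that agree on everything except
    // BaseOffs (say +4 vs +8) compare as BaseOffsField; modes differing in two
    // or more fields compare as MultipleFields.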
2445 unsigned Result = NoField; 2446 if (BaseReg != other.BaseReg) 2447 Result |= BaseRegField; 2448 if (BaseGV != other.BaseGV) 2449 Result |= BaseGVField; 2450 if (BaseOffs != other.BaseOffs) 2451 Result |= BaseOffsField; 2452 if (ScaledReg != other.ScaledReg) 2453 Result |= ScaledRegField; 2454 // Don't count 0 as being a different scale, because that actually means 2455 // unscaled (which will already be counted by having no ScaledReg). 2456 if (Scale && other.Scale && Scale != other.Scale) 2457 Result |= ScaleField; 2458 2459 if (countPopulation(Result) > 1) 2460 return MultipleFields; 2461 else 2462 return static_cast<FieldName>(Result); 2463 } 2464 2465 // An AddrMode is trivial if it involves no calculation i.e. it is just a base 2466 // with no offset. 2467 bool isTrivial() { 2468 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is 2469 // trivial if at most one of these terms is nonzero, except that BaseGV and 2470 // BaseReg both being zero actually means a null pointer value, which we 2471 // consider to be 'non-zero' here. 2472 return !BaseOffs && !Scale && !(BaseGV && BaseReg); 2473 } 2474 2475 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { 2476 switch (Field) { 2477 default: 2478 return nullptr; 2479 case BaseRegField: 2480 return BaseReg; 2481 case BaseGVField: 2482 return BaseGV; 2483 case ScaledRegField: 2484 return ScaledReg; 2485 case BaseOffsField: 2486 return ConstantInt::get(IntPtrTy, BaseOffs); 2487 } 2488 } 2489 2490 void SetCombinedField(FieldName Field, Value *V, 2491 const SmallVectorImpl<ExtAddrMode> &AddrModes) { 2492 switch (Field) { 2493 default: 2494 llvm_unreachable("Unhandled fields are expected to be rejected earlier"); 2495 break; 2496 case ExtAddrMode::BaseRegField: 2497 BaseReg = V; 2498 break; 2499 case ExtAddrMode::BaseGVField: 2500 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes 2501 // in the BaseReg field. 2502 assert(BaseReg == nullptr); 2503 BaseReg = V; 2504 BaseGV = nullptr; 2505 break; 2506 case ExtAddrMode::ScaledRegField: 2507 ScaledReg = V; 2508 // If we have a mix of scaled and unscaled addrmodes then we want scale 2509 // to be the scale and not zero. 2510 if (!Scale) 2511 for (const ExtAddrMode &AM : AddrModes) 2512 if (AM.Scale) { 2513 Scale = AM.Scale; 2514 break; 2515 } 2516 break; 2517 case ExtAddrMode::BaseOffsField: 2518 // The offset is no longer a constant, so it goes in ScaledReg with a 2519 // scale of 1. 2520 assert(ScaledReg == nullptr); 2521 ScaledReg = V; 2522 Scale = 1; 2523 BaseOffs = 0; 2524 break; 2525 } 2526 } 2527 }; 2528 2529 } // end anonymous namespace 2530 2531 #ifndef NDEBUG 2532 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 2533 AM.print(OS); 2534 return OS; 2535 } 2536 #endif 2537 2538 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2539 void ExtAddrMode::print(raw_ostream &OS) const { 2540 bool NeedPlus = false; 2541 OS << "["; 2542 if (InBounds) 2543 OS << "inbounds "; 2544 if (BaseGV) { 2545 OS << (NeedPlus ? " + " : "") 2546 << "GV:"; 2547 BaseGV->printAsOperand(OS, /*PrintType=*/false); 2548 NeedPlus = true; 2549 } 2550 2551 if (BaseOffs) { 2552 OS << (NeedPlus ? " + " : "") 2553 << BaseOffs; 2554 NeedPlus = true; 2555 } 2556 2557 if (BaseReg) { 2558 OS << (NeedPlus ? " + " : "") 2559 << "Base:"; 2560 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2561 NeedPlus = true; 2562 } 2563 if (Scale) { 2564 OS << (NeedPlus ? 
" + " : "") 2565 << Scale << "*"; 2566 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2567 } 2568 2569 OS << ']'; 2570 } 2571 2572 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2573 print(dbgs()); 2574 dbgs() << '\n'; 2575 } 2576 #endif 2577 2578 namespace { 2579 2580 /// This class provides transaction based operation on the IR. 2581 /// Every change made through this class is recorded in the internal state and 2582 /// can be undone (rollback) until commit is called. 2583 /// CGP does not check if instructions could be speculatively executed when 2584 /// moved. Preserving the original location would pessimize the debugging 2585 /// experience, as well as negatively impact the quality of sample PGO. 2586 class TypePromotionTransaction { 2587 /// This represents the common interface of the individual transaction. 2588 /// Each class implements the logic for doing one specific modification on 2589 /// the IR via the TypePromotionTransaction. 2590 class TypePromotionAction { 2591 protected: 2592 /// The Instruction modified. 2593 Instruction *Inst; 2594 2595 public: 2596 /// Constructor of the action. 2597 /// The constructor performs the related action on the IR. 2598 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2599 2600 virtual ~TypePromotionAction() = default; 2601 2602 /// Undo the modification done by this action. 2603 /// When this method is called, the IR must be in the same state as it was 2604 /// before this action was applied. 2605 /// \pre Undoing the action works if and only if the IR is in the exact same 2606 /// state as it was directly after this action was applied. 2607 virtual void undo() = 0; 2608 2609 /// Advocate every change made by this action. 2610 /// When the results on the IR of the action are to be kept, it is important 2611 /// to call this function, otherwise hidden information may be kept forever. 2612 virtual void commit() { 2613 // Nothing to be done, this action is not doing anything. 2614 } 2615 }; 2616 2617 /// Utility to remember the position of an instruction. 2618 class InsertionHandler { 2619 /// Position of an instruction. 2620 /// Either an instruction: 2621 /// - Is the first in a basic block: BB is used. 2622 /// - Has a previous instruction: PrevInst is used. 2623 union { 2624 Instruction *PrevInst; 2625 BasicBlock *BB; 2626 } Point; 2627 2628 /// Remember whether or not the instruction had a previous instruction. 2629 bool HasPrevInstruction; 2630 2631 public: 2632 /// Record the position of \p Inst. 2633 InsertionHandler(Instruction *Inst) { 2634 BasicBlock::iterator It = Inst->getIterator(); 2635 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2636 if (HasPrevInstruction) 2637 Point.PrevInst = &*--It; 2638 else 2639 Point.BB = Inst->getParent(); 2640 } 2641 2642 /// Insert \p Inst at the recorded position. 2643 void insert(Instruction *Inst) { 2644 if (HasPrevInstruction) { 2645 if (Inst->getParent()) 2646 Inst->removeFromParent(); 2647 Inst->insertAfter(Point.PrevInst); 2648 } else { 2649 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2650 if (Inst->getParent()) 2651 Inst->moveBefore(Position); 2652 else 2653 Inst->insertBefore(Position); 2654 } 2655 } 2656 }; 2657 2658 /// Move an instruction before another. 2659 class InstructionMoveBefore : public TypePromotionAction { 2660 /// Original position of the instruction. 2661 InsertionHandler Position; 2662 2663 public: 2664 /// Move \p Inst before \p Before. 
2665 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2666 : TypePromotionAction(Inst), Position(Inst) { 2667 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before 2668 << "\n"); 2669 Inst->moveBefore(Before); 2670 } 2671 2672 /// Move the instruction back to its original position. 2673 void undo() override { 2674 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2675 Position.insert(Inst); 2676 } 2677 }; 2678 2679 /// Set the operand of an instruction with a new value. 2680 class OperandSetter : public TypePromotionAction { 2681 /// Original operand of the instruction. 2682 Value *Origin; 2683 2684 /// Index of the modified instruction. 2685 unsigned Idx; 2686 2687 public: 2688 /// Set \p Idx operand of \p Inst with \p NewVal. 2689 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2690 : TypePromotionAction(Inst), Idx(Idx) { 2691 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2692 << "for:" << *Inst << "\n" 2693 << "with:" << *NewVal << "\n"); 2694 Origin = Inst->getOperand(Idx); 2695 Inst->setOperand(Idx, NewVal); 2696 } 2697 2698 /// Restore the original value of the instruction. 2699 void undo() override { 2700 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2701 << "for: " << *Inst << "\n" 2702 << "with: " << *Origin << "\n"); 2703 Inst->setOperand(Idx, Origin); 2704 } 2705 }; 2706 2707 /// Hide the operands of an instruction. 2708 /// Do as if this instruction was not using any of its operands. 2709 class OperandsHider : public TypePromotionAction { 2710 /// The list of original operands. 2711 SmallVector<Value *, 4> OriginalValues; 2712 2713 public: 2714 /// Remove \p Inst from the uses of the operands of \p Inst. 2715 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2716 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2717 unsigned NumOpnds = Inst->getNumOperands(); 2718 OriginalValues.reserve(NumOpnds); 2719 for (unsigned It = 0; It < NumOpnds; ++It) { 2720 // Save the current operand. 2721 Value *Val = Inst->getOperand(It); 2722 OriginalValues.push_back(Val); 2723 // Set a dummy one. 2724 // We could use OperandSetter here, but that would imply an overhead 2725 // that we are not willing to pay. 2726 Inst->setOperand(It, UndefValue::get(Val->getType())); 2727 } 2728 } 2729 2730 /// Restore the original list of uses. 2731 void undo() override { 2732 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2733 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2734 Inst->setOperand(It, OriginalValues[It]); 2735 } 2736 }; 2737 2738 /// Build a truncate instruction. 2739 class TruncBuilder : public TypePromotionAction { 2740 Value *Val; 2741 2742 public: 2743 /// Build a truncate instruction of \p Opnd producing a \p Ty 2744 /// result. 2745 /// trunc Opnd to Ty. 2746 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2747 IRBuilder<> Builder(Opnd); 2748 Builder.SetCurrentDebugLocation(DebugLoc()); 2749 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2750 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2751 } 2752 2753 /// Get the built value. 2754 Value *getBuiltValue() { return Val; } 2755 2756 /// Remove the built instruction. 2757 void undo() override { 2758 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2759 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2760 IVal->eraseFromParent(); 2761 } 2762 }; 2763 2764 /// Build a sign extension instruction. 
2765 class SExtBuilder : public TypePromotionAction { 2766 Value *Val; 2767 2768 public: 2769 /// Build a sign extension instruction of \p Opnd producing a \p Ty 2770 /// result. 2771 /// sext Opnd to Ty. 2772 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2773 : TypePromotionAction(InsertPt) { 2774 IRBuilder<> Builder(InsertPt); 2775 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2776 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2777 } 2778 2779 /// Get the built value. 2780 Value *getBuiltValue() { return Val; } 2781 2782 /// Remove the built instruction. 2783 void undo() override { 2784 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2785 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2786 IVal->eraseFromParent(); 2787 } 2788 }; 2789 2790 /// Build a zero extension instruction. 2791 class ZExtBuilder : public TypePromotionAction { 2792 Value *Val; 2793 2794 public: 2795 /// Build a zero extension instruction of \p Opnd producing a \p Ty 2796 /// result. 2797 /// zext Opnd to Ty. 2798 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2799 : TypePromotionAction(InsertPt) { 2800 IRBuilder<> Builder(InsertPt); 2801 Builder.SetCurrentDebugLocation(DebugLoc()); 2802 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2803 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2804 } 2805 2806 /// Get the built value. 2807 Value *getBuiltValue() { return Val; } 2808 2809 /// Remove the built instruction. 2810 void undo() override { 2811 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2812 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2813 IVal->eraseFromParent(); 2814 } 2815 }; 2816 2817 /// Mutate an instruction to another type. 2818 class TypeMutator : public TypePromotionAction { 2819 /// Record the original type. 2820 Type *OrigTy; 2821 2822 public: 2823 /// Mutate the type of \p Inst into \p NewTy. 2824 TypeMutator(Instruction *Inst, Type *NewTy) 2825 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2826 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2827 << "\n"); 2828 Inst->mutateType(NewTy); 2829 } 2830 2831 /// Mutate the instruction back to its original type. 2832 void undo() override { 2833 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2834 << "\n"); 2835 Inst->mutateType(OrigTy); 2836 } 2837 }; 2838 2839 /// Replace the uses of an instruction by another instruction. 2840 class UsesReplacer : public TypePromotionAction { 2841 /// Helper structure to keep track of the replaced uses. 2842 struct InstructionAndIdx { 2843 /// The instruction using the instruction. 2844 Instruction *Inst; 2845 2846 /// The index where this instruction is used for Inst. 2847 unsigned Idx; 2848 2849 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2850 : Inst(Inst), Idx(Idx) {} 2851 }; 2852 2853 /// Keep track of the original uses (pair Instruction, Index). 2854 SmallVector<InstructionAndIdx, 4> OriginalUses; 2855 /// Keep track of the debug users. 2856 SmallVector<DbgValueInst *, 1> DbgValues; 2857 2858 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; 2859 2860 public: 2861 /// Replace all the use of \p Inst by \p New. 2862 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2863 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2864 << "\n"); 2865 // Record the original uses. 
2866 for (Use &U : Inst->uses()) { 2867 Instruction *UserI = cast<Instruction>(U.getUser()); 2868 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2869 } 2870 // Record the debug uses separately. They are not in the instruction's 2871 // use list, but they are replaced by RAUW. 2872 findDbgValues(DbgValues, Inst); 2873 2874 // Now, we can replace the uses. 2875 Inst->replaceAllUsesWith(New); 2876 } 2877 2878 /// Reassign the original uses of Inst to Inst. 2879 void undo() override { 2880 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2881 for (InstructionAndIdx &Use : OriginalUses) 2882 Use.Inst->setOperand(Use.Idx, Inst); 2883 // RAUW has replaced all original uses with references to the new value, 2884 // including the debug uses. Since we are undoing the replacements, 2885 // the original debug uses must also be reinstated to maintain the 2886 // correctness and utility of debug value instructions. 2887 for (auto *DVI : DbgValues) 2888 DVI->replaceVariableLocationOp(DVI->getVariableLocationOp(0), Inst); 2889 } 2890 }; 2891 2892 /// Remove an instruction from the IR. 2893 class InstructionRemover : public TypePromotionAction { 2894 /// Original position of the instruction. 2895 InsertionHandler Inserter; 2896 2897 /// Helper structure to hide all the link to the instruction. In other 2898 /// words, this helps to do as if the instruction was removed. 2899 OperandsHider Hider; 2900 2901 /// Keep track of the uses replaced, if any. 2902 UsesReplacer *Replacer = nullptr; 2903 2904 /// Keep track of instructions removed. 2905 SetOfInstrs &RemovedInsts; 2906 2907 public: 2908 /// Remove all reference of \p Inst and optionally replace all its 2909 /// uses with New. 2910 /// \p RemovedInsts Keep track of the instructions removed by this Action. 2911 /// \pre If !Inst->use_empty(), then New != nullptr 2912 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, 2913 Value *New = nullptr) 2914 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 2915 RemovedInsts(RemovedInsts) { 2916 if (New) 2917 Replacer = new UsesReplacer(Inst, New); 2918 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 2919 RemovedInsts.insert(Inst); 2920 /// The instructions removed here will be freed after completing 2921 /// optimizeBlock() for all blocks as we need to keep track of the 2922 /// removed instructions during promotion. 2923 Inst->removeFromParent(); 2924 } 2925 2926 ~InstructionRemover() override { delete Replacer; } 2927 2928 /// Resurrect the instruction and reassign it to the proper uses if 2929 /// new value was provided when build this action. 2930 void undo() override { 2931 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 2932 Inserter.insert(Inst); 2933 if (Replacer) 2934 Replacer->undo(); 2935 Hider.undo(); 2936 RemovedInsts.erase(Inst); 2937 } 2938 }; 2939 2940 public: 2941 /// Restoration point. 2942 /// The restoration point is a pointer to an action instead of an iterator 2943 /// because the iterator may be invalidated but not the pointer. 2944 using ConstRestorationPt = const TypePromotionAction *; 2945 2946 TypePromotionTransaction(SetOfInstrs &RemovedInsts) 2947 : RemovedInsts(RemovedInsts) {} 2948 2949 /// Advocate every changes made in that transaction. Return true if any change 2950 /// happen. 2951 bool commit(); 2952 2953 /// Undo all the changes made after the given point. 2954 void rollback(ConstRestorationPt Point); 2955 2956 /// Get the current restoration point. 
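  /// A typical (illustrative) use from a caller holding a transaction TPT:
  ///   auto RP = TPT.getRestorationPoint();
  ///   // ... speculative IR changes made through TPT ...
  ///   if (!Profitable)
  ///     TPT.rollback(RP); // undo everything recorded after RP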
2957 ConstRestorationPt getRestorationPoint() const; 2958 2959 /// \name API for IR modification with state keeping to support rollback. 2960 /// @{ 2961 /// Same as Instruction::setOperand. 2962 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 2963 2964 /// Same as Instruction::eraseFromParent. 2965 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 2966 2967 /// Same as Value::replaceAllUsesWith. 2968 void replaceAllUsesWith(Instruction *Inst, Value *New); 2969 2970 /// Same as Value::mutateType. 2971 void mutateType(Instruction *Inst, Type *NewTy); 2972 2973 /// Same as IRBuilder::createTrunc. 2974 Value *createTrunc(Instruction *Opnd, Type *Ty); 2975 2976 /// Same as IRBuilder::createSExt. 2977 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 2978 2979 /// Same as IRBuilder::createZExt. 2980 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 2981 2982 /// Same as Instruction::moveBefore. 2983 void moveBefore(Instruction *Inst, Instruction *Before); 2984 /// @} 2985 2986 private: 2987 /// The ordered list of actions made so far. 2988 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2989 2990 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; 2991 2992 SetOfInstrs &RemovedInsts; 2993 }; 2994 2995 } // end anonymous namespace 2996 2997 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2998 Value *NewVal) { 2999 Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>( 3000 Inst, Idx, NewVal)); 3001 } 3002 3003 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 3004 Value *NewVal) { 3005 Actions.push_back( 3006 std::make_unique<TypePromotionTransaction::InstructionRemover>( 3007 Inst, RemovedInsts, NewVal)); 3008 } 3009 3010 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 3011 Value *New) { 3012 Actions.push_back( 3013 std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 3014 } 3015 3016 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 3017 Actions.push_back( 3018 std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 3019 } 3020 3021 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 3022 Type *Ty) { 3023 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 3024 Value *Val = Ptr->getBuiltValue(); 3025 Actions.push_back(std::move(Ptr)); 3026 return Val; 3027 } 3028 3029 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 3030 Value *Opnd, Type *Ty) { 3031 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 3032 Value *Val = Ptr->getBuiltValue(); 3033 Actions.push_back(std::move(Ptr)); 3034 return Val; 3035 } 3036 3037 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 3038 Value *Opnd, Type *Ty) { 3039 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 3040 Value *Val = Ptr->getBuiltValue(); 3041 Actions.push_back(std::move(Ptr)); 3042 return Val; 3043 } 3044 3045 void TypePromotionTransaction::moveBefore(Instruction *Inst, 3046 Instruction *Before) { 3047 Actions.push_back( 3048 std::make_unique<TypePromotionTransaction::InstructionMoveBefore>( 3049 Inst, Before)); 3050 } 3051 3052 TypePromotionTransaction::ConstRestorationPt 3053 TypePromotionTransaction::getRestorationPoint() const { 3054 return !Actions.empty() ? 
Actions.back().get() : nullptr; 3055 } 3056 3057 bool TypePromotionTransaction::commit() { 3058 for (std::unique_ptr<TypePromotionAction> &Action : Actions) 3059 Action->commit(); 3060 bool Modified = !Actions.empty(); 3061 Actions.clear(); 3062 return Modified; 3063 } 3064 3065 void TypePromotionTransaction::rollback( 3066 TypePromotionTransaction::ConstRestorationPt Point) { 3067 while (!Actions.empty() && Point != Actions.back().get()) { 3068 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 3069 Curr->undo(); 3070 } 3071 } 3072 3073 namespace { 3074 3075 /// A helper class for matching addressing modes. 3076 /// 3077 /// This encapsulates the logic for matching the target-legal addressing modes. 3078 class AddressingModeMatcher { 3079 SmallVectorImpl<Instruction*> &AddrModeInsts; 3080 const TargetLowering &TLI; 3081 const TargetRegisterInfo &TRI; 3082 const DataLayout &DL; 3083 const LoopInfo &LI; 3084 const std::function<const DominatorTree &()> getDTFn; 3085 3086 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 3087 /// the memory instruction that we're computing this address for. 3088 Type *AccessTy; 3089 unsigned AddrSpace; 3090 Instruction *MemoryInst; 3091 3092 /// This is the addressing mode that we're building up. This is 3093 /// part of the return value of this addressing mode matching stuff. 3094 ExtAddrMode &AddrMode; 3095 3096 /// The instructions inserted by other CodeGenPrepare optimizations. 3097 const SetOfInstrs &InsertedInsts; 3098 3099 /// A map from the instructions to their type before promotion. 3100 InstrToOrigTy &PromotedInsts; 3101 3102 /// The ongoing transaction where every action should be registered. 3103 TypePromotionTransaction &TPT; 3104 3105 // A GEP which has too large offset to be folded into the addressing mode. 3106 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; 3107 3108 /// This is set to true when we should not do profitability checks. 3109 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 3110 bool IgnoreProfitability; 3111 3112 /// True if we are optimizing for size. 3113 bool OptSize; 3114 3115 ProfileSummaryInfo *PSI; 3116 BlockFrequencyInfo *BFI; 3117 3118 AddressingModeMatcher( 3119 SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, 3120 const TargetRegisterInfo &TRI, const LoopInfo &LI, 3121 const std::function<const DominatorTree &()> getDTFn, 3122 Type *AT, unsigned AS, Instruction *MI, ExtAddrMode &AM, 3123 const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, 3124 TypePromotionTransaction &TPT, 3125 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, 3126 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) 3127 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), 3128 DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn), 3129 AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM), 3130 InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT), 3131 LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) { 3132 IgnoreProfitability = false; 3133 } 3134 3135 public: 3136 /// Find the maximal addressing mode that a load/store of V can fold, 3137 /// give an access type of AccessTy. This returns a list of involved 3138 /// instructions in AddrModeInsts. 3139 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 3140 /// optimizations. 3141 /// \p PromotedInsts maps the instructions to their type before promotion. 
3142 /// \p The ongoing transaction where every action should be registered. 3143 static ExtAddrMode 3144 Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, 3145 SmallVectorImpl<Instruction *> &AddrModeInsts, 3146 const TargetLowering &TLI, const LoopInfo &LI, 3147 const std::function<const DominatorTree &()> getDTFn, 3148 const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts, 3149 InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, 3150 std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, 3151 bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { 3152 ExtAddrMode Result; 3153 3154 bool Success = AddressingModeMatcher( 3155 AddrModeInsts, TLI, TRI, LI, getDTFn, AccessTy, AS, MemoryInst, Result, 3156 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, 3157 BFI).matchAddr(V, 0); 3158 (void)Success; assert(Success && "Couldn't select *anything*?"); 3159 return Result; 3160 } 3161 3162 private: 3163 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 3164 bool matchAddr(Value *Addr, unsigned Depth); 3165 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, 3166 bool *MovedAway = nullptr); 3167 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 3168 ExtAddrMode &AMBefore, 3169 ExtAddrMode &AMAfter); 3170 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 3171 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 3172 Value *PromotedOperand) const; 3173 }; 3174 3175 class PhiNodeSet; 3176 3177 /// An iterator for PhiNodeSet. 3178 class PhiNodeSetIterator { 3179 PhiNodeSet * const Set; 3180 size_t CurrentIndex = 0; 3181 3182 public: 3183 /// The constructor. Start should point to either a valid element, or be equal 3184 /// to the size of the underlying SmallVector of the PhiNodeSet. 3185 PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start); 3186 PHINode * operator*() const; 3187 PhiNodeSetIterator& operator++(); 3188 bool operator==(const PhiNodeSetIterator &RHS) const; 3189 bool operator!=(const PhiNodeSetIterator &RHS) const; 3190 }; 3191 3192 /// Keeps a set of PHINodes. 3193 /// 3194 /// This is a minimal set implementation for a specific use case: 3195 /// It is very fast when there are very few elements, but also provides good 3196 /// performance when there are many. It is similar to SmallPtrSet, but also 3197 /// provides iteration by insertion order, which is deterministic and stable 3198 /// across runs. It is also similar to SmallSetVector, but provides removing 3199 /// elements in O(1) time. This is achieved by not actually removing the element 3200 /// from the underlying vector, so comes at the cost of using more memory, but 3201 /// that is fine, since PhiNodeSets are used as short lived objects. 3202 class PhiNodeSet { 3203 friend class PhiNodeSetIterator; 3204 3205 using MapType = SmallDenseMap<PHINode *, size_t, 32>; 3206 using iterator = PhiNodeSetIterator; 3207 3208 /// Keeps the elements in the order of their insertion in the underlying 3209 /// vector. To achieve constant time removal, it never deletes any element. 3210 SmallVector<PHINode *, 32> NodeList; 3211 3212 /// Keeps the elements in the underlying set implementation. This (and not the 3213 /// NodeList defined above) is the source of truth on whether an element 3214 /// is actually in the collection. 3215 MapType NodeMap; 3216 3217 /// Points to the first valid (not deleted) element when the set is not empty 3218 /// and the value is not zero. 
Equals to the size of the underlying vector 3219 /// when the set is empty. When the value is 0, as in the beginning, the 3220 /// first element may or may not be valid. 3221 size_t FirstValidElement = 0; 3222 3223 public: 3224 /// Inserts a new element to the collection. 3225 /// \returns true if the element is actually added, i.e. was not in the 3226 /// collection before the operation. 3227 bool insert(PHINode *Ptr) { 3228 if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { 3229 NodeList.push_back(Ptr); 3230 return true; 3231 } 3232 return false; 3233 } 3234 3235 /// Removes the element from the collection. 3236 /// \returns whether the element is actually removed, i.e. was in the 3237 /// collection before the operation. 3238 bool erase(PHINode *Ptr) { 3239 if (NodeMap.erase(Ptr)) { 3240 SkipRemovedElements(FirstValidElement); 3241 return true; 3242 } 3243 return false; 3244 } 3245 3246 /// Removes all elements and clears the collection. 3247 void clear() { 3248 NodeMap.clear(); 3249 NodeList.clear(); 3250 FirstValidElement = 0; 3251 } 3252 3253 /// \returns an iterator that will iterate the elements in the order of 3254 /// insertion. 3255 iterator begin() { 3256 if (FirstValidElement == 0) 3257 SkipRemovedElements(FirstValidElement); 3258 return PhiNodeSetIterator(this, FirstValidElement); 3259 } 3260 3261 /// \returns an iterator that points to the end of the collection. 3262 iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } 3263 3264 /// Returns the number of elements in the collection. 3265 size_t size() const { 3266 return NodeMap.size(); 3267 } 3268 3269 /// \returns 1 if the given element is in the collection, and 0 if otherwise. 3270 size_t count(PHINode *Ptr) const { 3271 return NodeMap.count(Ptr); 3272 } 3273 3274 private: 3275 /// Updates the CurrentIndex so that it will point to a valid element. 3276 /// 3277 /// If the element of NodeList at CurrentIndex is valid, it does not 3278 /// change it. If there are no more valid elements, it updates CurrentIndex 3279 /// to point to the end of the NodeList. 3280 void SkipRemovedElements(size_t &CurrentIndex) { 3281 while (CurrentIndex < NodeList.size()) { 3282 auto it = NodeMap.find(NodeList[CurrentIndex]); 3283 // If the element has been deleted and added again later, NodeMap will 3284 // point to a different index, so CurrentIndex will still be invalid. 3285 if (it != NodeMap.end() && it->second == CurrentIndex) 3286 break; 3287 ++CurrentIndex; 3288 } 3289 } 3290 }; 3291 3292 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) 3293 : Set(Set), CurrentIndex(Start) {} 3294 3295 PHINode * PhiNodeSetIterator::operator*() const { 3296 assert(CurrentIndex < Set->NodeList.size() && 3297 "PhiNodeSet access out of range"); 3298 return Set->NodeList[CurrentIndex]; 3299 } 3300 3301 PhiNodeSetIterator& PhiNodeSetIterator::operator++() { 3302 assert(CurrentIndex < Set->NodeList.size() && 3303 "PhiNodeSet access out of range"); 3304 ++CurrentIndex; 3305 Set->SkipRemovedElements(CurrentIndex); 3306 return *this; 3307 } 3308 3309 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { 3310 return CurrentIndex == RHS.CurrentIndex; 3311 } 3312 3313 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { 3314 return !((*this) == RHS); 3315 } 3316 3317 /// Keep track of simplification of Phi nodes. 3318 /// Accept the set of all phi nodes and erase phi node from this set 3319 /// if it is simplified. 
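/// Illustrative sketch of the replacement chaining this class provides
/// (A, B and C are placeholder values, SQ a SimplifyQuery in scope):
/// \code
///   SimplificationTracker ST(SQ);
///   ST.Put(A, B);            // A was simplified to B
///   ST.Put(B, C);            // later, B itself was simplified to C
///   Value *V = ST.Get(A);    // V == C: Get follows the whole chain
/// \endcode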
3320 class SimplificationTracker { 3321 DenseMap<Value *, Value *> Storage; 3322 const SimplifyQuery &SQ; 3323 // Tracks newly created Phi nodes. The elements are iterated by insertion 3324 // order. 3325 PhiNodeSet AllPhiNodes; 3326 // Tracks newly created Select nodes. 3327 SmallPtrSet<SelectInst *, 32> AllSelectNodes; 3328 3329 public: 3330 SimplificationTracker(const SimplifyQuery &sq) 3331 : SQ(sq) {} 3332 3333 Value *Get(Value *V) { 3334 do { 3335 auto SV = Storage.find(V); 3336 if (SV == Storage.end()) 3337 return V; 3338 V = SV->second; 3339 } while (true); 3340 } 3341 3342 Value *Simplify(Value *Val) { 3343 SmallVector<Value *, 32> WorkList; 3344 SmallPtrSet<Value *, 32> Visited; 3345 WorkList.push_back(Val); 3346 while (!WorkList.empty()) { 3347 auto *P = WorkList.pop_back_val(); 3348 if (!Visited.insert(P).second) 3349 continue; 3350 if (auto *PI = dyn_cast<Instruction>(P)) 3351 if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { 3352 for (auto *U : PI->users()) 3353 WorkList.push_back(cast<Value>(U)); 3354 Put(PI, V); 3355 PI->replaceAllUsesWith(V); 3356 if (auto *PHI = dyn_cast<PHINode>(PI)) 3357 AllPhiNodes.erase(PHI); 3358 if (auto *Select = dyn_cast<SelectInst>(PI)) 3359 AllSelectNodes.erase(Select); 3360 PI->eraseFromParent(); 3361 } 3362 } 3363 return Get(Val); 3364 } 3365 3366 void Put(Value *From, Value *To) { 3367 Storage.insert({ From, To }); 3368 } 3369 3370 void ReplacePhi(PHINode *From, PHINode *To) { 3371 Value* OldReplacement = Get(From); 3372 while (OldReplacement != From) { 3373 From = To; 3374 To = dyn_cast<PHINode>(OldReplacement); 3375 OldReplacement = Get(From); 3376 } 3377 assert(To && Get(To) == To && "Replacement PHI node is already replaced."); 3378 Put(From, To); 3379 From->replaceAllUsesWith(To); 3380 AllPhiNodes.erase(From); 3381 From->eraseFromParent(); 3382 } 3383 3384 PhiNodeSet& newPhiNodes() { return AllPhiNodes; } 3385 3386 void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } 3387 3388 void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } 3389 3390 unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } 3391 3392 unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } 3393 3394 void destroyNewNodes(Type *CommonType) { 3395 // For safe erasing, replace the uses with dummy value first. 3396 auto *Dummy = UndefValue::get(CommonType); 3397 for (auto *I : AllPhiNodes) { 3398 I->replaceAllUsesWith(Dummy); 3399 I->eraseFromParent(); 3400 } 3401 AllPhiNodes.clear(); 3402 for (auto *I : AllSelectNodes) { 3403 I->replaceAllUsesWith(Dummy); 3404 I->eraseFromParent(); 3405 } 3406 AllSelectNodes.clear(); 3407 } 3408 }; 3409 3410 /// A helper class for combining addressing modes. 3411 class AddressingModeCombiner { 3412 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; 3413 typedef std::pair<PHINode *, PHINode *> PHIPair; 3414 3415 private: 3416 /// The addressing modes we've collected. 3417 SmallVector<ExtAddrMode, 16> AddrModes; 3418 3419 /// The field in which the AddrModes differ, when we have more than one. 3420 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; 3421 3422 /// Are the AddrModes that we have all just equal to their original values? 3423 bool AllAddrModesTrivial = true; 3424 3425 /// Common Type for all different fields in addressing modes. 3426 Type *CommonType; 3427 3428 /// SimplifyQuery for simplifyInstruction utility. 3429 const SimplifyQuery &SQ; 3430 3431 /// Original Address. 
3432 Value *Original; 3433 3434 public: 3435 AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) 3436 : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} 3437 3438 /// Get the combined AddrMode 3439 const ExtAddrMode &getAddrMode() const { 3440 return AddrModes[0]; 3441 } 3442 3443 /// Add a new AddrMode if it's compatible with the AddrModes we already 3444 /// have. 3445 /// \return True iff we succeeded in doing so. 3446 bool addNewAddrMode(ExtAddrMode &NewAddrMode) { 3447 // Take note of if we have any non-trivial AddrModes, as we need to detect 3448 // when all AddrModes are trivial as then we would introduce a phi or select 3449 // which just duplicates what's already there. 3450 AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); 3451 3452 // If this is the first addrmode then everything is fine. 3453 if (AddrModes.empty()) { 3454 AddrModes.emplace_back(NewAddrMode); 3455 return true; 3456 } 3457 3458 // Figure out how different this is from the other address modes, which we 3459 // can do just by comparing against the first one given that we only care 3460 // about the cumulative difference. 3461 ExtAddrMode::FieldName ThisDifferentField = 3462 AddrModes[0].compare(NewAddrMode); 3463 if (DifferentField == ExtAddrMode::NoField) 3464 DifferentField = ThisDifferentField; 3465 else if (DifferentField != ThisDifferentField) 3466 DifferentField = ExtAddrMode::MultipleFields; 3467 3468 // If NewAddrMode differs in more than one dimension we cannot handle it. 3469 bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; 3470 3471 // If Scale Field is different then we reject. 3472 CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; 3473 3474 // We also must reject the case when base offset is different and 3475 // scale reg is not null, we cannot handle this case due to merge of 3476 // different offsets will be used as ScaleReg. 3477 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || 3478 !NewAddrMode.ScaledReg); 3479 3480 // We also must reject the case when GV is different and BaseReg installed 3481 // due to we want to use base reg as a merge of GV values. 3482 CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || 3483 !NewAddrMode.HasBaseReg); 3484 3485 // Even if NewAddMode is the same we still need to collect it due to 3486 // original value is different. And later we will need all original values 3487 // as anchors during finding the common Phi node. 3488 if (CanHandle) 3489 AddrModes.emplace_back(NewAddrMode); 3490 else 3491 AddrModes.clear(); 3492 3493 return CanHandle; 3494 } 3495 3496 /// Combine the addressing modes we've collected into a single 3497 /// addressing mode. 3498 /// \return True iff we successfully combined them or we only had one so 3499 /// didn't need to combine them anyway. 3500 bool combineAddrModes() { 3501 // If we have no AddrModes then they can't be combined. 3502 if (AddrModes.size() == 0) 3503 return false; 3504 3505 // A single AddrMode can trivially be combined. 3506 if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) 3507 return true; 3508 3509 // If the AddrModes we collected are all just equal to the value they are 3510 // derived from then combining them wouldn't do anything useful. 3511 if (AllAddrModesTrivial) 3512 return false; 3513 3514 if (!addrModeCombiningAllowed()) 3515 return false; 3516 3517 // Build a map between <original value, basic block where we saw it> to 3518 // value of base register. 
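// For illustration (hypothetical IR, with the modes differing only in the
// base register): given the two addresses
//   %a1 = getelementptr i8, i8* %p1, i64 16
//   %a2 = getelementptr i8, i8* %p2, i64 16
// the map is { %a1 -> %p1, %a2 -> %p2 }, and findCommon then tries to build
// (or reuse) a single phi of %p1 and %p2 to act as the merged base register.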
3519 // Bail out if there is no common type. 3520 FoldAddrToValueMapping Map; 3521 if (!initializeMap(Map)) 3522 return false; 3523 3524 Value *CommonValue = findCommon(Map); 3525 if (CommonValue) 3526 AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); 3527 return CommonValue != nullptr; 3528 } 3529 3530 private: 3531 /// Initialize Map with anchor values. For address seen 3532 /// we set the value of different field saw in this address. 3533 /// At the same time we find a common type for different field we will 3534 /// use to create new Phi/Select nodes. Keep it in CommonType field. 3535 /// Return false if there is no common type found. 3536 bool initializeMap(FoldAddrToValueMapping &Map) { 3537 // Keep track of keys where the value is null. We will need to replace it 3538 // with constant null when we know the common type. 3539 SmallVector<Value *, 2> NullValue; 3540 Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); 3541 for (auto &AM : AddrModes) { 3542 Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); 3543 if (DV) { 3544 auto *Type = DV->getType(); 3545 if (CommonType && CommonType != Type) 3546 return false; 3547 CommonType = Type; 3548 Map[AM.OriginalValue] = DV; 3549 } else { 3550 NullValue.push_back(AM.OriginalValue); 3551 } 3552 } 3553 assert(CommonType && "At least one non-null value must be!"); 3554 for (auto *V : NullValue) 3555 Map[V] = Constant::getNullValue(CommonType); 3556 return true; 3557 } 3558 3559 /// We have mapping between value A and other value B where B was a field in 3560 /// addressing mode represented by A. Also we have an original value C 3561 /// representing an address we start with. Traversing from C through phi and 3562 /// selects we ended up with A's in a map. This utility function tries to find 3563 /// a value V which is a field in addressing mode C and traversing through phi 3564 /// nodes and selects we will end up in corresponded values B in a map. 3565 /// The utility will create a new Phi/Selects if needed. 3566 // The simple example looks as follows: 3567 // BB1: 3568 // p1 = b1 + 40 3569 // br cond BB2, BB3 3570 // BB2: 3571 // p2 = b2 + 40 3572 // br BB3 3573 // BB3: 3574 // p = phi [p1, BB1], [p2, BB2] 3575 // v = load p 3576 // Map is 3577 // p1 -> b1 3578 // p2 -> b2 3579 // Request is 3580 // p -> ? 3581 // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. 3582 Value *findCommon(FoldAddrToValueMapping &Map) { 3583 // Tracks the simplification of newly created phi nodes. The reason we use 3584 // this mapping is because we will add new created Phi nodes in AddrToBase. 3585 // Simplification of Phi nodes is recursive, so some Phi node may 3586 // be simplified after we added it to AddrToBase. In reality this 3587 // simplification is possible only if original phi/selects were not 3588 // simplified yet. 3589 // Using this mapping we can find the current value in AddrToBase. 3590 SimplificationTracker ST(SQ); 3591 3592 // First step, DFS to create PHI nodes for all intermediate blocks. 3593 // Also fill traverse order for the second step. 3594 SmallVector<Value *, 32> TraverseOrder; 3595 InsertPlaceholders(Map, TraverseOrder, ST); 3596 3597 // Second Step, fill new nodes by merged values and simplify if possible. 3598 FillPlaceholders(Map, TraverseOrder, ST); 3599 3600 if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { 3601 ST.destroyNewNodes(CommonType); 3602 return nullptr; 3603 } 3604 3605 // Now we'd like to match New Phi nodes to existed ones. 
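// For example (illustrative): if the function already contains
//   %base = phi i8* [ %p1, %BB1 ], [ %p2, %BB2 ]
// and the placeholder we just created is an identical
//   %sunk_phi = phi i8* [ %p1, %BB1 ], [ %p2, %BB2 ]
// then the matching below lets us reuse %base and drop the duplicate instead
// of keeping a brand new phi.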
3606 unsigned PhiNotMatchedCount = 0; 3607 if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) { 3608 ST.destroyNewNodes(CommonType); 3609 return nullptr; 3610 } 3611 3612 auto *Result = ST.Get(Map.find(Original)->second); 3613 if (Result) { 3614 NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount; 3615 NumMemoryInstsSelectCreated += ST.countNewSelectNodes(); 3616 } 3617 return Result; 3618 } 3619 3620 /// Try to match the PHI node \p PHI to \p Candidate. 3621 /// \p Matcher tracks the Phi nodes that have been matched. 3622 bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, 3623 SmallSetVector<PHIPair, 8> &Matcher, 3624 PhiNodeSet &PhiNodesToMatch) { 3625 SmallVector<PHIPair, 8> WorkList; 3626 Matcher.insert({ PHI, Candidate }); 3627 SmallSet<PHINode *, 8> MatchedPHIs; 3628 MatchedPHIs.insert(PHI); 3629 WorkList.push_back({ PHI, Candidate }); 3630 SmallSet<PHIPair, 8> Visited; 3631 while (!WorkList.empty()) { 3632 auto Item = WorkList.pop_back_val(); 3633 if (!Visited.insert(Item).second) 3634 continue; 3635 // We iterate over all incoming values of both Phis to compare them. 3636 // If the values differ, both of them are Phis, the first one is a 3637 // Phi we created (and therefore subject to matching), and both live in 3638 // the same basic block, then the pair can match provided their incoming 3639 // values match. We record the pair as matched and add it to the work list to verify that. 3640 for (auto B : Item.first->blocks()) { 3641 Value *FirstValue = Item.first->getIncomingValueForBlock(B); 3642 Value *SecondValue = Item.second->getIncomingValueForBlock(B); 3643 if (FirstValue == SecondValue) 3644 continue; 3645 3646 PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue); 3647 PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue); 3648 3649 // If one of them is not a Phi, or 3650 // the first one is not a Phi node from the set we'd like to match, or 3651 // the Phi nodes live in different basic blocks, then 3652 // we will not be able to match. 3653 if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) || 3654 FirstPhi->getParent() != SecondPhi->getParent()) 3655 return false; 3656 3657 // If we already matched them then continue. 3658 if (Matcher.count({ FirstPhi, SecondPhi })) 3659 continue; 3660 // So the values are different and do not match yet. We need them to 3661 // match. (But we register no more than one match per PHI node, so that 3662 // we won't later try to replace them twice.) 3663 if (MatchedPHIs.insert(FirstPhi).second) 3664 Matcher.insert({ FirstPhi, SecondPhi }); 3665 // But we must still verify that they do. 3666 WorkList.push_back({ FirstPhi, SecondPhi }); 3667 } 3668 } 3669 return true; 3670 } 3671 3672 /// For the given set of PHI nodes (in the SimplificationTracker) try 3673 /// to find their equivalents among the existing PHI nodes. 3674 /// Returns false if the matching fails and creation of new Phi nodes is disabled. 3675 bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, 3676 unsigned &PhiNotMatchedCount) { 3677 // Matched and PhiNodesToMatch iterate their elements in a deterministic 3678 // order, so the replacements (ReplacePhi) are also done in a deterministic 3679 // order. 3680 SmallSetVector<PHIPair, 8> Matched; 3681 SmallPtrSet<PHINode *, 8> WillNotMatch; 3682 PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes(); 3683 while (PhiNodesToMatch.size()) { 3684 PHINode *PHI = *PhiNodesToMatch.begin(); 3685 3686 // Seed WillNotMatch with this Phi: if nothing in its block matches, it stays unmatched. 3687 WillNotMatch.clear(); 3688 WillNotMatch.insert(PHI); 3689 3690 // Traverse all Phis in the block until we find an equivalent one or fail to do so.
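// Illustrative recursive case: the incoming values may themselves be phis
// that have to match, e.g. (hypothetical IR)
//   %sunk_phi = phi i64 [ %a, %BB1 ], [ %sunk_inner, %BB2 ]
//   %old      = phi i64 [ %a, %BB1 ], [ %old_inner,  %BB2 ]
// can only match if %sunk_inner (a phi we created) and %old_inner live in
// the same block and match as well; that is what MatchPhiNode's work list
// verifies.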
3691 bool IsMatched = false; 3692 for (auto &P : PHI->getParent()->phis()) { 3693 if (&P == PHI) 3694 continue; 3695 if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) 3696 break; 3697 // If it does not match, collect all Phi nodes from matcher. 3698 // if we end up with no match, them all these Phi nodes will not match 3699 // later. 3700 for (auto M : Matched) 3701 WillNotMatch.insert(M.first); 3702 Matched.clear(); 3703 } 3704 if (IsMatched) { 3705 // Replace all matched values and erase them. 3706 for (auto MV : Matched) 3707 ST.ReplacePhi(MV.first, MV.second); 3708 Matched.clear(); 3709 continue; 3710 } 3711 // If we are not allowed to create new nodes then bail out. 3712 if (!AllowNewPhiNodes) 3713 return false; 3714 // Just remove all seen values in matcher. They will not match anything. 3715 PhiNotMatchedCount += WillNotMatch.size(); 3716 for (auto *P : WillNotMatch) 3717 PhiNodesToMatch.erase(P); 3718 } 3719 return true; 3720 } 3721 /// Fill the placeholders with values from predecessors and simplify them. 3722 void FillPlaceholders(FoldAddrToValueMapping &Map, 3723 SmallVectorImpl<Value *> &TraverseOrder, 3724 SimplificationTracker &ST) { 3725 while (!TraverseOrder.empty()) { 3726 Value *Current = TraverseOrder.pop_back_val(); 3727 assert(Map.find(Current) != Map.end() && "No node to fill!!!"); 3728 Value *V = Map[Current]; 3729 3730 if (SelectInst *Select = dyn_cast<SelectInst>(V)) { 3731 // CurrentValue also must be Select. 3732 auto *CurrentSelect = cast<SelectInst>(Current); 3733 auto *TrueValue = CurrentSelect->getTrueValue(); 3734 assert(Map.find(TrueValue) != Map.end() && "No True Value!"); 3735 Select->setTrueValue(ST.Get(Map[TrueValue])); 3736 auto *FalseValue = CurrentSelect->getFalseValue(); 3737 assert(Map.find(FalseValue) != Map.end() && "No False Value!"); 3738 Select->setFalseValue(ST.Get(Map[FalseValue])); 3739 } else { 3740 // Must be a Phi node then. 3741 auto *PHI = cast<PHINode>(V); 3742 // Fill the Phi node with values from predecessors. 3743 for (auto *B : predecessors(PHI->getParent())) { 3744 Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B); 3745 assert(Map.find(PV) != Map.end() && "No predecessor Value!"); 3746 PHI->addIncoming(ST.Get(Map[PV]), B); 3747 } 3748 } 3749 Map[Current] = ST.Simplify(V); 3750 } 3751 } 3752 3753 /// Starting from original value recursively iterates over def-use chain up to 3754 /// known ending values represented in a map. For each traversed phi/select 3755 /// inserts a placeholder Phi or Select. 3756 /// Reports all new created Phi/Select nodes by adding them to set. 3757 /// Also reports and order in what values have been traversed. 3758 void InsertPlaceholders(FoldAddrToValueMapping &Map, 3759 SmallVectorImpl<Value *> &TraverseOrder, 3760 SimplificationTracker &ST) { 3761 SmallVector<Value *, 32> Worklist; 3762 assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) && 3763 "Address must be a Phi or Select node"); 3764 auto *Dummy = UndefValue::get(CommonType); 3765 Worklist.push_back(Original); 3766 while (!Worklist.empty()) { 3767 Value *Current = Worklist.pop_back_val(); 3768 // if it is already visited or it is an ending value then skip it. 3769 if (Map.find(Current) != Map.end()) 3770 continue; 3771 TraverseOrder.push_back(Current); 3772 3773 // CurrentValue must be a Phi node or select. All others must be covered 3774 // by anchors. 3775 if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) { 3776 // Is it OK to get metadata from OrigSelect?! 
3777 // Create a Select placeholder with dummy value. 3778 SelectInst *Select = SelectInst::Create( 3779 CurrentSelect->getCondition(), Dummy, Dummy, 3780 CurrentSelect->getName(), CurrentSelect, CurrentSelect); 3781 Map[Current] = Select; 3782 ST.insertNewSelect(Select); 3783 // We are interested in True and False values. 3784 Worklist.push_back(CurrentSelect->getTrueValue()); 3785 Worklist.push_back(CurrentSelect->getFalseValue()); 3786 } else { 3787 // It must be a Phi node then. 3788 PHINode *CurrentPhi = cast<PHINode>(Current); 3789 unsigned PredCount = CurrentPhi->getNumIncomingValues(); 3790 PHINode *PHI = 3791 PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); 3792 Map[Current] = PHI; 3793 ST.insertNewPhi(PHI); 3794 append_range(Worklist, CurrentPhi->incoming_values()); 3795 } 3796 } 3797 } 3798 3799 bool addrModeCombiningAllowed() { 3800 if (DisableComplexAddrModes) 3801 return false; 3802 switch (DifferentField) { 3803 default: 3804 return false; 3805 case ExtAddrMode::BaseRegField: 3806 return AddrSinkCombineBaseReg; 3807 case ExtAddrMode::BaseGVField: 3808 return AddrSinkCombineBaseGV; 3809 case ExtAddrMode::BaseOffsField: 3810 return AddrSinkCombineBaseOffs; 3811 case ExtAddrMode::ScaledRegField: 3812 return AddrSinkCombineScaledReg; 3813 } 3814 } 3815 }; 3816 } // end anonymous namespace 3817 3818 /// Try adding ScaleReg*Scale to the current addressing mode. 3819 /// Return true and update AddrMode if this addr mode is legal for the target, 3820 /// false if not. 3821 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 3822 unsigned Depth) { 3823 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 3824 // mode. Just process that directly. 3825 if (Scale == 1) 3826 return matchAddr(ScaleReg, Depth); 3827 3828 // If the scale is 0, it takes nothing to add this. 3829 if (Scale == 0) 3830 return true; 3831 3832 // If we already have a scale of this value, we can add to it, otherwise, we 3833 // need an available scale field. 3834 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 3835 return false; 3836 3837 ExtAddrMode TestAddrMode = AddrMode; 3838 3839 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 3840 // [A+B + A*7] -> [B+A*8]. 3841 TestAddrMode.Scale += Scale; 3842 TestAddrMode.ScaledReg = ScaleReg; 3843 3844 // If the new address isn't legal, bail out. 3845 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) 3846 return false; 3847 3848 // It was legal, so commit it. 3849 AddrMode = TestAddrMode; 3850 3851 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 3852 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 3853 // X*Scale + C*Scale to addr mode. If we found available IV increment, do not 3854 // go any further: we can reuse it and cannot eliminate it. 3855 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 3856 if (isa<Instruction>(ScaleReg) && // not a constant expr. 3857 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) && 3858 !isIVIncrement(cast<BinaryOperator>(ScaleReg), &LI) && 3859 CI->getValue().isSignedIntN(64)) { 3860 TestAddrMode.InBounds = false; 3861 TestAddrMode.ScaledReg = AddLHS; 3862 TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale; 3863 3864 // If this addressing mode is legal, commit it and remember that we folded 3865 // this instruction. 
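// Worked example (illustrative): with a committed Scale of 4 and
// ScaleReg == (%x + 3), the assignments above try ScaledReg = %x and
// BaseOffs += 3 * 4 == 12, i.e. (%x + 3) * 4 is re-expressed as %x * 4 + 12.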
3866 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { 3867 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 3868 AddrMode = TestAddrMode; 3869 return true; 3870 } 3871 // Restore status quo. 3872 TestAddrMode = AddrMode; 3873 } 3874 3875 auto GetConstantStep = [this](const Value * V) 3876 ->Optional<std::pair<Instruction *, APInt> > { 3877 auto *PN = dyn_cast<PHINode>(V); 3878 if (!PN) 3879 return None; 3880 auto IVInc = getIVIncrement(PN, &LI); 3881 if (!IVInc) 3882 return None; 3883 // TODO: The result of the intrinsics above is two-compliment. However when 3884 // IV inc is expressed as add or sub, iv.next is potentially a poison value. 3885 // If it has nuw or nsw flags, we need to make sure that these flags are 3886 // inferrable at the point of memory instruction. Otherwise we are replacing 3887 // well-defined two-compliment computation with poison. Currently, to avoid 3888 // potentially complex analysis needed to prove this, we reject such cases. 3889 if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first)) 3890 if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap()) 3891 return None; 3892 if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second)) 3893 return std::make_pair(IVInc->first, ConstantStep->getValue()); 3894 return None; 3895 }; 3896 3897 // Try to account for the following special case: 3898 // 1. ScaleReg is an inductive variable; 3899 // 2. We use it with non-zero offset; 3900 // 3. IV's increment is available at the point of memory instruction. 3901 // 3902 // In this case, we may reuse the IV increment instead of the IV Phi to 3903 // achieve the following advantages: 3904 // 1. If IV step matches the offset, we will have no need in the offset; 3905 // 2. Even if they don't match, we will reduce the overlap of living IV 3906 // and IV increment, that will potentially lead to better register 3907 // assignment. 3908 if (AddrMode.BaseOffs) { 3909 if (auto IVStep = GetConstantStep(ScaleReg)) { 3910 Instruction *IVInc = IVStep->first; 3911 APInt Step = IVStep->second; 3912 APInt Offset = Step * AddrMode.Scale; 3913 if (Offset.isSignedIntN(64)) { 3914 TestAddrMode.InBounds = false; 3915 TestAddrMode.ScaledReg = IVInc; 3916 TestAddrMode.BaseOffs -= Offset.getLimitedValue(); 3917 // If this addressing mode is legal, commit it.. 3918 // (Note that we defer the (expensive) domtree base legality check 3919 // to the very last possible point.) 3920 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) && 3921 getDTFn().dominates(IVInc, MemoryInst)) { 3922 AddrModeInsts.push_back(cast<Instruction>(IVInc)); 3923 AddrMode = TestAddrMode; 3924 return true; 3925 } 3926 // Restore status quo. 3927 TestAddrMode = AddrMode; 3928 } 3929 } 3930 } 3931 3932 // Otherwise, just return what we have. 3933 return true; 3934 } 3935 3936 /// This is a little filter, which returns true if an addressing computation 3937 /// involving I might be folded into a load/store accessing it. 3938 /// This doesn't need to be perfect, but needs to accept at least 3939 /// the set of instructions that MatchOperationAddr can. 3940 static bool MightBeFoldableInst(Instruction *I) { 3941 switch (I->getOpcode()) { 3942 case Instruction::BitCast: 3943 case Instruction::AddrSpaceCast: 3944 // Don't touch identity bitcasts. 3945 if (I->getType() == I->getOperand(0)->getType()) 3946 return false; 3947 return I->getType()->isIntOrPtrTy(); 3948 case Instruction::PtrToInt: 3949 // PtrToInt is always a noop, as we know that the int type is pointer sized. 
3950 return true; 3951 case Instruction::IntToPtr: 3952 // We know the input is intptr_t, so this is foldable. 3953 return true; 3954 case Instruction::Add: 3955 return true; 3956 case Instruction::Mul: 3957 case Instruction::Shl: 3958 // Can only handle X*C and X << C. 3959 return isa<ConstantInt>(I->getOperand(1)); 3960 case Instruction::GetElementPtr: 3961 return true; 3962 default: 3963 return false; 3964 } 3965 } 3966 3967 /// Check whether or not \p Val is a legal instruction for \p TLI. 3968 /// \note \p Val is assumed to be the product of some type promotion. 3969 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed 3970 /// to be legal, as the non-promoted value would have had the same state. 3971 static bool isPromotedInstructionLegal(const TargetLowering &TLI, 3972 const DataLayout &DL, Value *Val) { 3973 Instruction *PromotedInst = dyn_cast<Instruction>(Val); 3974 if (!PromotedInst) 3975 return false; 3976 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); 3977 // If the ISDOpcode is undefined, it was undefined before the promotion. 3978 if (!ISDOpcode) 3979 return true; 3980 // Otherwise, check if the promoted instruction is legal or not. 3981 return TLI.isOperationLegalOrCustom( 3982 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); 3983 } 3984 3985 namespace { 3986 3987 /// Hepler class to perform type promotion. 3988 class TypePromotionHelper { 3989 /// Utility function to add a promoted instruction \p ExtOpnd to 3990 /// \p PromotedInsts and record the type of extension we have seen. 3991 static void addPromotedInst(InstrToOrigTy &PromotedInsts, 3992 Instruction *ExtOpnd, 3993 bool IsSExt) { 3994 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; 3995 InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); 3996 if (It != PromotedInsts.end()) { 3997 // If the new extension is same as original, the information in 3998 // PromotedInsts[ExtOpnd] is still correct. 3999 if (It->second.getInt() == ExtTy) 4000 return; 4001 4002 // Now the new extension is different from old extension, we make 4003 // the type information invalid by setting extension type to 4004 // BothExtension. 4005 ExtTy = BothExtension; 4006 } 4007 PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); 4008 } 4009 4010 /// Utility function to query the original type of instruction \p Opnd 4011 /// with a matched extension type. If the extension doesn't match, we 4012 /// cannot use the information we had on the original type. 4013 /// BothExtension doesn't match any extension type. 4014 static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, 4015 Instruction *Opnd, 4016 bool IsSExt) { 4017 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; 4018 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 4019 if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) 4020 return It->second.getPointer(); 4021 return nullptr; 4022 } 4023 4024 /// Utility function to check whether or not a sign or zero extension 4025 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 4026 /// either using the operands of \p Inst or promoting \p Inst. 4027 /// The type of the extension is defined by \p IsSExt. 4028 /// In other words, check if: 4029 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 4030 /// #1 Promotion applies: 4031 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 4032 /// #2 Operand reuses: 4033 /// ext opnd1 to ConsideredExtType. 
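/// For illustration (hypothetical IR):
/// #1: with Inst = "add nsw i8 %a, %b" and a sext of it to i32, promotion
///     rewrites it as "%pa = sext i8 %a to i32", "%pb = sext i8 %b to i32",
///     "add nsw i32 %pa, %pb".
/// #2: with Inst = "trunc i32 %x to i8" where %x = "zext i8 %v to i32", a
///     zext of Inst back to i32 needs no new instruction: it can simply
///     reuse %x.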
4034 /// \p PromotedInsts maps the instructions to their type before promotion. 4035 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 4036 const InstrToOrigTy &PromotedInsts, bool IsSExt); 4037 4038 /// Utility function to determine if \p OpIdx should be promoted when 4039 /// promoting \p Inst. 4040 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 4041 return !(isa<SelectInst>(Inst) && OpIdx == 0); 4042 } 4043 4044 /// Utility function to promote the operand of \p Ext when this 4045 /// operand is a promotable trunc or sext or zext. 4046 /// \p PromotedInsts maps the instructions to their type before promotion. 4047 /// \p CreatedInstsCost[out] contains the cost of all instructions 4048 /// created to promote the operand of Ext. 4049 /// Newly added extensions are inserted in \p Exts. 4050 /// Newly added truncates are inserted in \p Truncs. 4051 /// Should never be called directly. 4052 /// \return The promoted value which is used instead of Ext. 4053 static Value *promoteOperandForTruncAndAnyExt( 4054 Instruction *Ext, TypePromotionTransaction &TPT, 4055 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4056 SmallVectorImpl<Instruction *> *Exts, 4057 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 4058 4059 /// Utility function to promote the operand of \p Ext when this 4060 /// operand is promotable and is not a supported trunc or sext. 4061 /// \p PromotedInsts maps the instructions to their type before promotion. 4062 /// \p CreatedInstsCost[out] contains the cost of all the instructions 4063 /// created to promote the operand of Ext. 4064 /// Newly added extensions are inserted in \p Exts. 4065 /// Newly added truncates are inserted in \p Truncs. 4066 /// Should never be called directly. 4067 /// \return The promoted value which is used instead of Ext. 4068 static Value *promoteOperandForOther(Instruction *Ext, 4069 TypePromotionTransaction &TPT, 4070 InstrToOrigTy &PromotedInsts, 4071 unsigned &CreatedInstsCost, 4072 SmallVectorImpl<Instruction *> *Exts, 4073 SmallVectorImpl<Instruction *> *Truncs, 4074 const TargetLowering &TLI, bool IsSExt); 4075 4076 /// \see promoteOperandForOther. 4077 static Value *signExtendOperandForOther( 4078 Instruction *Ext, TypePromotionTransaction &TPT, 4079 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4080 SmallVectorImpl<Instruction *> *Exts, 4081 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 4082 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 4083 Exts, Truncs, TLI, true); 4084 } 4085 4086 /// \see promoteOperandForOther. 4087 static Value *zeroExtendOperandForOther( 4088 Instruction *Ext, TypePromotionTransaction &TPT, 4089 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4090 SmallVectorImpl<Instruction *> *Exts, 4091 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 4092 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 4093 Exts, Truncs, TLI, false); 4094 } 4095 4096 public: 4097 /// Type for the utility function that promotes the operand of Ext. 4098 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, 4099 InstrToOrigTy &PromotedInsts, 4100 unsigned &CreatedInstsCost, 4101 SmallVectorImpl<Instruction *> *Exts, 4102 SmallVectorImpl<Instruction *> *Truncs, 4103 const TargetLowering &TLI); 4104 4105 /// Given a sign/zero extend instruction \p Ext, return the appropriate 4106 /// action to promote the operand of \p Ext instead of using Ext. 
4107 /// \return NULL if no promotable action is possible with the current 4108 /// sign extension. 4109 /// \p InsertedInsts keeps track of all the instructions inserted by the 4110 /// other CodeGenPrepare optimizations. This information is important 4111 /// because we do not want to promote these instructions as CodeGenPrepare 4112 /// will reinsert them later. Thus creating an infinite loop: create/remove. 4113 /// \p PromotedInsts maps the instructions to their type before promotion. 4114 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, 4115 const TargetLowering &TLI, 4116 const InstrToOrigTy &PromotedInsts); 4117 }; 4118 4119 } // end anonymous namespace 4120 4121 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 4122 Type *ConsideredExtType, 4123 const InstrToOrigTy &PromotedInsts, 4124 bool IsSExt) { 4125 // The promotion helper does not know how to deal with vector types yet. 4126 // To be able to fix that, we would need to fix the places where we 4127 // statically extend, e.g., constants and such. 4128 if (Inst->getType()->isVectorTy()) 4129 return false; 4130 4131 // We can always get through zext. 4132 if (isa<ZExtInst>(Inst)) 4133 return true; 4134 4135 // sext(sext) is ok too. 4136 if (IsSExt && isa<SExtInst>(Inst)) 4137 return true; 4138 4139 // We can get through binary operator, if it is legal. In other words, the 4140 // binary operator must have a nuw or nsw flag. 4141 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 4142 if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) && 4143 ((!IsSExt && BinOp->hasNoUnsignedWrap()) || 4144 (IsSExt && BinOp->hasNoSignedWrap()))) 4145 return true; 4146 4147 // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) 4148 if ((Inst->getOpcode() == Instruction::And || 4149 Inst->getOpcode() == Instruction::Or)) 4150 return true; 4151 4152 // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) 4153 if (Inst->getOpcode() == Instruction::Xor) { 4154 const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)); 4155 // Make sure it is not a NOT. 4156 if (Cst && !Cst->getValue().isAllOnesValue()) 4157 return true; 4158 } 4159 4160 // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst)) 4161 // It may change a poisoned value into a regular value, like 4162 // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12 4163 // poisoned value regular value 4164 // It should be OK since undef covers valid value. 4165 if (Inst->getOpcode() == Instruction::LShr && !IsSExt) 4166 return true; 4167 4168 // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) 4169 // It may change a poisoned value into a regular value, like 4170 // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 4171 // poisoned value regular value 4172 // It should be OK since undef covers valid value. 4173 if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { 4174 const auto *ExtInst = cast<const Instruction>(*Inst->user_begin()); 4175 if (ExtInst->hasOneUse()) { 4176 const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin()); 4177 if (AndInst && AndInst->getOpcode() == Instruction::And) { 4178 const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); 4179 if (Cst && 4180 Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) 4181 return true; 4182 } 4183 } 4184 } 4185 4186 // Check if we can do the following simplification. 
4187 // ext(trunc(opnd)) --> ext(opnd) 4188 if (!isa<TruncInst>(Inst)) 4189 return false; 4190 4191 Value *OpndVal = Inst->getOperand(0); 4192 // Check if we can use this operand in the extension. 4193 // If the type is larger than the result type of the extension, we cannot. 4194 if (!OpndVal->getType()->isIntegerTy() || 4195 OpndVal->getType()->getIntegerBitWidth() > 4196 ConsideredExtType->getIntegerBitWidth()) 4197 return false; 4198 4199 // If the operand of the truncate is not an instruction, we will not have 4200 // any information on the dropped bits. 4201 // (Actually we could for constant but it is not worth the extra logic). 4202 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 4203 if (!Opnd) 4204 return false; 4205 4206 // Check if the source of the type is narrow enough. 4207 // I.e., check that trunc just drops extended bits of the same kind of 4208 // the extension. 4209 // #1 get the type of the operand and check the kind of the extended bits. 4210 const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); 4211 if (OpndType) 4212 ; 4213 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) 4214 OpndType = Opnd->getOperand(0)->getType(); 4215 else 4216 return false; 4217 4218 // #2 check that the truncate just drops extended bits. 4219 return Inst->getType()->getIntegerBitWidth() >= 4220 OpndType->getIntegerBitWidth(); 4221 } 4222 4223 TypePromotionHelper::Action TypePromotionHelper::getAction( 4224 Instruction *Ext, const SetOfInstrs &InsertedInsts, 4225 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 4226 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4227 "Unexpected instruction type"); 4228 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); 4229 Type *ExtTy = Ext->getType(); 4230 bool IsSExt = isa<SExtInst>(Ext); 4231 // If the operand of the extension is not an instruction, we cannot 4232 // get through. 4233 // If it, check we can get through. 4234 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) 4235 return nullptr; 4236 4237 // Do not promote if the operand has been added by codegenprepare. 4238 // Otherwise, it means we are undoing an optimization that is likely to be 4239 // redone, thus causing potential infinite loop. 4240 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) 4241 return nullptr; 4242 4243 // SExt or Trunc instructions. 4244 // Return the related handler. 4245 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || 4246 isa<ZExtInst>(ExtOpnd)) 4247 return promoteOperandForTruncAndAnyExt; 4248 4249 // Regular instruction. 4250 // Abort early if we will have to insert non-free instructions. 4251 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) 4252 return nullptr; 4253 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; 4254 } 4255 4256 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 4257 Instruction *SExt, TypePromotionTransaction &TPT, 4258 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4259 SmallVectorImpl<Instruction *> *Exts, 4260 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 4261 // By construction, the operand of SExt is an instruction. Otherwise we cannot 4262 // get through it and this method should not be called. 4263 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 4264 Value *ExtVal = SExt; 4265 bool HasMergedNonFreeExt = false; 4266 if (isa<ZExtInst>(SExtOpnd)) { 4267 // Replace s|zext(zext(opnd)) 4268 // => zext(opnd). 
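// E.g. (illustrative):
//   %z = zext i8 %v to i16
//   %s = sext i16 %z to i32
// is rewritten into a single "%z2 = zext i8 %v to i32" replacing %s, since
// the high bits of %z are known to be zero.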
4269 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); 4270 Value *ZExt = 4271 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 4272 TPT.replaceAllUsesWith(SExt, ZExt); 4273 TPT.eraseInstruction(SExt); 4274 ExtVal = ZExt; 4275 } else { 4276 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) 4277 // => z|sext(opnd). 4278 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 4279 } 4280 CreatedInstsCost = 0; 4281 4282 // Remove dead code. 4283 if (SExtOpnd->use_empty()) 4284 TPT.eraseInstruction(SExtOpnd); 4285 4286 // Check if the extension is still needed. 4287 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 4288 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 4289 if (ExtInst) { 4290 if (Exts) 4291 Exts->push_back(ExtInst); 4292 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 4293 } 4294 return ExtVal; 4295 } 4296 4297 // At this point we have: ext ty opnd to ty. 4298 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 4299 Value *NextVal = ExtInst->getOperand(0); 4300 TPT.eraseInstruction(ExtInst, NextVal); 4301 return NextVal; 4302 } 4303 4304 Value *TypePromotionHelper::promoteOperandForOther( 4305 Instruction *Ext, TypePromotionTransaction &TPT, 4306 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 4307 SmallVectorImpl<Instruction *> *Exts, 4308 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 4309 bool IsSExt) { 4310 // By construction, the operand of Ext is an instruction. Otherwise we cannot 4311 // get through it and this method should not be called. 4312 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 4313 CreatedInstsCost = 0; 4314 if (!ExtOpnd->hasOneUse()) { 4315 // ExtOpnd will be promoted. 4316 // All its uses, but Ext, will need to use a truncated value of the 4317 // promoted version. 4318 // Create the truncate now. 4319 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 4320 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 4321 // Insert it just after the definition. 4322 ITrunc->moveAfter(ExtOpnd); 4323 if (Truncs) 4324 Truncs->push_back(ITrunc); 4325 } 4326 4327 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 4328 // Restore the operand of Ext (which has been replaced by the previous call 4329 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 4330 TPT.setOperand(Ext, 0, ExtOpnd); 4331 } 4332 4333 // Get through the Instruction: 4334 // 1. Update its type. 4335 // 2. Replace the uses of Ext by Inst. 4336 // 3. Extend each operand that needs to be extended. 4337 4338 // Remember the original type of the instruction before promotion. 4339 // This is useful to know that the high bits are sign extended bits. 4340 addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); 4341 // Step #1. 4342 TPT.mutateType(ExtOpnd, Ext->getType()); 4343 // Step #2. 4344 TPT.replaceAllUsesWith(Ext, ExtOpnd); 4345 // Step #3. 4346 Instruction *ExtForOpnd = Ext; 4347 4348 LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); 4349 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 4350 ++OpIdx) { 4351 LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 4352 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 4353 !shouldExtOperand(ExtOpnd, OpIdx)) { 4354 LLVM_DEBUG(dbgs() << "No need to propagate\n"); 4355 continue; 4356 } 4357 // Check if we can statically extend the operand. 
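// "Statically extend" means folding the extension into the operand when it
// is a constant: e.g. (illustrative) an i8 constant -6 (bit pattern 0xFA)
// becomes i32 -6 when sign-extending and i32 250 when zero-extending, so no
// new ext instruction is needed for that operand.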
4358 Value *Opnd = ExtOpnd->getOperand(OpIdx); 4359 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 4360 LLVM_DEBUG(dbgs() << "Statically extend\n"); 4361 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 4362 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 4363 : Cst->getValue().zext(BitWidth); 4364 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 4365 continue; 4366 } 4367 // UndefValue are typed, so we have to statically sign extend them. 4368 if (isa<UndefValue>(Opnd)) { 4369 LLVM_DEBUG(dbgs() << "Statically extend\n"); 4370 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); 4371 continue; 4372 } 4373 4374 // Otherwise we have to explicitly sign extend the operand. 4375 // Check if Ext was reused to extend an operand. 4376 if (!ExtForOpnd) { 4377 // If yes, create a new one. 4378 LLVM_DEBUG(dbgs() << "More operands to ext\n"); 4379 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) 4380 : TPT.createZExt(Ext, Opnd, Ext->getType()); 4381 if (!isa<Instruction>(ValForExtOpnd)) { 4382 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); 4383 continue; 4384 } 4385 ExtForOpnd = cast<Instruction>(ValForExtOpnd); 4386 } 4387 if (Exts) 4388 Exts->push_back(ExtForOpnd); 4389 TPT.setOperand(ExtForOpnd, 0, Opnd); 4390 4391 // Move the sign extension before the insertion point. 4392 TPT.moveBefore(ExtForOpnd, ExtOpnd); 4393 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); 4394 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); 4395 // If more sext are required, new instructions will have to be created. 4396 ExtForOpnd = nullptr; 4397 } 4398 if (ExtForOpnd == Ext) { 4399 LLVM_DEBUG(dbgs() << "Extension is useless now\n"); 4400 TPT.eraseInstruction(Ext); 4401 } 4402 return ExtOpnd; 4403 } 4404 4405 /// Check whether or not promoting an instruction to a wider type is profitable. 4406 /// \p NewCost gives the cost of extension instructions created by the 4407 /// promotion. 4408 /// \p OldCost gives the cost of extension instructions before the promotion 4409 /// plus the number of instructions that have been 4410 /// matched in the addressing mode the promotion. 4411 /// \p PromotedOperand is the value that has been promoted. 4412 /// \return True if the promotion is profitable, false otherwise. 4413 bool AddressingModeMatcher::isPromotionProfitable( 4414 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { 4415 LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost 4416 << '\n'); 4417 // The cost of the new extensions is greater than the cost of the 4418 // old extension plus what we folded. 4419 // This is not profitable. 4420 if (NewCost > OldCost) 4421 return false; 4422 if (NewCost < OldCost) 4423 return true; 4424 // The promotion is neutral but it may help folding the sign extension in 4425 // loads for instance. 4426 // Check that we did not create an illegal instruction. 4427 return isPromotedInstructionLegal(TLI, DL, PromotedOperand); 4428 } 4429 4430 /// Given an instruction or constant expr, see if we can fold the operation 4431 /// into the addressing mode. If so, update the addressing mode and return 4432 /// true, otherwise return false without modifying AddrMode. 4433 /// If \p MovedAway is not NULL, it contains the information of whether or 4434 /// not AddrInst has to be folded into the addressing mode on success. 4435 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing 4436 /// because it has been moved away. 
4437 /// Thus AddrInst must not be added in the matched instructions. 4438 /// This state can happen when AddrInst is a sext, since it may be moved away. 4439 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 4440 /// not be referenced anymore. 4441 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, 4442 unsigned Depth, 4443 bool *MovedAway) { 4444 // Avoid exponential behavior on extremely deep expression trees. 4445 if (Depth >= 5) return false; 4446 4447 // By default, all matched instructions stay in place. 4448 if (MovedAway) 4449 *MovedAway = false; 4450 4451 switch (Opcode) { 4452 case Instruction::PtrToInt: 4453 // PtrToInt is always a noop, as we know that the int type is pointer sized. 4454 return matchAddr(AddrInst->getOperand(0), Depth); 4455 case Instruction::IntToPtr: { 4456 auto AS = AddrInst->getType()->getPointerAddressSpace(); 4457 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 4458 // This inttoptr is a no-op if the integer type is pointer sized. 4459 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 4460 return matchAddr(AddrInst->getOperand(0), Depth); 4461 return false; 4462 } 4463 case Instruction::BitCast: 4464 // BitCast is always a noop, and we can handle it as long as it is 4465 // int->int or pointer->pointer (we don't want int<->fp or something). 4466 if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && 4467 // Don't touch identity bitcasts. These were probably put here by LSR, 4468 // and we don't want to mess around with them. Assume it knows what it 4469 // is doing. 4470 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 4471 return matchAddr(AddrInst->getOperand(0), Depth); 4472 return false; 4473 case Instruction::AddrSpaceCast: { 4474 unsigned SrcAS 4475 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 4476 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 4477 if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS)) 4478 return matchAddr(AddrInst->getOperand(0), Depth); 4479 return false; 4480 } 4481 case Instruction::Add: { 4482 // Check to see if we can merge in the RHS then the LHS. If so, we win. 4483 ExtAddrMode BackupAddrMode = AddrMode; 4484 unsigned OldSize = AddrModeInsts.size(); 4485 // Start a transaction at this point. 4486 // The LHS may match but not the RHS. 4487 // Therefore, we need a higher level restoration point to undo partially 4488 // matched operation. 4489 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4490 TPT.getRestorationPoint(); 4491 4492 AddrMode.InBounds = false; 4493 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 4494 matchAddr(AddrInst->getOperand(0), Depth+1)) 4495 return true; 4496 4497 // Restore the old addr mode info. 4498 AddrMode = BackupAddrMode; 4499 AddrModeInsts.resize(OldSize); 4500 TPT.rollback(LastKnownGood); 4501 4502 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 4503 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 4504 matchAddr(AddrInst->getOperand(1), Depth+1)) 4505 return true; 4506 4507 // Otherwise we definitely can't merge the ADD in. 4508 AddrMode = BackupAddrMode; 4509 AddrModeInsts.resize(OldSize); 4510 TPT.rollback(LastKnownGood); 4511 break; 4512 } 4513 //case Instruction::Or: 4514 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 4515 //break; 4516 case Instruction::Mul: 4517 case Instruction::Shl: { 4518 // Can only handle X*C and X << C. 
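// E.g. (illustrative): "%idx << 3" is handled as %idx with Scale 8, and
// "%idx * 12" as %idx with Scale 12; a non-constant shift or multiplier is
// rejected just below because it cannot be encoded in the Scale field.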
4519 AddrMode.InBounds = false; 4520 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 4521 if (!RHS || RHS->getBitWidth() > 64) 4522 return false; 4523 int64_t Scale = RHS->getSExtValue(); 4524 if (Opcode == Instruction::Shl) 4525 Scale = 1LL << Scale; 4526 4527 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 4528 } 4529 case Instruction::GetElementPtr: { 4530 // Scan the GEP. We check it if it contains constant offsets and at most 4531 // one variable offset. 4532 int VariableOperand = -1; 4533 unsigned VariableScale = 0; 4534 4535 int64_t ConstantOffset = 0; 4536 gep_type_iterator GTI = gep_type_begin(AddrInst); 4537 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 4538 if (StructType *STy = GTI.getStructTypeOrNull()) { 4539 const StructLayout *SL = DL.getStructLayout(STy); 4540 unsigned Idx = 4541 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 4542 ConstantOffset += SL->getElementOffset(Idx); 4543 } else { 4544 TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType()); 4545 if (TS.isNonZero()) { 4546 // The optimisations below currently only work for fixed offsets. 4547 if (TS.isScalable()) 4548 return false; 4549 int64_t TypeSize = TS.getFixedSize(); 4550 if (ConstantInt *CI = 4551 dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 4552 const APInt &CVal = CI->getValue(); 4553 if (CVal.getMinSignedBits() <= 64) { 4554 ConstantOffset += CVal.getSExtValue() * TypeSize; 4555 continue; 4556 } 4557 } 4558 // We only allow one variable index at the moment. 4559 if (VariableOperand != -1) 4560 return false; 4561 4562 // Remember the variable index. 4563 VariableOperand = i; 4564 VariableScale = TypeSize; 4565 } 4566 } 4567 } 4568 4569 // A common case is for the GEP to only do a constant offset. In this case, 4570 // just add it to the disp field and check validity. 4571 if (VariableOperand == -1) { 4572 AddrMode.BaseOffs += ConstantOffset; 4573 if (ConstantOffset == 0 || 4574 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 4575 // Check to see if we can fold the base pointer in too. 4576 if (matchAddr(AddrInst->getOperand(0), Depth+1)) { 4577 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4578 AddrMode.InBounds = false; 4579 return true; 4580 } 4581 } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && 4582 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && 4583 ConstantOffset > 0) { 4584 // Record GEPs with non-zero offsets as candidates for splitting in the 4585 // event that the offset cannot fit into the r+i addressing mode. 4586 // Simple and common case that only one GEP is used in calculating the 4587 // address for the memory access. 4588 Value *Base = AddrInst->getOperand(0); 4589 auto *BaseI = dyn_cast<Instruction>(Base); 4590 auto *GEP = cast<GetElementPtrInst>(AddrInst); 4591 if (isa<Argument>(Base) || isa<GlobalValue>(Base) || 4592 (BaseI && !isa<CastInst>(BaseI) && 4593 !isa<GetElementPtrInst>(BaseI))) { 4594 // Make sure the parent block allows inserting non-PHI instructions 4595 // before the terminator. 4596 BasicBlock *Parent = 4597 BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); 4598 if (!Parent->getTerminator()->isEHPad()) 4599 LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); 4600 } 4601 } 4602 AddrMode.BaseOffs -= ConstantOffset; 4603 return false; 4604 } 4605 4606 // Save the valid addressing mode in case we can't match. 
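    // For illustration (hypothetical IR and data layout): a GEP such as
    //   %p = getelementptr { i32, [16 x i64] }, { i32, [16 x i64] }* %base, i64 0, i32 1, i64 %i
    // decomposes into ConstantOffset = 8 (the second struct field) plus one
    // variable index %i with VariableScale = 8, i.e. the address
    //   %base + 8*%i + 8
    // which the code below tries to fit into the target's addressing modes,
    // matching the base and the scaled index separately.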
4607 ExtAddrMode BackupAddrMode = AddrMode; 4608 unsigned OldSize = AddrModeInsts.size(); 4609 4610 // See if the scale and offset amount is valid for this target. 4611 AddrMode.BaseOffs += ConstantOffset; 4612 if (!cast<GEPOperator>(AddrInst)->isInBounds()) 4613 AddrMode.InBounds = false; 4614 4615 // Match the base operand of the GEP. 4616 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 4617 // If it couldn't be matched, just stuff the value in a register. 4618 if (AddrMode.HasBaseReg) { 4619 AddrMode = BackupAddrMode; 4620 AddrModeInsts.resize(OldSize); 4621 return false; 4622 } 4623 AddrMode.HasBaseReg = true; 4624 AddrMode.BaseReg = AddrInst->getOperand(0); 4625 } 4626 4627 // Match the remaining variable portion of the GEP. 4628 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 4629 Depth)) { 4630 // If it couldn't be matched, try stuffing the base into a register 4631 // instead of matching it, and retrying the match of the scale. 4632 AddrMode = BackupAddrMode; 4633 AddrModeInsts.resize(OldSize); 4634 if (AddrMode.HasBaseReg) 4635 return false; 4636 AddrMode.HasBaseReg = true; 4637 AddrMode.BaseReg = AddrInst->getOperand(0); 4638 AddrMode.BaseOffs += ConstantOffset; 4639 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 4640 VariableScale, Depth)) { 4641 // If even that didn't work, bail. 4642 AddrMode = BackupAddrMode; 4643 AddrModeInsts.resize(OldSize); 4644 return false; 4645 } 4646 } 4647 4648 return true; 4649 } 4650 case Instruction::SExt: 4651 case Instruction::ZExt: { 4652 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 4653 if (!Ext) 4654 return false; 4655 4656 // Try to move this ext out of the way of the addressing mode. 4657 // Ask for a method for doing so. 4658 TypePromotionHelper::Action TPH = 4659 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 4660 if (!TPH) 4661 return false; 4662 4663 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4664 TPT.getRestorationPoint(); 4665 unsigned CreatedInstsCost = 0; 4666 unsigned ExtCost = !TLI.isExtFree(Ext); 4667 Value *PromotedOperand = 4668 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 4669 // SExt has been moved away. 4670 // Thus either it will be rematched later in the recursive calls or it is 4671 // gone. Anyway, we must not fold it into the addressing mode at this point. 4672 // E.g., 4673 // op = add opnd, 1 4674 // idx = ext op 4675 // addr = gep base, idx 4676 // is now: 4677 // promotedOpnd = ext opnd <- no match here 4678 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 4679 // addr = gep base, op <- match 4680 if (MovedAway) 4681 *MovedAway = true; 4682 4683 assert(PromotedOperand && 4684 "TypePromotionHelper should have filtered out those cases"); 4685 4686 ExtAddrMode BackupAddrMode = AddrMode; 4687 unsigned OldSize = AddrModeInsts.size(); 4688 4689 if (!matchAddr(PromotedOperand, Depth) || 4690 // The total of the new cost is equal to the cost of the created 4691 // instructions. 4692 // The total of the old cost is equal to the cost of the extension plus 4693 // what we have saved in the addressing mode. 
4694 !isPromotionProfitable(CreatedInstsCost, 4695 ExtCost + (AddrModeInsts.size() - OldSize), 4696 PromotedOperand)) { 4697 AddrMode = BackupAddrMode; 4698 AddrModeInsts.resize(OldSize); 4699 LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 4700 TPT.rollback(LastKnownGood); 4701 return false; 4702 } 4703 return true; 4704 } 4705 } 4706 return false; 4707 } 4708 4709 /// If we can, try to add the value of 'Addr' into the current addressing mode. 4710 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 4711 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 4712 /// for the target. 4713 /// 4714 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 4715 // Start a transaction at this point that we will rollback if the matching 4716 // fails. 4717 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4718 TPT.getRestorationPoint(); 4719 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 4720 if (CI->getValue().isSignedIntN(64)) { 4721 // Fold in immediates if legal for the target. 4722 AddrMode.BaseOffs += CI->getSExtValue(); 4723 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4724 return true; 4725 AddrMode.BaseOffs -= CI->getSExtValue(); 4726 } 4727 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 4728 // If this is a global variable, try to fold it into the addressing mode. 4729 if (!AddrMode.BaseGV) { 4730 AddrMode.BaseGV = GV; 4731 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4732 return true; 4733 AddrMode.BaseGV = nullptr; 4734 } 4735 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 4736 ExtAddrMode BackupAddrMode = AddrMode; 4737 unsigned OldSize = AddrModeInsts.size(); 4738 4739 // Check to see if it is possible to fold this operation. 4740 bool MovedAway = false; 4741 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 4742 // This instruction may have been moved away. If so, there is nothing 4743 // to check here. 4744 if (MovedAway) 4745 return true; 4746 // Okay, it's possible to fold this. Check to see if it is actually 4747 // *profitable* to do so. We use a simple cost model to avoid increasing 4748 // register pressure too much. 4749 if (I->hasOneUse() || 4750 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 4751 AddrModeInsts.push_back(I); 4752 return true; 4753 } 4754 4755 // It isn't profitable to do this, roll back. 4756 //cerr << "NOT FOLDING: " << *I; 4757 AddrMode = BackupAddrMode; 4758 AddrModeInsts.resize(OldSize); 4759 TPT.rollback(LastKnownGood); 4760 } 4761 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 4762 if (matchOperationAddr(CE, CE->getOpcode(), Depth)) 4763 return true; 4764 TPT.rollback(LastKnownGood); 4765 } else if (isa<ConstantPointerNull>(Addr)) { 4766 // Null pointer gets folded without affecting the addressing mode. 4767 return true; 4768 } 4769 4770 // Worse case, the target should support [reg] addressing modes. :) 4771 if (!AddrMode.HasBaseReg) { 4772 AddrMode.HasBaseReg = true; 4773 AddrMode.BaseReg = Addr; 4774 // Still check for legality in case the target supports [imm] but not [i+r]. 4775 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4776 return true; 4777 AddrMode.HasBaseReg = false; 4778 AddrMode.BaseReg = nullptr; 4779 } 4780 4781 // If the base register is already taken, see if we can do [r+r]. 
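  // For example (hypothetical state): if AddrMode already holds a base
  // register, say [%obj + 16], a second value %idx can still be folded as a
  // scaled register with Scale = 1, giving [%obj + 1*%idx + 16], provided
  // isLegalAddressingMode() accepts that form.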
4782 if (AddrMode.Scale == 0) { 4783 AddrMode.Scale = 1; 4784 AddrMode.ScaledReg = Addr; 4785 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 4786 return true; 4787 AddrMode.Scale = 0; 4788 AddrMode.ScaledReg = nullptr; 4789 } 4790 // Couldn't match. 4791 TPT.rollback(LastKnownGood); 4792 return false; 4793 } 4794 4795 /// Check to see if all uses of OpVal by the specified inline asm call are due 4796 /// to memory operands. If so, return true, otherwise return false. 4797 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 4798 const TargetLowering &TLI, 4799 const TargetRegisterInfo &TRI) { 4800 const Function *F = CI->getFunction(); 4801 TargetLowering::AsmOperandInfoVector TargetConstraints = 4802 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI); 4803 4804 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 4805 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 4806 4807 // Compute the constraint code and ConstraintType to use. 4808 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 4809 4810 // If this asm operand is our Value*, and if it isn't an indirect memory 4811 // operand, we can't fold it! 4812 if (OpInfo.CallOperandVal == OpVal && 4813 (OpInfo.ConstraintType != TargetLowering::C_Memory || 4814 !OpInfo.isIndirect)) 4815 return false; 4816 } 4817 4818 return true; 4819 } 4820 4821 // Max number of memory uses to look at before aborting the search to conserve 4822 // compile time. 4823 static constexpr int MaxMemoryUsesToScan = 20; 4824 4825 /// Recursively walk all the uses of I until we find a memory use. 4826 /// If we find an obviously non-foldable instruction, return true. 4827 /// Add the ultimately found memory instructions to MemoryUses. 4828 static bool FindAllMemoryUses( 4829 Instruction *I, 4830 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 4831 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, 4832 const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, 4833 BlockFrequencyInfo *BFI, int SeenInsts = 0) { 4834 // If we already considered this instruction, we're done. 4835 if (!ConsideredInsts.insert(I).second) 4836 return false; 4837 4838 // If this is an obviously unfoldable instruction, bail out. 4839 if (!MightBeFoldableInst(I)) 4840 return true; 4841 4842 // Loop over all the uses, recursively processing them. 4843 for (Use &U : I->uses()) { 4844 // Conservatively return true if we're seeing a large number or a deep chain 4845 // of users. This avoids excessive compilation times in pathological cases. 4846 if (SeenInsts++ >= MaxMemoryUsesToScan) 4847 return true; 4848 4849 Instruction *UserI = cast<Instruction>(U.getUser()); 4850 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 4851 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 4852 continue; 4853 } 4854 4855 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 4856 unsigned opNo = U.getOperandNo(); 4857 if (opNo != StoreInst::getPointerOperandIndex()) 4858 return true; // Storing addr, not into addr. 4859 MemoryUses.push_back(std::make_pair(SI, opNo)); 4860 continue; 4861 } 4862 4863 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { 4864 unsigned opNo = U.getOperandNo(); 4865 if (opNo != AtomicRMWInst::getPointerOperandIndex()) 4866 return true; // Storing addr, not into addr. 
4867 MemoryUses.push_back(std::make_pair(RMW, opNo)); 4868 continue; 4869 } 4870 4871 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { 4872 unsigned opNo = U.getOperandNo(); 4873 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) 4874 return true; // Storing addr, not into addr. 4875 MemoryUses.push_back(std::make_pair(CmpX, opNo)); 4876 continue; 4877 } 4878 4879 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 4880 if (CI->hasFnAttr(Attribute::Cold)) { 4881 // If this is a cold call, we can sink the addressing calculation into 4882 // the cold path. See optimizeCallInst 4883 bool OptForSize = OptSize || 4884 llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI); 4885 if (!OptForSize) 4886 continue; 4887 } 4888 4889 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand()); 4890 if (!IA) return true; 4891 4892 // If this is a memory operand, we're cool, otherwise bail out. 4893 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) 4894 return true; 4895 continue; 4896 } 4897 4898 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, 4899 PSI, BFI, SeenInsts)) 4900 return true; 4901 } 4902 4903 return false; 4904 } 4905 4906 /// Return true if Val is already known to be live at the use site that we're 4907 /// folding it into. If so, there is no cost to include it in the addressing 4908 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 4909 /// instruction already. 4910 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 4911 Value *KnownLive2) { 4912 // If Val is either of the known-live values, we know it is live! 4913 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 4914 return true; 4915 4916 // All values other than instructions and arguments (e.g. constants) are live. 4917 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 4918 4919 // If Val is a constant sized alloca in the entry block, it is live, this is 4920 // true because it is just a reference to the stack/frame pointer, which is 4921 // live for the whole function. 4922 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 4923 if (AI->isStaticAlloca()) 4924 return true; 4925 4926 // Check to see if this value is already used in the memory instruction's 4927 // block. If so, it's already live into the block at the very least, so we 4928 // can reasonably fold it. 4929 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 4930 } 4931 4932 /// It is possible for the addressing mode of the machine to fold the specified 4933 /// instruction into a load or store that ultimately uses it. 4934 /// However, the specified instruction has multiple uses. 4935 /// Given this, it may actually increase register pressure to fold it 4936 /// into the load. For example, consider this code: 4937 /// 4938 /// X = ... 4939 /// Y = X+1 4940 /// use(Y) -> nonload/store 4941 /// Z = Y+1 4942 /// load Z 4943 /// 4944 /// In this case, Y has multiple uses, and can be folded into the load of Z 4945 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 4946 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 4947 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 4948 /// number of computations either. 4949 /// 4950 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 4951 /// X was live across 'load Z' for other reasons, we actually *would* want to 4952 /// fold the addressing mode in the Z case. 
This would make Y die earlier. 4953 bool AddressingModeMatcher:: 4954 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 4955 ExtAddrMode &AMAfter) { 4956 if (IgnoreProfitability) return true; 4957 4958 // AMBefore is the addressing mode before this instruction was folded into it, 4959 // and AMAfter is the addressing mode after the instruction was folded. Get 4960 // the set of registers referenced by AMAfter and subtract out those 4961 // referenced by AMBefore: this is the set of values which folding in this 4962 // address extends the lifetime of. 4963 // 4964 // Note that there are only two potential values being referenced here, 4965 // BaseReg and ScaleReg (global addresses are always available, as are any 4966 // folded immediates). 4967 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 4968 4969 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 4970 // lifetime wasn't extended by adding this instruction. 4971 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4972 BaseReg = nullptr; 4973 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 4974 ScaledReg = nullptr; 4975 4976 // If folding this instruction (and it's subexprs) didn't extend any live 4977 // ranges, we're ok with it. 4978 if (!BaseReg && !ScaledReg) 4979 return true; 4980 4981 // If all uses of this instruction can have the address mode sunk into them, 4982 // we can remove the addressing mode and effectively trade one live register 4983 // for another (at worst.) In this context, folding an addressing mode into 4984 // the use is just a particularly nice way of sinking it. 4985 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 4986 SmallPtrSet<Instruction*, 16> ConsideredInsts; 4987 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, 4988 PSI, BFI)) 4989 return false; // Has a non-memory, non-foldable use! 4990 4991 // Now that we know that all uses of this instruction are part of a chain of 4992 // computation involving only operations that could theoretically be folded 4993 // into a memory use, loop over each of these memory operation uses and see 4994 // if they could *actually* fold the instruction. The assumption is that 4995 // addressing modes are cheap and that duplicating the computation involved 4996 // many times is worthwhile, even on a fastpath. For sinking candidates 4997 // (i.e. cold call sites), this serves as a way to prevent excessive code 4998 // growth since most architectures have some reasonable small and fast way to 4999 // compute an effective address. (i.e LEA on x86) 5000 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 5001 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 5002 Instruction *User = MemoryUses[i].first; 5003 unsigned OpNo = MemoryUses[i].second; 5004 5005 // Get the access type of this use. If the use isn't a pointer, we don't 5006 // know what it accesses. 5007 Value *Address = User->getOperand(OpNo); 5008 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); 5009 if (!AddrTy) 5010 return false; 5011 Type *AddressAccessTy = AddrTy->getElementType(); 5012 unsigned AS = AddrTy->getAddressSpace(); 5013 5014 // Do a match against the root of this address, ignoring profitability. This 5015 // will tell us if the addressing mode for the memory operation will 5016 // *actually* cover the shared instruction. 
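    // Hypothetical example: if I is "%off = add i64 %a, %b" feeding the
    // address of this memory use, the re-match below counts as covering I
    // only if %off ends up in MatchedAddrModeInsts, i.e. the use can fold
    // the whole shared computation into its addressing mode rather than just
    // part of it.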
5017 ExtAddrMode Result;
5018 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5019 0);
5020 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5021 TPT.getRestorationPoint();
5022 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5023 AddressAccessTy, AS, MemoryInst, Result,
5024 InsertedInsts, PromotedInsts, TPT,
5025 LargeOffsetGEP, OptSize, PSI, BFI);
5026 Matcher.IgnoreProfitability = true;
5027 bool Success = Matcher.matchAddr(Address, 0);
5028 (void)Success; assert(Success && "Couldn't select *anything*?");
5029
5030 // The match was only done to check profitability; the changes made are not
5031 // part of the original matcher. Therefore, they should be dropped,
5032 // otherwise the original matcher will not be in the right state.
5033 TPT.rollback(LastKnownGood);
5034
5035 // If the match didn't cover I, then it won't be shared by it.
5036 if (!is_contained(MatchedAddrModeInsts, I))
5037 return false;
5038
5039 MatchedAddrModeInsts.clear();
5040 }
5041
5042 return true;
5043 }
5044
5045 /// Return true if the specified values are defined in a
5046 /// different basic block than BB.
5047 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5048 if (Instruction *I = dyn_cast<Instruction>(V))
5049 return I->getParent() != BB;
5050 return false;
5051 }
5052
5053 /// Sink addressing mode computation immediately before MemoryInst if doing so
5054 /// can be done without increasing register pressure. The need for the
5055 /// register pressure constraint means this can end up being an all-or-nothing
5056 /// decision for all uses of the same addressing computation.
5057 ///
5058 /// Load and store instructions often have addressing modes that can do
5059 /// significant amounts of computation. As such, instruction selection will try
5060 /// to get the load or store to do as much computation as possible for the
5061 /// program. The problem is that isel can only see within a single block. As
5062 /// such, we sink as much legal addressing mode work into the block as possible.
5063 ///
5064 /// This method is used to optimize both load/store and inline asms with memory
5065 /// operands. It's also used to sink addressing computations feeding into cold
5066 /// call sites into their (cold) basic block.
5067 ///
5068 /// The motivation for handling sinking into cold blocks is that doing so can
5069 /// both enable other address mode sinking (by satisfying the register pressure
5070 /// constraint above) and reduce register pressure globally (by removing the
5071 /// addressing mode computation from the fast path entirely).
5072 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5073 Type *AccessTy, unsigned AddrSpace) {
5074 Value *Repl = Addr;
5075
5076 // Try to collapse single-value PHI nodes. This is necessary to undo
5077 // unprofitable PRE transformations.
5078 SmallVector<Value*, 8> worklist;
5079 SmallPtrSet<Value*, 16> Visited;
5080 worklist.push_back(Addr);
5081
5082 // Use a worklist to iteratively look through PHI and select nodes, and
5083 // ensure that the addressing modes obtained from the non-PHI/select roots of
5084 // the graph are compatible.
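  // Illustrative sketch (hypothetical IR): for an address that merges two
  // GEPs through a phi, e.g.
  //   bb1: %a1 = getelementptr i8, i8* %base, i64 16
  //   bb2: %a2 = getelementptr i8, i8* %base, i64 16
  //   bb3: %p  = phi i8* [ %a1, %bb1 ], [ %a2, %bb2 ]
  //        %v  = load i8, i8* %p
  // the worklist visits %a1 and %a2, both of which yield the same
  // [%base + 16] addressing mode, so the computation can be sunk next to the
  // load even though it flows through a phi.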
5085 bool PhiOrSelectSeen = false;
5086 SmallVector<Instruction*, 16> AddrModeInsts;
5087 const SimplifyQuery SQ(*DL, TLInfo);
5088 AddressingModeCombiner AddrModes(SQ, Addr);
5089 TypePromotionTransaction TPT(RemovedInsts);
5090 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5091 TPT.getRestorationPoint();
5092 while (!worklist.empty()) {
5093 Value *V = worklist.back();
5094 worklist.pop_back();
5095
5096 // We allow traversing cyclic Phi nodes.
5097 // In case of success after this loop, we ensure that every way of computing
5098 // the address through the Phi nodes has the form
5099 // BaseGV + Base + Scale * Index + Offset
5100 // where Scale and Offset are constants and BaseGV, Base and Index
5101 // are exactly the same Values in all cases.
5102 // It means that BaseGV, Scale and Offset dominate our memory instruction
5103 // and have the same value as they had in the address computation represented
5104 // by the Phi. So we can safely sink the address computation to the memory instruction.
5105 if (!Visited.insert(V).second)
5106 continue;
5107
5108 // For a PHI node, push all of its incoming values.
5109 if (PHINode *P = dyn_cast<PHINode>(V)) {
5110 append_range(worklist, P->incoming_values());
5111 PhiOrSelectSeen = true;
5112 continue;
5113 }
5114 // Similar for select.
5115 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5116 worklist.push_back(SI->getFalseValue());
5117 worklist.push_back(SI->getTrueValue());
5118 PhiOrSelectSeen = true;
5119 continue;
5120 }
5121
5122 // For non-PHIs, determine the addressing mode being computed. Note that
5123 // the result may differ depending on what other uses our candidate
5124 // addressing instructions might have.
5125 AddrModeInsts.clear();
5126 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5127 0);
5128 // Defer the query (and possible computation) of the dom tree to the point of
5129 // actual use. It's expected that most address matches don't actually need
5130 // the domtree.
5131 auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5132 Function *F = MemoryInst->getParent()->getParent();
5133 return this->getDT(*F);
5134 };
5135 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5136 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5137 *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5138 BFI.get());
5139
5140 GetElementPtrInst *GEP = LargeOffsetGEP.first;
5141 if (GEP && !NewGEPBases.count(GEP)) {
5142 // If splitting the underlying data structure can reduce the offset of a
5143 // GEP, collect the GEP. Skip the GEPs that are the new bases of
5144 // previously split data structures.
5145 LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5146 if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
5147 LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
5148 }
5149
5150 NewAddrMode.OriginalValue = V;
5151 if (!AddrModes.addNewAddrMode(NewAddrMode))
5152 break;
5153 }
5154
5155 // Try to combine the AddrModes we've collected. If we couldn't collect any,
5156 // or we have multiple but either couldn't combine them or combining them
5157 // wouldn't do anything useful, bail out now.
5158 if (!AddrModes.combineAddrModes()) {
5159 TPT.rollback(LastKnownGood);
5160 return false;
5161 }
5162 bool Modified = TPT.commit();
5163
5164 // Get the combined AddrMode (or the only AddrMode, if we only had one).
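  // For reference, the resulting ExtAddrMode describes an address of the form
  //   BaseGV + BaseReg + Scale * ScaledReg + BaseOffs
  // e.g. (hypothetical values) BaseReg = %buf, Scale = 4, ScaledReg = %i and
  // BaseOffs = 32 together stand for the address "%buf + 4*%i + 32".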
5165 ExtAddrMode AddrMode = AddrModes.getAddrMode(); 5166 5167 // If all the instructions matched are already in this BB, don't do anything. 5168 // If we saw a Phi node then it is not local definitely, and if we saw a select 5169 // then we want to push the address calculation past it even if it's already 5170 // in this BB. 5171 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { 5172 return IsNonLocalValue(V, MemoryInst->getParent()); 5173 })) { 5174 LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode 5175 << "\n"); 5176 return Modified; 5177 } 5178 5179 // Insert this computation right after this user. Since our caller is 5180 // scanning from the top of the BB to the bottom, reuse of the expr are 5181 // guaranteed to happen later. 5182 IRBuilder<> Builder(MemoryInst); 5183 5184 // Now that we determined the addressing expression we want to use and know 5185 // that we have to sink it into this block. Check to see if we have already 5186 // done this for some other load/store instr in this block. If so, reuse 5187 // the computation. Before attempting reuse, check if the address is valid 5188 // as it may have been erased. 5189 5190 WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; 5191 5192 Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; 5193 if (SunkAddr) { 5194 LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode 5195 << " for " << *MemoryInst << "\n"); 5196 if (SunkAddr->getType() != Addr->getType()) 5197 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 5198 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() && 5199 SubtargetInfo->addrSinkUsingGEPs())) { 5200 // By default, we use the GEP-based method when AA is used later. This 5201 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 5202 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 5203 << " for " << *MemoryInst << "\n"); 5204 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 5205 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 5206 5207 // First, find the pointer. 5208 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 5209 ResultPtr = AddrMode.BaseReg; 5210 AddrMode.BaseReg = nullptr; 5211 } 5212 5213 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 5214 // We can't add more than one pointer together, nor can we scale a 5215 // pointer (both of which seem meaningless). 5216 if (ResultPtr || AddrMode.Scale != 1) 5217 return Modified; 5218 5219 ResultPtr = AddrMode.ScaledReg; 5220 AddrMode.Scale = 0; 5221 } 5222 5223 // It is only safe to sign extend the BaseReg if we know that the math 5224 // required to create it did not overflow before we extend it. Since 5225 // the original IR value was tossed in favor of a constant back when 5226 // the AddrMode was created we need to bail out gracefully if widths 5227 // do not match instead of extending it. 5228 // 5229 // (See below for code to add the scale.) 5230 if (AddrMode.Scale) { 5231 Type *ScaledRegTy = AddrMode.ScaledReg->getType(); 5232 if (cast<IntegerType>(IntPtrTy)->getBitWidth() > 5233 cast<IntegerType>(ScaledRegTy)->getBitWidth()) 5234 return Modified; 5235 } 5236 5237 if (AddrMode.BaseGV) { 5238 if (ResultPtr) 5239 return Modified; 5240 5241 ResultPtr = AddrMode.BaseGV; 5242 } 5243 5244 // If the real base value actually came from an inttoptr, then the matcher 5245 // will look through it and provide only the integer value. In that case, 5246 // use it here. 
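    // Hypothetical illustration: for
    //   %addr = inttoptr i64 %raw to i32*
    // the matcher records %raw as the integer BaseReg, so the code below
    // rebuilds the pointer with a single inttoptr of %raw and clears the
    // corresponding AddrMode field.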
5247 if (!DL->isNonIntegralPointerType(Addr->getType())) { 5248 if (!ResultPtr && AddrMode.BaseReg) { 5249 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), 5250 "sunkaddr"); 5251 AddrMode.BaseReg = nullptr; 5252 } else if (!ResultPtr && AddrMode.Scale == 1) { 5253 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), 5254 "sunkaddr"); 5255 AddrMode.Scale = 0; 5256 } 5257 } 5258 5259 if (!ResultPtr && 5260 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 5261 SunkAddr = Constant::getNullValue(Addr->getType()); 5262 } else if (!ResultPtr) { 5263 return Modified; 5264 } else { 5265 Type *I8PtrTy = 5266 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 5267 Type *I8Ty = Builder.getInt8Ty(); 5268 5269 // Start with the base register. Do this first so that subsequent address 5270 // matching finds it last, which will prevent it from trying to match it 5271 // as the scaled value in case it happens to be a mul. That would be 5272 // problematic if we've sunk a different mul for the scale, because then 5273 // we'd end up sinking both muls. 5274 if (AddrMode.BaseReg) { 5275 Value *V = AddrMode.BaseReg; 5276 if (V->getType() != IntPtrTy) 5277 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 5278 5279 ResultIndex = V; 5280 } 5281 5282 // Add the scale value. 5283 if (AddrMode.Scale) { 5284 Value *V = AddrMode.ScaledReg; 5285 if (V->getType() == IntPtrTy) { 5286 // done. 5287 } else { 5288 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < 5289 cast<IntegerType>(V->getType())->getBitWidth() && 5290 "We can't transform if ScaledReg is too narrow"); 5291 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 5292 } 5293 5294 if (AddrMode.Scale != 1) 5295 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 5296 "sunkaddr"); 5297 if (ResultIndex) 5298 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 5299 else 5300 ResultIndex = V; 5301 } 5302 5303 // Add in the Base Offset if present. 5304 if (AddrMode.BaseOffs) { 5305 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 5306 if (ResultIndex) { 5307 // We need to add this separately from the scale above to help with 5308 // SDAG consecutive load/store merging. 5309 if (ResultPtr->getType() != I8PtrTy) 5310 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 5311 ResultPtr = 5312 AddrMode.InBounds 5313 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 5314 "sunkaddr") 5315 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 5316 } 5317 5318 ResultIndex = V; 5319 } 5320 5321 if (!ResultIndex) { 5322 SunkAddr = ResultPtr; 5323 } else { 5324 if (ResultPtr->getType() != I8PtrTy) 5325 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); 5326 SunkAddr = 5327 AddrMode.InBounds 5328 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex, 5329 "sunkaddr") 5330 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 5331 } 5332 5333 if (SunkAddr->getType() != Addr->getType()) 5334 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); 5335 } 5336 } else { 5337 // We'd require a ptrtoint/inttoptr down the line, which we can't do for 5338 // non-integral pointers, so in that case bail out now. 5339 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; 5340 Type *ScaleTy = AddrMode.Scale ? 
AddrMode.ScaledReg->getType() : nullptr; 5341 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); 5342 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); 5343 if (DL->isNonIntegralPointerType(Addr->getType()) || 5344 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || 5345 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || 5346 (AddrMode.BaseGV && 5347 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) 5348 return Modified; 5349 5350 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode 5351 << " for " << *MemoryInst << "\n"); 5352 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 5353 Value *Result = nullptr; 5354 5355 // Start with the base register. Do this first so that subsequent address 5356 // matching finds it last, which will prevent it from trying to match it 5357 // as the scaled value in case it happens to be a mul. That would be 5358 // problematic if we've sunk a different mul for the scale, because then 5359 // we'd end up sinking both muls. 5360 if (AddrMode.BaseReg) { 5361 Value *V = AddrMode.BaseReg; 5362 if (V->getType()->isPointerTy()) 5363 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 5364 if (V->getType() != IntPtrTy) 5365 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 5366 Result = V; 5367 } 5368 5369 // Add the scale value. 5370 if (AddrMode.Scale) { 5371 Value *V = AddrMode.ScaledReg; 5372 if (V->getType() == IntPtrTy) { 5373 // done. 5374 } else if (V->getType()->isPointerTy()) { 5375 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 5376 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 5377 cast<IntegerType>(V->getType())->getBitWidth()) { 5378 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 5379 } else { 5380 // It is only safe to sign extend the BaseReg if we know that the math 5381 // required to create it did not overflow before we extend it. Since 5382 // the original IR value was tossed in favor of a constant back when 5383 // the AddrMode was created we need to bail out gracefully if widths 5384 // do not match instead of extending it. 5385 Instruction *I = dyn_cast_or_null<Instruction>(Result); 5386 if (I && (Result != AddrMode.BaseReg)) 5387 I->eraseFromParent(); 5388 return Modified; 5389 } 5390 if (AddrMode.Scale != 1) 5391 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 5392 "sunkaddr"); 5393 if (Result) 5394 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5395 else 5396 Result = V; 5397 } 5398 5399 // Add in the BaseGV if present. 5400 if (AddrMode.BaseGV) { 5401 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 5402 if (Result) 5403 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5404 else 5405 Result = V; 5406 } 5407 5408 // Add in the Base Offset if present. 5409 if (AddrMode.BaseOffs) { 5410 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 5411 if (Result) 5412 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 5413 else 5414 Result = V; 5415 } 5416 5417 if (!Result) 5418 SunkAddr = Constant::getNullValue(Addr->getType()); 5419 else 5420 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 5421 } 5422 5423 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 5424 // Store the newly computed address into the cache. In the case we reused a 5425 // value, this should be idempotent. 5426 SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); 5427 5428 // If we have no uses, recursively delete the value and all dead instructions 5429 // using it. 
5430 if (Repl->use_empty()) {
5431 resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
5432 RecursivelyDeleteTriviallyDeadInstructions(
5433 Repl, TLInfo, nullptr,
5434 [&](Value *V) { removeAllAssertingVHReferences(V); });
5435 });
5436 }
5437 ++NumMemoryInsts;
5438 return true;
5439 }
5440
5441 /// Rewrite the GEP input to a gather/scatter to enable SelectionDAGBuilder to
5442 /// find a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder
5443 /// can only handle a 2-operand GEP in the same basic block or a splat constant
5444 /// vector. The 2 operands of the GEP must be a scalar pointer and a vector
5445 /// index.
5446 ///
5447 /// If the existing GEP has a vector base pointer that is a splat, we can look
5448 /// through the splat to find the scalar pointer. If we can't find a scalar
5449 /// pointer, there's nothing we can do.
5450 ///
5451 /// If we have a GEP with more than 2 indices where the middle indices are all
5452 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
5453 ///
5454 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP
5455 /// followed by a GEP with an all-zeroes vector index. This will enable
5456 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
5457 /// zero index.
5458 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
5459 Value *Ptr) {
5460 Value *NewAddr;
5461
5462 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
5463 // Don't optimize GEPs that don't have indices.
5464 if (!GEP->hasIndices())
5465 return false;
5466
5467 // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
5468 // FIXME: We should support this by sinking the GEP.
5469 if (MemoryInst->getParent() != GEP->getParent())
5470 return false;
5471
5472 SmallVector<Value *, 2> Ops(GEP->operands());
5473
5474 bool RewriteGEP = false;
5475
5476 if (Ops[0]->getType()->isVectorTy()) {
5477 Ops[0] = getSplatValue(Ops[0]);
5478 if (!Ops[0])
5479 return false;
5480 RewriteGEP = true;
5481 }
5482
5483 unsigned FinalIndex = Ops.size() - 1;
5484
5485 // Ensure all but the last index are 0.
5486 // FIXME: This isn't strictly required. All that's required is that they are
5487 // all scalars or splats.
5488 for (unsigned i = 1; i < FinalIndex; ++i) {
5489 auto *C = dyn_cast<Constant>(Ops[i]);
5490 if (!C)
5491 return false;
5492 if (isa<VectorType>(C->getType()))
5493 C = C->getSplatValue();
5494 auto *CI = dyn_cast_or_null<ConstantInt>(C);
5495 if (!CI || !CI->isZero())
5496 return false;
5497 // Scalarize the index if needed.
5498 Ops[i] = CI;
5499 }
5500
5501 // Try to scalarize the final index.
5502 if (Ops[FinalIndex]->getType()->isVectorTy()) {
5503 if (Value *V = getSplatValue(Ops[FinalIndex])) {
5504 auto *C = dyn_cast<ConstantInt>(V);
5505 // Don't scalarize an all-zeros vector.
5506 if (!C || !C->isZero()) {
5507 Ops[FinalIndex] = V;
5508 RewriteGEP = true;
5509 }
5510 }
5511 }
5512
5513 // If we made any changes or we have extra operands, we need to generate
5514 // new instructions.
5515 if (!RewriteGEP && Ops.size() == 2)
5516 return false;
5517
5518 auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5519
5520 IRBuilder<> Builder(MemoryInst);
5521
5522 Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
5523
5524 // If the final index isn't a vector, emit a scalar GEP containing all ops
5525 // and a vector GEP with an all-zeroes final index.
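    // Illustrative sketch (hypothetical IR): with a splat base and a scalar
    // final index, e.g.
    //   %p = getelementptr i32, <4 x i32*> %splatbase, i64 %i
    // (where %splatbase is a splat of the scalar pointer %base), this becomes
    // a scalar GEP plus a vector GEP with a zero index:
    //   %s = getelementptr i32, i32* %base, i64 %i
    //   %p = getelementptr i32, i32* %s, <4 x i64> zeroinitializer
    // letting SelectionDAGBuilder use %s as the uniform base.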
5526 if (!Ops[FinalIndex]->getType()->isVectorTy()) {
5527 NewAddr = Builder.CreateGEP(Ops[0], makeArrayRef(Ops).drop_front());
5528 auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5529 NewAddr = Builder.CreateGEP(NewAddr, Constant::getNullValue(IndexTy));
5530 } else {
5531 Value *Base = Ops[0];
5532 Value *Index = Ops[FinalIndex];
5533
5534 // Create a scalar GEP if there are more than 2 operands.
5535 if (Ops.size() != 2) {
5536 // Replace the last index with 0.
5537 Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy);
5538 Base = Builder.CreateGEP(Base, makeArrayRef(Ops).drop_front());
5539 }
5540
5541 // Now create the GEP with scalar pointer and vector index.
5542 NewAddr = Builder.CreateGEP(Base, Index);
5543 }
5544 } else if (!isa<Constant>(Ptr)) {
5545 // Not a GEP; maybe it's a splat and we can create a GEP to enable
5546 // SelectionDAGBuilder to use it as a uniform base.
5547 Value *V = getSplatValue(Ptr);
5548 if (!V)
5549 return false;
5550
5551 auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5552
5553 IRBuilder<> Builder(MemoryInst);
5554
5555 // Emit a vector GEP with a scalar pointer and an all-zeros vector index.
5556 Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
5557 auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5558 NewAddr = Builder.CreateGEP(V, Constant::getNullValue(IndexTy));
5559 } else {
5560 // Constant; SelectionDAGBuilder knows to check if it's a splat.
5561 return false;
5562 }
5563
5564 MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
5565
5566 // If we have no uses, recursively delete the value and all dead instructions
5567 // using it.
5568 if (Ptr->use_empty())
5569 RecursivelyDeleteTriviallyDeadInstructions(
5570 Ptr, TLInfo, nullptr,
5571 [&](Value *V) { removeAllAssertingVHReferences(V); });
5572
5573 return true;
5574 }
5575
5576 /// If there are any memory operands, use optimizeMemoryInst to sink their
5577 /// address computation into the block when possible / profitable.
5578 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
5579 bool MadeChange = false;
5580
5581 const TargetRegisterInfo *TRI =
5582 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
5583 TargetLowering::AsmOperandInfoVector TargetConstraints =
5584 TLI->ParseConstraints(*DL, TRI, *CS);
5585 unsigned ArgNo = 0;
5586 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
5587 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
5588
5589 // Compute the constraint code and ConstraintType to use.
5590 TLI->ComputeConstraintToUse(OpInfo, SDValue());
5591
5592 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5593 OpInfo.isIndirect) {
5594 Value *OpVal = CS->getArgOperand(ArgNo++);
5595 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
5596 } else if (OpInfo.Type == InlineAsm::isInput)
5597 ArgNo++;
5598 }
5599
5600 return MadeChange;
5601 }
5602
5603 /// Check if all the uses of \p Val are equivalent (or free) zero or
5604 /// sign extensions.
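/// For illustration (hypothetical IR and target), the following uses qualify
/// if the target reports the wider zero extension as free:
/// \code
///   %z1 = zext i32 %val to i64
///   %z2 = zext i32 %val to i64   ; same type, folded by CSE
///   %z3 = zext i32 %val to i128  ; OK only if isZExtFree(i64, i128)
/// \endcode
/// whereas mixing a sext use with a zext use never qualifies.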
5605 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { 5606 assert(!Val->use_empty() && "Input must have at least one use"); 5607 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); 5608 bool IsSExt = isa<SExtInst>(FirstUser); 5609 Type *ExtTy = FirstUser->getType(); 5610 for (const User *U : Val->users()) { 5611 const Instruction *UI = cast<Instruction>(U); 5612 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 5613 return false; 5614 Type *CurTy = UI->getType(); 5615 // Same input and output types: Same instruction after CSE. 5616 if (CurTy == ExtTy) 5617 continue; 5618 5619 // If IsSExt is true, we are in this situation: 5620 // a = Val 5621 // b = sext ty1 a to ty2 5622 // c = sext ty1 a to ty3 5623 // Assuming ty2 is shorter than ty3, this could be turned into: 5624 // a = Val 5625 // b = sext ty1 a to ty2 5626 // c = sext ty2 b to ty3 5627 // However, the last sext is not free. 5628 if (IsSExt) 5629 return false; 5630 5631 // This is a ZExt, maybe this is free to extend from one type to another. 5632 // In that case, we would not account for a different use. 5633 Type *NarrowTy; 5634 Type *LargeTy; 5635 if (ExtTy->getScalarType()->getIntegerBitWidth() > 5636 CurTy->getScalarType()->getIntegerBitWidth()) { 5637 NarrowTy = CurTy; 5638 LargeTy = ExtTy; 5639 } else { 5640 NarrowTy = ExtTy; 5641 LargeTy = CurTy; 5642 } 5643 5644 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 5645 return false; 5646 } 5647 // All uses are the same or can be derived from one another for free. 5648 return true; 5649 } 5650 5651 /// Try to speculatively promote extensions in \p Exts and continue 5652 /// promoting through newly promoted operands recursively as far as doing so is 5653 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. 5654 /// When some promotion happened, \p TPT contains the proper state to revert 5655 /// them. 5656 /// 5657 /// \return true if some promotion happened, false otherwise. 5658 bool CodeGenPrepare::tryToPromoteExts( 5659 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, 5660 SmallVectorImpl<Instruction *> &ProfitablyMovedExts, 5661 unsigned CreatedInstsCost) { 5662 bool Promoted = false; 5663 5664 // Iterate over all the extensions to try to promote them. 5665 for (auto *I : Exts) { 5666 // Early check if we directly have ext(load). 5667 if (isa<LoadInst>(I->getOperand(0))) { 5668 ProfitablyMovedExts.push_back(I); 5669 continue; 5670 } 5671 5672 // Check whether or not we want to do any promotion. The reason we have 5673 // this check inside the for loop is to catch the case where an extension 5674 // is directly fed by a load because in such case the extension can be moved 5675 // up without any promotion on its operands. 5676 if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion) 5677 return false; 5678 5679 // Get the action to perform the promotion. 5680 TypePromotionHelper::Action TPH = 5681 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); 5682 // Check if we can promote. 5683 if (!TPH) { 5684 // Save the current extension as we cannot move up through its operand. 5685 ProfitablyMovedExts.push_back(I); 5686 continue; 5687 } 5688 5689 // Save the current state. 5690 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 5691 TPT.getRestorationPoint(); 5692 SmallVector<Instruction *, 4> NewExts; 5693 unsigned NewCreatedInstsCost = 0; 5694 unsigned ExtCost = !TLI->isExtFree(I); 5695 // Promote. 
5696 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
5697 &NewExts, nullptr, *TLI);
5698 assert(PromotedVal &&
5699 "TypePromotionHelper should have filtered out those cases");
5700
5701 // Only one extension can be merged into a load.
5702 // Therefore, if we have more than 1 new extension we heuristically
5703 // cut this search path, because it means we would degrade the code quality.
5704 // With exactly 2, the transformation is neutral, because we will merge
5705 // one extension but leave one. However, we optimistically keep going,
5706 // because the new extension may be removed too.
5707 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
5708 // FIXME: It would be possible to propagate a negative value instead of
5709 // conservatively ceiling it to 0.
5710 TotalCreatedInstsCost =
5711 std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
5712 if (!StressExtLdPromotion &&
5713 (TotalCreatedInstsCost > 1 ||
5714 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
5715 // This promotion is not profitable; roll back to the previous state, and
5716 // save the current extension in ProfitablyMovedExts as the latest
5717 // speculative promotion turned out to be unprofitable.
5718 TPT.rollback(LastKnownGood);
5719 ProfitablyMovedExts.push_back(I);
5720 continue;
5721 }
5722 // Continue promoting NewExts as far as doing so is profitable.
5723 SmallVector<Instruction *, 2> NewlyMovedExts;
5724 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
5725 bool NewPromoted = false;
5726 for (auto *ExtInst : NewlyMovedExts) {
5727 Instruction *MovedExt = cast<Instruction>(ExtInst);
5728 Value *ExtOperand = MovedExt->getOperand(0);
5729 // If we have reached a load, we need this extra profitability check
5730 // as it could potentially be merged into an ext(load).
5731 if (isa<LoadInst>(ExtOperand) &&
5732 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
5733 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
5734 continue;
5735
5736 ProfitablyMovedExts.push_back(MovedExt);
5737 NewPromoted = true;
5738 }
5739
5740 // If none of the speculative promotions for NewExts is profitable, roll back
5741 // and save the current extension (I) as the last profitable extension.
5742 if (!NewPromoted) {
5743 TPT.rollback(LastKnownGood);
5744 ProfitablyMovedExts.push_back(I);
5745 continue;
5746 }
5747 // The promotion is profitable.
5748 Promoted = true;
5749 }
5750 return Promoted;
5751 }
5752
5753 /// Merge redundant sexts when one dominates the other.
5754 bool CodeGenPrepare::mergeSExts(Function &F) {
5755 bool Changed = false;
5756 for (auto &Entry : ValToSExtendedUses) {
5757 SExts &Insts = Entry.second;
5758 SExts CurPts;
5759 for (Instruction *Inst : Insts) {
5760 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
5761 Inst->getOperand(0) != Entry.first)
5762 continue;
5763 bool inserted = false;
5764 for (auto &Pt : CurPts) {
5765 if (getDT(F).dominates(Inst, Pt)) {
5766 Pt->replaceAllUsesWith(Inst);
5767 RemovedInsts.insert(Pt);
5768 Pt->removeFromParent();
5769 Pt = Inst;
5770 inserted = true;
5771 Changed = true;
5772 break;
5773 }
5774 if (!getDT(F).dominates(Pt, Inst))
5775 // Give up if we need to merge in a common dominator as the
5776 // experiments show it is not profitable.
5777 continue;
5778 Inst->replaceAllUsesWith(Pt);
5779 RemovedInsts.insert(Inst);
5780 Inst->removeFromParent();
5781 inserted = true;
5782 Changed = true;
5783 break;
5784 }
5785 if (!inserted)
5786 CurPts.push_back(Inst);
5787 }
5788 }
5789 return Changed;
5790 }
5791
5792 // Split large data structures so that the GEPs accessing them can have
5793 // smaller offsets and can therefore be sunk to the same blocks as their users.
5794 // For example, a large struct starting from %base is split into two parts
5795 // where the second part starts from %new_base.
5796 //
5797 // Before:
5798 // BB0:
5799 // %base =
5800 //
5801 // BB1:
5802 // %gep0 = gep %base, off0
5803 // %gep1 = gep %base, off1
5804 // %gep2 = gep %base, off2
5805 //
5806 // BB2:
5807 // %load1 = load %gep0
5808 // %load2 = load %gep1
5809 // %load3 = load %gep2
5810 //
5811 // After:
5812 // BB0:
5813 // %base =
5814 // %new_base = gep %base, off0
5815 //
5816 // BB1:
5817 // %new_gep0 = %new_base
5818 // %new_gep1 = gep %new_base, off1 - off0
5819 // %new_gep2 = gep %new_base, off2 - off0
5820 //
5821 // BB2:
5822 // %load1 = load i32, i32* %new_gep0
5823 // %load2 = load i32, i32* %new_gep1
5824 // %load3 = load i32, i32* %new_gep2
5825 //
5826 // After the splitting, %new_gep1 and %new_gep2 can be sunk to BB2 because
5827 // their offsets are small enough to fit into the addressing mode.
5828 bool CodeGenPrepare::splitLargeGEPOffsets() {
5829 bool Changed = false;
5830 for (auto &Entry : LargeOffsetGEPMap) {
5831 Value *OldBase = Entry.first;
5832 SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
5833 &LargeOffsetGEPs = Entry.second;
5834 auto compareGEPOffset =
5835 [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
5836 const std::pair<GetElementPtrInst *, int64_t> &RHS) {
5837 if (LHS.first == RHS.first)
5838 return false;
5839 if (LHS.second != RHS.second)
5840 return LHS.second < RHS.second;
5841 return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
5842 };
5843 // Sort all the GEPs of the same data structure based on their offsets.
5844 llvm::sort(LargeOffsetGEPs, compareGEPOffset);
5845 LargeOffsetGEPs.erase(
5846 std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
5847 LargeOffsetGEPs.end());
5848 // Skip if all the GEPs have the same offset.
5849 if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
5850 continue;
5851 GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
5852 int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
5853 Value *NewBaseGEP = nullptr;
5854
5855 auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
5856 while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
5857 GetElementPtrInst *GEP = LargeOffsetGEP->first;
5858 int64_t Offset = LargeOffsetGEP->second;
5859 if (Offset != BaseOffset) {
5860 TargetLowering::AddrMode AddrMode;
5861 AddrMode.BaseOffs = Offset - BaseOffset;
5862 // The result type of the GEP might not be the type of the memory
5863 // access.
5864 if (!TLI->isLegalAddressingMode(*DL, AddrMode,
5865 GEP->getResultElementType(),
5866 GEP->getAddressSpace())) {
5867 // We need to create a new base if the offset to the current base is
5868 // too large to fit into the addressing mode. So, a very large struct
5869 // may be split into several parts.
5870 BaseGEP = GEP;
5871 BaseOffset = Offset;
5872 NewBaseGEP = nullptr;
5873 }
5874 }
5875
5876 // Generate a new GEP to replace the current one.
5877 LLVMContext &Ctx = GEP->getContext();
5878 Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
5879 Type *I8PtrTy =
5880 Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
5881 Type *I8Ty = Type::getInt8Ty(Ctx);
5882
5883 if (!NewBaseGEP) {
5884 // Create a new base if we don't have one yet. Find the insertion
5885 // point for the new base first.
5886 BasicBlock::iterator NewBaseInsertPt;
5887 BasicBlock *NewBaseInsertBB;
5888 if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
5889 // If the base of the struct is an instruction, the new base will be
5890 // inserted close to it.
5891 NewBaseInsertBB = BaseI->getParent();
5892 if (isa<PHINode>(BaseI))
5893 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5894 else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
5895 NewBaseInsertBB =
5896 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest());
5897 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5898 } else
5899 NewBaseInsertPt = std::next(BaseI->getIterator());
5900 } else {
5901 // If the current base is an argument or global value, the new base
5902 // will be inserted into the entry block.
5903 NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
5904 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5905 }
5906 IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
5907 // Create a new base.
5908 Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
5909 NewBaseGEP = OldBase;
5910 if (NewBaseGEP->getType() != I8PtrTy)
5911 NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
5912 NewBaseGEP =
5913 NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep");
5914 NewGEPBases.insert(NewBaseGEP);
5915 }
5916
5917 IRBuilder<> Builder(GEP);
5918 Value *NewGEP = NewBaseGEP;
5919 if (Offset == BaseOffset) {
5920 if (GEP->getType() != I8PtrTy)
5921 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
5922 } else {
5923 // Calculate the new offset for the new GEP.
5924 Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
5925 NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);
5926
5927 if (GEP->getType() != I8PtrTy)
5928 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
5929 }
5930 GEP->replaceAllUsesWith(NewGEP);
5931 LargeOffsetGEPID.erase(GEP);
5932 LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
5933 GEP->eraseFromParent();
5934 Changed = true;
5935 }
5936 }
5937 return Changed;
5938 }
5939
5940 bool CodeGenPrepare::optimizePhiType(
5941 PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
5942 SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
5943 // We are looking for a collection of interconnected phi nodes that together
5944 // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
5945 // are of the same type. Convert the whole set of nodes to the type of the
5946 // bitcast.
5947 Type *PhiTy = I->getType();
5948 Type *ConvertTy = nullptr;
5949 if (Visited.count(I) ||
5950 (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
5951 return false;
5952
5953 SmallVector<Instruction *, 4> Worklist;
5954 Worklist.push_back(cast<Instruction>(I));
5955 SmallPtrSet<PHINode *, 4> PhiNodes;
5956 PhiNodes.insert(I);
5957 Visited.insert(I);
5958 SmallPtrSet<Instruction *, 4> Defs;
5959 SmallPtrSet<Instruction *, 4> Uses;
5960 // This works by adding extra bitcasts between load/stores and removing
5961 // existing bitcasts.
If we have a phi(bitcast(load)) or a store(bitcast(phi)) 5962 // we can get in the situation where we remove a bitcast in one iteration 5963 // just to add it again in the next. We need to ensure that at least one 5964 // bitcast we remove are anchored to something that will not change back. 5965 bool AnyAnchored = false; 5966 5967 while (!Worklist.empty()) { 5968 Instruction *II = Worklist.pop_back_val(); 5969 5970 if (auto *Phi = dyn_cast<PHINode>(II)) { 5971 // Handle Defs, which might also be PHI's 5972 for (Value *V : Phi->incoming_values()) { 5973 if (auto *OpPhi = dyn_cast<PHINode>(V)) { 5974 if (!PhiNodes.count(OpPhi)) { 5975 if (Visited.count(OpPhi)) 5976 return false; 5977 PhiNodes.insert(OpPhi); 5978 Visited.insert(OpPhi); 5979 Worklist.push_back(OpPhi); 5980 } 5981 } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) { 5982 if (!OpLoad->isSimple()) 5983 return false; 5984 if (!Defs.count(OpLoad)) { 5985 Defs.insert(OpLoad); 5986 Worklist.push_back(OpLoad); 5987 } 5988 } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) { 5989 if (!Defs.count(OpEx)) { 5990 Defs.insert(OpEx); 5991 Worklist.push_back(OpEx); 5992 } 5993 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { 5994 if (!ConvertTy) 5995 ConvertTy = OpBC->getOperand(0)->getType(); 5996 if (OpBC->getOperand(0)->getType() != ConvertTy) 5997 return false; 5998 if (!Defs.count(OpBC)) { 5999 Defs.insert(OpBC); 6000 Worklist.push_back(OpBC); 6001 AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) && 6002 !isa<ExtractElementInst>(OpBC->getOperand(0)); 6003 } 6004 } else if (!isa<UndefValue>(V)) { 6005 return false; 6006 } 6007 } 6008 } 6009 6010 // Handle uses which might also be phi's 6011 for (User *V : II->users()) { 6012 if (auto *OpPhi = dyn_cast<PHINode>(V)) { 6013 if (!PhiNodes.count(OpPhi)) { 6014 if (Visited.count(OpPhi)) 6015 return false; 6016 PhiNodes.insert(OpPhi); 6017 Visited.insert(OpPhi); 6018 Worklist.push_back(OpPhi); 6019 } 6020 } else if (auto *OpStore = dyn_cast<StoreInst>(V)) { 6021 if (!OpStore->isSimple() || OpStore->getOperand(0) != II) 6022 return false; 6023 Uses.insert(OpStore); 6024 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { 6025 if (!ConvertTy) 6026 ConvertTy = OpBC->getType(); 6027 if (OpBC->getType() != ConvertTy) 6028 return false; 6029 Uses.insert(OpBC); 6030 AnyAnchored |= 6031 any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); }); 6032 } else { 6033 return false; 6034 } 6035 } 6036 } 6037 6038 if (!ConvertTy || !AnyAnchored || !TLI->shouldConvertPhiType(PhiTy, ConvertTy)) 6039 return false; 6040 6041 LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to " 6042 << *ConvertTy << "\n"); 6043 6044 // Create all the new phi nodes of the new type, and bitcast any loads to the 6045 // correct type. 6046 ValueToValueMap ValMap; 6047 ValMap[UndefValue::get(PhiTy)] = UndefValue::get(ConvertTy); 6048 for (Instruction *D : Defs) { 6049 if (isa<BitCastInst>(D)) { 6050 ValMap[D] = D->getOperand(0); 6051 DeletedInstrs.insert(D); 6052 } else { 6053 ValMap[D] = 6054 new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode()); 6055 } 6056 } 6057 for (PHINode *Phi : PhiNodes) 6058 ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(), 6059 Phi->getName() + ".tc", Phi); 6060 // Pipe together all the PhiNodes. 
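// (For illustration only: assuming the target's shouldConvertPhiType() says an
// i32 phi whose inputs are loads and whose only users are bitcasts to float is
// better represented as float, the loop below rebuilds it as "phi float",
// taking its incoming values from the bitcasts created above.)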
6061 for (PHINode *Phi : PhiNodes) {
6062 PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
6063 for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
6064 NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
6065 Phi->getIncomingBlock(i));
6066 Visited.insert(NewPhi);
6067 }
6068 // And finally pipe up the stores and bitcasts
6069 for (Instruction *U : Uses) {
6070 if (isa<BitCastInst>(U)) {
6071 DeletedInstrs.insert(U);
6072 U->replaceAllUsesWith(ValMap[U->getOperand(0)]);
6073 } else {
6074 U->setOperand(0,
6075 new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U));
6076 }
6077 }
6078
6079 // Save the removed phis to be deleted later.
6080 for (PHINode *Phi : PhiNodes)
6081 DeletedInstrs.insert(Phi);
6082 return true;
6083 }
6084
6085 bool CodeGenPrepare::optimizePhiTypes(Function &F) {
6086 if (!OptimizePhiTypes)
6087 return false;
6088
6089 bool Changed = false;
6090 SmallPtrSet<PHINode *, 4> Visited;
6091 SmallPtrSet<Instruction *, 4> DeletedInstrs;
6092
6093 // Attempt to optimize all the phis in the function to the correct type.
6094 for (auto &BB : F)
6095 for (auto &Phi : BB.phis())
6096 Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
6097
6098 // Remove any old phis that have been converted.
6099 for (auto *I : DeletedInstrs) {
6100 I->replaceAllUsesWith(UndefValue::get(I->getType()));
6101 I->eraseFromParent();
6102 }
6103
6104 return Changed;
6105 }
6106
6107 /// Return true if an ext(load) can be formed from an extension in
6108 /// \p MovedExts.
6109 bool CodeGenPrepare::canFormExtLd(
6110 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
6111 Instruction *&Inst, bool HasPromoted) {
6112 for (auto *MovedExtInst : MovedExts) {
6113 if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
6114 LI = cast<LoadInst>(MovedExtInst->getOperand(0));
6115 Inst = MovedExtInst;
6116 break;
6117 }
6118 }
6119 if (!LI)
6120 return false;
6121
6122 // If they're already in the same block, there's nothing to do.
6123 // Make the cheap checks first if we did not promote.
6124 // If we promoted, we need to check if it is indeed profitable.
6125 if (!HasPromoted && LI->getParent() == Inst->getParent())
6126 return false;
6127
6128 return TLI->isExtLoad(LI, Inst, *DL);
6129 }
6130
6131 /// Move a zext or sext fed by a load into the same basic block as the load,
6132 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
6133 /// extend into the load.
6134 ///
6135 /// E.g.,
6136 /// \code
6137 /// %ld = load i32* %addr
6138 /// %add = add nuw i32 %ld, 4
6139 /// %zext = zext i32 %add to i64
6140 /// \endcode
6141 /// =>
6142 /// \code
6143 /// %ld = load i32* %addr
6144 /// %zext = zext i32 %ld to i64
6145 /// %add = add nuw i64 %zext, 4
6146 /// \endcode
6147 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
6148 /// allows us to match zext(load i32*) to i64.
6149 ///
6150 /// Also, try to promote the computations used to obtain a sign-extended
6151 /// value used in memory accesses.
6152 /// E.g.,
6153 /// \code
6154 /// a = add nsw i32 b, 3
6155 /// d = sext i32 a to i64
6156 /// e = getelementptr ..., i64 d
6157 /// \endcode
6158 /// =>
6159 /// \code
6160 /// f = sext i32 b to i64
6161 /// a = add nsw i64 f, 3
6162 /// e = getelementptr ..., i64 a
6163 /// \endcode
6164 ///
6165 /// \p Inst [in/out] the extension may be modified during the process if some
6166 /// promotions apply.
6167 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { 6168 bool AllowPromotionWithoutCommonHeader = false; 6169 /// See if it is an interesting sext operations for the address type 6170 /// promotion before trying to promote it, e.g., the ones with the right 6171 /// type and used in memory accesses. 6172 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( 6173 *Inst, AllowPromotionWithoutCommonHeader); 6174 TypePromotionTransaction TPT(RemovedInsts); 6175 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 6176 TPT.getRestorationPoint(); 6177 SmallVector<Instruction *, 1> Exts; 6178 SmallVector<Instruction *, 2> SpeculativelyMovedExts; 6179 Exts.push_back(Inst); 6180 6181 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); 6182 6183 // Look for a load being extended. 6184 LoadInst *LI = nullptr; 6185 Instruction *ExtFedByLoad; 6186 6187 // Try to promote a chain of computation if it allows to form an extended 6188 // load. 6189 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { 6190 assert(LI && ExtFedByLoad && "Expect a valid load and extension"); 6191 TPT.commit(); 6192 // Move the extend into the same block as the load. 6193 ExtFedByLoad->moveAfter(LI); 6194 ++NumExtsMoved; 6195 Inst = ExtFedByLoad; 6196 return true; 6197 } 6198 6199 // Continue promoting SExts if known as considerable depending on targets. 6200 if (ATPConsiderable && 6201 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, 6202 HasPromoted, TPT, SpeculativelyMovedExts)) 6203 return true; 6204 6205 TPT.rollback(LastKnownGood); 6206 return false; 6207 } 6208 6209 // Perform address type promotion if doing so is profitable. 6210 // If AllowPromotionWithoutCommonHeader == false, we should find other sext 6211 // instructions that sign extended the same initial value. However, if 6212 // AllowPromotionWithoutCommonHeader == true, we expect promoting the 6213 // extension is just profitable. 6214 bool CodeGenPrepare::performAddressTypePromotion( 6215 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, 6216 bool HasPromoted, TypePromotionTransaction &TPT, 6217 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { 6218 bool Promoted = false; 6219 SmallPtrSet<Instruction *, 1> UnhandledExts; 6220 bool AllSeenFirst = true; 6221 for (auto *I : SpeculativelyMovedExts) { 6222 Value *HeadOfChain = I->getOperand(0); 6223 DenseMap<Value *, Instruction *>::iterator AlreadySeen = 6224 SeenChainsForSExt.find(HeadOfChain); 6225 // If there is an unhandled SExt which has the same header, try to promote 6226 // it as well. 6227 if (AlreadySeen != SeenChainsForSExt.end()) { 6228 if (AlreadySeen->second != nullptr) 6229 UnhandledExts.insert(AlreadySeen->second); 6230 AllSeenFirst = false; 6231 } 6232 } 6233 6234 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && 6235 SpeculativelyMovedExts.size() == 1)) { 6236 TPT.commit(); 6237 if (HasPromoted) 6238 Promoted = true; 6239 for (auto *I : SpeculativelyMovedExts) { 6240 Value *HeadOfChain = I->getOperand(0); 6241 SeenChainsForSExt[HeadOfChain] = nullptr; 6242 ValToSExtendedUses[HeadOfChain].push_back(I); 6243 } 6244 // Update Inst as promotion happen. 6245 Inst = SpeculativelyMovedExts.pop_back_val(); 6246 } else { 6247 // This is the first chain visited from the header, keep the current chain 6248 // as unhandled. Defer to promote this until we encounter another SExt 6249 // chain derived from the same header. 
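// (In other words, SeenChainsForSExt remembers this Inst keyed by the chain
// header; when a later sext chain starting from the same header is visited,
// the entry is found above and the deferred chain is promoted as well.)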
6250 for (auto *I : SpeculativelyMovedExts) { 6251 Value *HeadOfChain = I->getOperand(0); 6252 SeenChainsForSExt[HeadOfChain] = Inst; 6253 } 6254 return false; 6255 } 6256 6257 if (!AllSeenFirst && !UnhandledExts.empty()) 6258 for (auto *VisitedSExt : UnhandledExts) { 6259 if (RemovedInsts.count(VisitedSExt)) 6260 continue; 6261 TypePromotionTransaction TPT(RemovedInsts); 6262 SmallVector<Instruction *, 1> Exts; 6263 SmallVector<Instruction *, 2> Chains; 6264 Exts.push_back(VisitedSExt); 6265 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); 6266 TPT.commit(); 6267 if (HasPromoted) 6268 Promoted = true; 6269 for (auto *I : Chains) { 6270 Value *HeadOfChain = I->getOperand(0); 6271 // Mark this as handled. 6272 SeenChainsForSExt[HeadOfChain] = nullptr; 6273 ValToSExtendedUses[HeadOfChain].push_back(I); 6274 } 6275 } 6276 return Promoted; 6277 } 6278 6279 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 6280 BasicBlock *DefBB = I->getParent(); 6281 6282 // If the result of a {s|z}ext and its source are both live out, rewrite all 6283 // other uses of the source with result of extension. 6284 Value *Src = I->getOperand(0); 6285 if (Src->hasOneUse()) 6286 return false; 6287 6288 // Only do this xform if truncating is free. 6289 if (!TLI->isTruncateFree(I->getType(), Src->getType())) 6290 return false; 6291 6292 // Only safe to perform the optimization if the source is also defined in 6293 // this block. 6294 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 6295 return false; 6296 6297 bool DefIsLiveOut = false; 6298 for (User *U : I->users()) { 6299 Instruction *UI = cast<Instruction>(U); 6300 6301 // Figure out which BB this ext is used in. 6302 BasicBlock *UserBB = UI->getParent(); 6303 if (UserBB == DefBB) continue; 6304 DefIsLiveOut = true; 6305 break; 6306 } 6307 if (!DefIsLiveOut) 6308 return false; 6309 6310 // Make sure none of the uses are PHI nodes. 6311 for (User *U : Src->users()) { 6312 Instruction *UI = cast<Instruction>(U); 6313 BasicBlock *UserBB = UI->getParent(); 6314 if (UserBB == DefBB) continue; 6315 // Be conservative. We don't want this xform to end up introducing 6316 // reloads just before load / store instructions. 6317 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 6318 return false; 6319 } 6320 6321 // InsertedTruncs - Only insert one trunc in each block once. 6322 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 6323 6324 bool MadeChange = false; 6325 for (Use &U : Src->uses()) { 6326 Instruction *User = cast<Instruction>(U.getUser()); 6327 6328 // Figure out which BB this ext is used in. 6329 BasicBlock *UserBB = User->getParent(); 6330 if (UserBB == DefBB) continue; 6331 6332 // Both src and def are live in this block. Rewrite the use. 6333 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 6334 6335 if (!InsertedTrunc) { 6336 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 6337 assert(InsertPt != UserBB->end()); 6338 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 6339 InsertedInsts.insert(InsertedTrunc); 6340 } 6341 6342 // Replace a use of the {s|z}ext source with a use of the result. 6343 U = InsertedTrunc; 6344 ++NumExtUses; 6345 MadeChange = true; 6346 } 6347 6348 return MadeChange; 6349 } 6350 6351 // Find loads whose uses only use some of the loaded value's bits. 
Add an "and" 6352 // just after the load if the target can fold this into one extload instruction, 6353 // with the hope of eliminating some of the other later "and" instructions using 6354 // the loaded value. "and"s that are made trivially redundant by the insertion 6355 // of the new "and" are removed by this function, while others (e.g. those whose 6356 // path from the load goes through a phi) are left for isel to potentially 6357 // remove. 6358 // 6359 // For example: 6360 // 6361 // b0: 6362 // x = load i32 6363 // ... 6364 // b1: 6365 // y = and x, 0xff 6366 // z = use y 6367 // 6368 // becomes: 6369 // 6370 // b0: 6371 // x = load i32 6372 // x' = and x, 0xff 6373 // ... 6374 // b1: 6375 // z = use x' 6376 // 6377 // whereas: 6378 // 6379 // b0: 6380 // x1 = load i32 6381 // ... 6382 // b1: 6383 // x2 = load i32 6384 // ... 6385 // b2: 6386 // x = phi x1, x2 6387 // y = and x, 0xff 6388 // 6389 // becomes (after a call to optimizeLoadExt for each load): 6390 // 6391 // b0: 6392 // x1 = load i32 6393 // x1' = and x1, 0xff 6394 // ... 6395 // b1: 6396 // x2 = load i32 6397 // x2' = and x2, 0xff 6398 // ... 6399 // b2: 6400 // x = phi x1', x2' 6401 // y = and x, 0xff 6402 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 6403 if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) 6404 return false; 6405 6406 // Skip loads we've already transformed. 6407 if (Load->hasOneUse() && 6408 InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) 6409 return false; 6410 6411 // Look at all uses of Load, looking through phis, to determine how many bits 6412 // of the loaded value are needed. 6413 SmallVector<Instruction *, 8> WorkList; 6414 SmallPtrSet<Instruction *, 16> Visited; 6415 SmallVector<Instruction *, 8> AndsToMaybeRemove; 6416 for (auto *U : Load->users()) 6417 WorkList.push_back(cast<Instruction>(U)); 6418 6419 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 6420 unsigned BitWidth = LoadResultVT.getSizeInBits(); 6421 APInt DemandBits(BitWidth, 0); 6422 APInt WidestAndBits(BitWidth, 0); 6423 6424 while (!WorkList.empty()) { 6425 Instruction *I = WorkList.back(); 6426 WorkList.pop_back(); 6427 6428 // Break use-def graph loops. 6429 if (!Visited.insert(I).second) 6430 continue; 6431 6432 // For a PHI node, push all of its users. 6433 if (auto *Phi = dyn_cast<PHINode>(I)) { 6434 for (auto *U : Phi->users()) 6435 WorkList.push_back(cast<Instruction>(U)); 6436 continue; 6437 } 6438 6439 switch (I->getOpcode()) { 6440 case Instruction::And: { 6441 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 6442 if (!AndC) 6443 return false; 6444 APInt AndBits = AndC->getValue(); 6445 DemandBits |= AndBits; 6446 // Keep track of the widest and mask we see. 
6447 if (AndBits.ugt(WidestAndBits)) 6448 WidestAndBits = AndBits; 6449 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 6450 AndsToMaybeRemove.push_back(I); 6451 break; 6452 } 6453 6454 case Instruction::Shl: { 6455 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 6456 if (!ShlC) 6457 return false; 6458 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 6459 DemandBits.setLowBits(BitWidth - ShiftAmt); 6460 break; 6461 } 6462 6463 case Instruction::Trunc: { 6464 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 6465 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 6466 DemandBits.setLowBits(TruncBitWidth); 6467 break; 6468 } 6469 6470 default: 6471 return false; 6472 } 6473 } 6474 6475 uint32_t ActiveBits = DemandBits.getActiveBits(); 6476 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 6477 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 6478 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 6479 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 6480 // followed by an AND. 6481 // TODO: Look into removing this restriction by fixing backends to either 6482 // return false for isLoadExtLegal for i1 or have them select this pattern to 6483 // a single instruction. 6484 // 6485 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 6486 // mask, since these are the only ands that will be removed by isel. 6487 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || 6488 WidestAndBits != DemandBits) 6489 return false; 6490 6491 LLVMContext &Ctx = Load->getType()->getContext(); 6492 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 6493 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 6494 6495 // Reject cases that won't be matched as extloads. 6496 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 6497 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 6498 return false; 6499 6500 IRBuilder<> Builder(Load->getNextNode()); 6501 auto *NewAnd = cast<Instruction>( 6502 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 6503 // Mark this instruction as "inserted by CGP", so that other 6504 // optimizations don't touch it. 6505 InsertedInsts.insert(NewAnd); 6506 6507 // Replace all uses of load with new and (except for the use of load in the 6508 // new and itself). 6509 Load->replaceAllUsesWith(NewAnd); 6510 NewAnd->setOperand(0, Load); 6511 6512 // Remove any and instructions that are now redundant. 6513 for (auto *And : AndsToMaybeRemove) 6514 // Check that the and mask is the same as the one we decided to put on the 6515 // new and. 6516 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 6517 And->replaceAllUsesWith(NewAnd); 6518 if (&*CurInstIterator == And) 6519 CurInstIterator = std::next(And->getIterator()); 6520 And->eraseFromParent(); 6521 ++NumAndUses; 6522 } 6523 6524 ++NumAndsAdded; 6525 return true; 6526 } 6527 6528 /// Check if V (an operand of a select instruction) is an expensive instruction 6529 /// that is only used once. 6530 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 6531 auto *I = dyn_cast<Instruction>(V); 6532 // If it's safe to speculatively execute, then it should not have side 6533 // effects; therefore, it's safe to sink and possibly *not* execute. 
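// (For example, a single-use fdiv feeding only one arm of a select is a
// typical candidate: it can be sunk into the corresponding branch and skipped
// entirely when the other arm is taken. The actual criterion is the TTI cost
// query below.)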
6534 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 6535 TTI->getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency) >= 6536 TargetTransformInfo::TCC_Expensive; 6537 } 6538 6539 /// Returns true if a SelectInst should be turned into an explicit branch. 6540 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 6541 const TargetLowering *TLI, 6542 SelectInst *SI) { 6543 // If even a predictable select is cheap, then a branch can't be cheaper. 6544 if (!TLI->isPredictableSelectExpensive()) 6545 return false; 6546 6547 // FIXME: This should use the same heuristics as IfConversion to determine 6548 // whether a select is better represented as a branch. 6549 6550 // If metadata tells us that the select condition is obviously predictable, 6551 // then we want to replace the select with a branch. 6552 uint64_t TrueWeight, FalseWeight; 6553 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { 6554 uint64_t Max = std::max(TrueWeight, FalseWeight); 6555 uint64_t Sum = TrueWeight + FalseWeight; 6556 if (Sum != 0) { 6557 auto Probability = BranchProbability::getBranchProbability(Max, Sum); 6558 if (Probability > TLI->getPredictableBranchThreshold()) 6559 return true; 6560 } 6561 } 6562 6563 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 6564 6565 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 6566 // comparison condition. If the compare has more than one use, there's 6567 // probably another cmov or setcc around, so it's not worth emitting a branch. 6568 if (!Cmp || !Cmp->hasOneUse()) 6569 return false; 6570 6571 // If either operand of the select is expensive and only needed on one side 6572 // of the select, we should form a branch. 6573 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 6574 sinkSelectOperand(TTI, SI->getFalseValue())) 6575 return true; 6576 6577 return false; 6578 } 6579 6580 /// If \p isTrue is true, return the true value of \p SI, otherwise return 6581 /// false value of \p SI. If the true/false value of \p SI is defined by any 6582 /// select instructions in \p Selects, look through the defining select 6583 /// instruction until the true/false value is not defined in \p Selects. 6584 static Value *getTrueOrFalseValue( 6585 SelectInst *SI, bool isTrue, 6586 const SmallPtrSet<const Instruction *, 2> &Selects) { 6587 Value *V = nullptr; 6588 6589 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); 6590 DefSI = dyn_cast<SelectInst>(V)) { 6591 assert(DefSI->getCondition() == SI->getCondition() && 6592 "The condition of DefSI does not match with SI"); 6593 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); 6594 } 6595 6596 assert(V && "Failed to get select true/false value"); 6597 return V; 6598 } 6599 6600 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) { 6601 assert(Shift->isShift() && "Expected a shift"); 6602 6603 // If this is (1) a vector shift, (2) shifts by scalars are cheaper than 6604 // general vector shifts, and (3) the shift amount is a select-of-splatted 6605 // values, hoist the shifts before the select: 6606 // shift Op0, (select Cond, TVal, FVal) --> 6607 // select Cond, (shift Op0, TVal), (shift Op0, FVal) 6608 // 6609 // This is inverting a generic IR transform when we know that the cost of a 6610 // general vector shift is more than the cost of 2 shift-by-scalars. 6611 // We can't do this effectively in SDAG because we may not be able to 6612 // determine if the select operands are splats from within a basic block. 
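// (Concrete sketch, with the vector type chosen only for illustration:
//   %amt = select i1 %c, <4 x i32> %tsplat, <4 x i32> %fsplat
//   %r   = shl <4 x i32> %x, %amt
// becomes
//   %t = shl <4 x i32> %x, %tsplat
//   %f = shl <4 x i32> %x, %fsplat
//   %r = select i1 %c, <4 x i32> %t, <4 x i32> %f
// provided %tsplat and %fsplat are splat values.)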
6613 Type *Ty = Shift->getType(); 6614 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) 6615 return false; 6616 Value *Cond, *TVal, *FVal; 6617 if (!match(Shift->getOperand(1), 6618 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 6619 return false; 6620 if (!isSplatValue(TVal) || !isSplatValue(FVal)) 6621 return false; 6622 6623 IRBuilder<> Builder(Shift); 6624 BinaryOperator::BinaryOps Opcode = Shift->getOpcode(); 6625 Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal); 6626 Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal); 6627 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); 6628 Shift->replaceAllUsesWith(NewSel); 6629 Shift->eraseFromParent(); 6630 return true; 6631 } 6632 6633 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) { 6634 Intrinsic::ID Opcode = Fsh->getIntrinsicID(); 6635 assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) && 6636 "Expected a funnel shift"); 6637 6638 // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper 6639 // than general vector shifts, and (3) the shift amount is select-of-splatted 6640 // values, hoist the funnel shifts before the select: 6641 // fsh Op0, Op1, (select Cond, TVal, FVal) --> 6642 // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal) 6643 // 6644 // This is inverting a generic IR transform when we know that the cost of a 6645 // general vector shift is more than the cost of 2 shift-by-scalars. 6646 // We can't do this effectively in SDAG because we may not be able to 6647 // determine if the select operands are splats from within a basic block. 6648 Type *Ty = Fsh->getType(); 6649 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) 6650 return false; 6651 Value *Cond, *TVal, *FVal; 6652 if (!match(Fsh->getOperand(2), 6653 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 6654 return false; 6655 if (!isSplatValue(TVal) || !isSplatValue(FVal)) 6656 return false; 6657 6658 IRBuilder<> Builder(Fsh); 6659 Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1); 6660 Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, TVal }); 6661 Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, FVal }); 6662 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); 6663 Fsh->replaceAllUsesWith(NewSel); 6664 Fsh->eraseFromParent(); 6665 return true; 6666 } 6667 6668 /// If we have a SelectInst that will likely profit from branch prediction, 6669 /// turn it into a branch. 6670 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 6671 if (DisableSelectToBranch) 6672 return false; 6673 6674 // Find all consecutive select instructions that share the same condition. 6675 SmallVector<SelectInst *, 2> ASI; 6676 ASI.push_back(SI); 6677 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); 6678 It != SI->getParent()->end(); ++It) { 6679 SelectInst *I = dyn_cast<SelectInst>(&*It); 6680 if (I && SI->getCondition() == I->getCondition()) { 6681 ASI.push_back(I); 6682 } else { 6683 break; 6684 } 6685 } 6686 6687 SelectInst *LastSI = ASI.back(); 6688 // Increment the current iterator to skip all the rest of select instructions 6689 // because they will be either "not lowered" or "all lowered" to branch. 6690 CurInstIterator = std::next(LastSI->getIterator()); 6691 6692 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 6693 6694 // Can we convert the 'select' to CF ? 
6695 if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) 6696 return false; 6697 6698 TargetLowering::SelectSupportKind SelectKind; 6699 if (VectorCond) 6700 SelectKind = TargetLowering::VectorMaskSelect; 6701 else if (SI->getType()->isVectorTy()) 6702 SelectKind = TargetLowering::ScalarCondVectorVal; 6703 else 6704 SelectKind = TargetLowering::ScalarValSelect; 6705 6706 if (TLI->isSelectSupported(SelectKind) && 6707 (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize || 6708 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))) 6709 return false; 6710 6711 // The DominatorTree needs to be rebuilt by any consumers after this 6712 // transformation. We simply reset here rather than setting the ModifiedDT 6713 // flag to avoid restarting the function walk in runOnFunction for each 6714 // select optimized. 6715 DT.reset(); 6716 6717 // Transform a sequence like this: 6718 // start: 6719 // %cmp = cmp uge i32 %a, %b 6720 // %sel = select i1 %cmp, i32 %c, i32 %d 6721 // 6722 // Into: 6723 // start: 6724 // %cmp = cmp uge i32 %a, %b 6725 // %cmp.frozen = freeze %cmp 6726 // br i1 %cmp.frozen, label %select.true, label %select.false 6727 // select.true: 6728 // br label %select.end 6729 // select.false: 6730 // br label %select.end 6731 // select.end: 6732 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 6733 // 6734 // %cmp should be frozen, otherwise it may introduce undefined behavior. 6735 // In addition, we may sink instructions that produce %c or %d from 6736 // the entry block into the destination(s) of the new branch. 6737 // If the true or false blocks do not contain a sunken instruction, that 6738 // block and its branch may be optimized away. In that case, one side of the 6739 // first branch will point directly to select.end, and the corresponding PHI 6740 // predecessor block will be the start block. 6741 6742 // First, we split the block containing the select into 2 blocks. 6743 BasicBlock *StartBlock = SI->getParent(); 6744 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); 6745 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 6746 BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency()); 6747 6748 // Delete the unconditional branch that was just created by the split. 6749 StartBlock->getTerminator()->eraseFromParent(); 6750 6751 // These are the new basic blocks for the conditional branch. 6752 // At least one will become an actual new basic block. 6753 BasicBlock *TrueBlock = nullptr; 6754 BasicBlock *FalseBlock = nullptr; 6755 BranchInst *TrueBranch = nullptr; 6756 BranchInst *FalseBranch = nullptr; 6757 6758 // Sink expensive instructions into the conditional blocks to avoid executing 6759 // them speculatively. 
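// (Illustrative example: given
//   %div = fdiv double %a, %b        ; expensive, single use
//   %sel = select i1 %cmp, double %div, double 1.0
// the fdiv is moved into the "select.true.sink" block created below, so it is
// only executed when %cmp is true.)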
6760 for (SelectInst *SI : ASI) { 6761 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 6762 if (TrueBlock == nullptr) { 6763 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 6764 EndBlock->getParent(), EndBlock); 6765 TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 6766 TrueBranch->setDebugLoc(SI->getDebugLoc()); 6767 } 6768 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 6769 TrueInst->moveBefore(TrueBranch); 6770 } 6771 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 6772 if (FalseBlock == nullptr) { 6773 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 6774 EndBlock->getParent(), EndBlock); 6775 FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6776 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6777 } 6778 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 6779 FalseInst->moveBefore(FalseBranch); 6780 } 6781 } 6782 6783 // If there was nothing to sink, then arbitrarily choose the 'false' side 6784 // for a new input value to the PHI. 6785 if (TrueBlock == FalseBlock) { 6786 assert(TrueBlock == nullptr && 6787 "Unexpected basic block transform while optimizing select"); 6788 6789 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 6790 EndBlock->getParent(), EndBlock); 6791 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 6792 FalseBranch->setDebugLoc(SI->getDebugLoc()); 6793 } 6794 6795 // Insert the real conditional branch based on the original condition. 6796 // If we did not create a new block for one of the 'true' or 'false' paths 6797 // of the condition, it means that side of the branch goes to the end block 6798 // directly and the path originates from the start block from the point of 6799 // view of the new PHI. 6800 BasicBlock *TT, *FT; 6801 if (TrueBlock == nullptr) { 6802 TT = EndBlock; 6803 FT = FalseBlock; 6804 TrueBlock = StartBlock; 6805 } else if (FalseBlock == nullptr) { 6806 TT = TrueBlock; 6807 FT = EndBlock; 6808 FalseBlock = StartBlock; 6809 } else { 6810 TT = TrueBlock; 6811 FT = FalseBlock; 6812 } 6813 IRBuilder<> IB(SI); 6814 auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen"); 6815 IB.CreateCondBr(CondFr, TT, FT, SI); 6816 6817 SmallPtrSet<const Instruction *, 2> INS; 6818 INS.insert(ASI.begin(), ASI.end()); 6819 // Use reverse iterator because later select may use the value of the 6820 // earlier select, and we need to propagate value through earlier select 6821 // to get the PHI operand. 6822 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { 6823 SelectInst *SI = *It; 6824 // The select itself is replaced with a PHI Node. 6825 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 6826 PN->takeName(SI); 6827 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); 6828 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); 6829 PN->setDebugLoc(SI->getDebugLoc()); 6830 6831 SI->replaceAllUsesWith(PN); 6832 SI->eraseFromParent(); 6833 INS.erase(SI); 6834 ++NumSelectsExpanded; 6835 } 6836 6837 // Instruct OptimizeBlock to skip to the next block. 6838 CurInstIterator = StartBlock->end(); 6839 return true; 6840 } 6841 6842 /// Some targets only accept certain types for splat inputs. For example a VDUP 6843 /// in MVE takes a GPR (integer) register, and the instruction that incorporate 6844 /// a VDUP (such as a VADD qd, qm, rm) also require a gpr register. 
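/// For example (a sketch only, subject to TLI->shouldConvertSplatType()): a
/// splat of a float value may be rewritten below as a bitcast of the scalar to
/// i32, an i32 vector splat, and a bitcast of the splat back to the original
/// vector type, so that the splat input can be kept in a GPR.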
6845 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 6846 // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only 6847 if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), 6848 m_Undef(), m_ZeroMask()))) 6849 return false; 6850 Type *NewType = TLI->shouldConvertSplatType(SVI); 6851 if (!NewType) 6852 return false; 6853 6854 auto *SVIVecType = cast<FixedVectorType>(SVI->getType()); 6855 assert(!NewType->isVectorTy() && "Expected a scalar type!"); 6856 assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() && 6857 "Expected a type of the same size!"); 6858 auto *NewVecType = 6859 FixedVectorType::get(NewType, SVIVecType->getNumElements()); 6860 6861 // Create a bitcast (shuffle (insert (bitcast(..)))) 6862 IRBuilder<> Builder(SVI->getContext()); 6863 Builder.SetInsertPoint(SVI); 6864 Value *BC1 = Builder.CreateBitCast( 6865 cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType); 6866 Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1); 6867 Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType); 6868 6869 SVI->replaceAllUsesWith(BC2); 6870 RecursivelyDeleteTriviallyDeadInstructions( 6871 SVI, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); }); 6872 6873 // Also hoist the bitcast up to its operand if it they are not in the same 6874 // block. 6875 if (auto *BCI = dyn_cast<Instruction>(BC1)) 6876 if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0))) 6877 if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) && 6878 !Op->isTerminator() && !Op->isEHPad()) 6879 BCI->moveAfter(Op); 6880 6881 return true; 6882 } 6883 6884 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) { 6885 // If the operands of I can be folded into a target instruction together with 6886 // I, duplicate and sink them. 6887 SmallVector<Use *, 4> OpsToSink; 6888 if (!TLI->shouldSinkOperands(I, OpsToSink)) 6889 return false; 6890 6891 // OpsToSink can contain multiple uses in a use chain (e.g. 6892 // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating 6893 // uses must come first, so we process the ops in reverse order so as to not 6894 // create invalid IR. 6895 BasicBlock *TargetBB = I->getParent(); 6896 bool Changed = false; 6897 SmallVector<Use *, 4> ToReplace; 6898 for (Use *U : reverse(OpsToSink)) { 6899 auto *UI = cast<Instruction>(U->get()); 6900 if (UI->getParent() == TargetBB || isa<PHINode>(UI)) 6901 continue; 6902 ToReplace.push_back(U); 6903 } 6904 6905 SetVector<Instruction *> MaybeDead; 6906 DenseMap<Instruction *, Instruction *> NewInstructions; 6907 Instruction *InsertPoint = I; 6908 for (Use *U : ToReplace) { 6909 auto *UI = cast<Instruction>(U->get()); 6910 Instruction *NI = UI->clone(); 6911 NewInstructions[UI] = NI; 6912 MaybeDead.insert(UI); 6913 LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n"); 6914 NI->insertBefore(InsertPoint); 6915 InsertPoint = NI; 6916 InsertedInsts.insert(NI); 6917 6918 // Update the use for the new instruction, making sure that we update the 6919 // sunk instruction uses, if it is part of a chain that has already been 6920 // sunk. 6921 Instruction *OldI = cast<Instruction>(U->getUser()); 6922 if (NewInstructions.count(OldI)) 6923 NewInstructions[OldI]->setOperand(U->getOperandNo(), NI); 6924 else 6925 U->set(NI); 6926 Changed = true; 6927 } 6928 6929 // Remove instructions that are dead after sinking. 
6930 for (auto *I : MaybeDead) { 6931 if (!I->hasNUsesOrMore(1)) { 6932 LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n"); 6933 I->eraseFromParent(); 6934 } 6935 } 6936 6937 return Changed; 6938 } 6939 6940 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 6941 Value *Cond = SI->getCondition(); 6942 Type *OldType = Cond->getType(); 6943 LLVMContext &Context = Cond->getContext(); 6944 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 6945 unsigned RegWidth = RegType.getSizeInBits(); 6946 6947 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 6948 return false; 6949 6950 // If the register width is greater than the type width, expand the condition 6951 // of the switch instruction and each case constant to the width of the 6952 // register. By widening the type of the switch condition, subsequent 6953 // comparisons (for case comparisons) will not need to be extended to the 6954 // preferred register width, so we will potentially eliminate N-1 extends, 6955 // where N is the number of cases in the switch. 6956 auto *NewType = Type::getIntNTy(Context, RegWidth); 6957 6958 // Zero-extend the switch condition and case constants unless the switch 6959 // condition is a function argument that is already being sign-extended. 6960 // In that case, we can avoid an unnecessary mask/extension by sign-extending 6961 // everything instead. 6962 Instruction::CastOps ExtType = Instruction::ZExt; 6963 if (auto *Arg = dyn_cast<Argument>(Cond)) 6964 if (Arg->hasSExtAttr()) 6965 ExtType = Instruction::SExt; 6966 6967 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 6968 ExtInst->insertBefore(SI); 6969 ExtInst->setDebugLoc(SI->getDebugLoc()); 6970 SI->setCondition(ExtInst); 6971 for (auto Case : SI->cases()) { 6972 APInt NarrowConst = Case.getCaseValue()->getValue(); 6973 APInt WideConst = (ExtType == Instruction::ZExt) ? 6974 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); 6975 Case.setValue(ConstantInt::get(Context, WideConst)); 6976 } 6977 6978 return true; 6979 } 6980 6981 6982 namespace { 6983 6984 /// Helper class to promote a scalar operation to a vector one. 6985 /// This class is used to move downward extractelement transition. 6986 /// E.g., 6987 /// a = vector_op <2 x i32> 6988 /// b = extractelement <2 x i32> a, i32 0 6989 /// c = scalar_op b 6990 /// store c 6991 /// 6992 /// => 6993 /// a = vector_op <2 x i32> 6994 /// c = vector_op a (equivalent to scalar_op on the related lane) 6995 /// * d = extractelement <2 x i32> c, i32 0 6996 /// * store d 6997 /// Assuming both extractelement and store can be combine, we get rid of the 6998 /// transition. 6999 class VectorPromoteHelper { 7000 /// DataLayout associated with the current module. 7001 const DataLayout &DL; 7002 7003 /// Used to perform some checks on the legality of vector operations. 7004 const TargetLowering &TLI; 7005 7006 /// Used to estimated the cost of the promoted chain. 7007 const TargetTransformInfo &TTI; 7008 7009 /// The transition being moved downwards. 7010 Instruction *Transition; 7011 7012 /// The sequence of instructions to be promoted. 7013 SmallVector<Instruction *, 4> InstsToBePromoted; 7014 7015 /// Cost of combining a store and an extract. 7016 unsigned StoreExtractCombineCost; 7017 7018 /// Instruction that will be combined with the transition. 7019 Instruction *CombineInst = nullptr; 7020 7021 /// The instruction that represents the current end of the transition. 
7022 /// Since we are faking the promotion until we reach the end of the chain 7023 /// of computation, we need a way to get the current end of the transition. 7024 Instruction *getEndOfTransition() const { 7025 if (InstsToBePromoted.empty()) 7026 return Transition; 7027 return InstsToBePromoted.back(); 7028 } 7029 7030 /// Return the index of the original value in the transition. 7031 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 7032 /// c, is at index 0. 7033 unsigned getTransitionOriginalValueIdx() const { 7034 assert(isa<ExtractElementInst>(Transition) && 7035 "Other kind of transitions are not supported yet"); 7036 return 0; 7037 } 7038 7039 /// Return the index of the index in the transition. 7040 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 7041 /// is at index 1. 7042 unsigned getTransitionIdx() const { 7043 assert(isa<ExtractElementInst>(Transition) && 7044 "Other kind of transitions are not supported yet"); 7045 return 1; 7046 } 7047 7048 /// Get the type of the transition. 7049 /// This is the type of the original value. 7050 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 7051 /// transition is <2 x i32>. 7052 Type *getTransitionType() const { 7053 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 7054 } 7055 7056 /// Promote \p ToBePromoted by moving \p Def downward through. 7057 /// I.e., we have the following sequence: 7058 /// Def = Transition <ty1> a to <ty2> 7059 /// b = ToBePromoted <ty2> Def, ... 7060 /// => 7061 /// b = ToBePromoted <ty1> a, ... 7062 /// Def = Transition <ty1> ToBePromoted to <ty2> 7063 void promoteImpl(Instruction *ToBePromoted); 7064 7065 /// Check whether or not it is profitable to promote all the 7066 /// instructions enqueued to be promoted. 7067 bool isProfitableToPromote() { 7068 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 7069 unsigned Index = isa<ConstantInt>(ValIdx) 7070 ? cast<ConstantInt>(ValIdx)->getZExtValue() 7071 : -1; 7072 Type *PromotedType = getTransitionType(); 7073 7074 StoreInst *ST = cast<StoreInst>(CombineInst); 7075 unsigned AS = ST->getPointerAddressSpace(); 7076 // Check if this store is supported. 7077 if (!TLI.allowsMisalignedMemoryAccesses( 7078 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 7079 ST->getAlign())) { 7080 // If this is not supported, there is no way we can combine 7081 // the extract with the store. 7082 return false; 7083 } 7084 7085 // The scalar chain of computation has to pay for the transition 7086 // scalar to vector. 7087 // The vector chain has to account for the combining cost. 7088 InstructionCost ScalarCost = 7089 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 7090 InstructionCost VectorCost = StoreExtractCombineCost; 7091 enum TargetTransformInfo::TargetCostKind CostKind = 7092 TargetTransformInfo::TCK_RecipThroughput; 7093 for (const auto &Inst : InstsToBePromoted) { 7094 // Compute the cost. 7095 // By construction, all instructions being promoted are arithmetic ones. 7096 // Moreover, one argument is a constant that can be viewed as a splat 7097 // constant. 7098 Value *Arg0 = Inst->getOperand(0); 7099 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 7100 isa<ConstantFP>(Arg0); 7101 TargetTransformInfo::OperandValueKind Arg0OVK = 7102 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 7103 : TargetTransformInfo::OK_AnyValue; 7104 TargetTransformInfo::OperandValueKind Arg1OVK = 7105 !IsArg0Constant ? 
TargetTransformInfo::OK_UniformConstantValue 7106 : TargetTransformInfo::OK_AnyValue; 7107 ScalarCost += TTI.getArithmeticInstrCost( 7108 Inst->getOpcode(), Inst->getType(), CostKind, Arg0OVK, Arg1OVK); 7109 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 7110 CostKind, 7111 Arg0OVK, Arg1OVK); 7112 } 7113 LLVM_DEBUG( 7114 dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 7115 << ScalarCost << "\nVector: " << VectorCost << '\n'); 7116 return ScalarCost > VectorCost; 7117 } 7118 7119 /// Generate a constant vector with \p Val with the same 7120 /// number of elements as the transition. 7121 /// \p UseSplat defines whether or not \p Val should be replicated 7122 /// across the whole vector. 7123 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 7124 /// otherwise we generate a vector with as many undef as possible: 7125 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 7126 /// used at the index of the extract. 7127 Value *getConstantVector(Constant *Val, bool UseSplat) const { 7128 unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); 7129 if (!UseSplat) { 7130 // If we cannot determine where the constant must be, we have to 7131 // use a splat constant. 7132 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 7133 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 7134 ExtractIdx = CstVal->getSExtValue(); 7135 else 7136 UseSplat = true; 7137 } 7138 7139 ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount(); 7140 if (UseSplat) 7141 return ConstantVector::getSplat(EC, Val); 7142 7143 if (!EC.isScalable()) { 7144 SmallVector<Constant *, 4> ConstVec; 7145 UndefValue *UndefVal = UndefValue::get(Val->getType()); 7146 for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) { 7147 if (Idx == ExtractIdx) 7148 ConstVec.push_back(Val); 7149 else 7150 ConstVec.push_back(UndefVal); 7151 } 7152 return ConstantVector::get(ConstVec); 7153 } else 7154 llvm_unreachable( 7155 "Generate scalable vector for non-splat is unimplemented"); 7156 } 7157 7158 /// Check if promoting to a vector type an operand at \p OperandIdx 7159 /// in \p Use can trigger undefined behavior. 7160 static bool canCauseUndefinedBehavior(const Instruction *Use, 7161 unsigned OperandIdx) { 7162 // This is not safe to introduce undef when the operand is on 7163 // the right hand side of a division-like instruction. 7164 if (OperandIdx != 1) 7165 return false; 7166 switch (Use->getOpcode()) { 7167 default: 7168 return false; 7169 case Instruction::SDiv: 7170 case Instruction::UDiv: 7171 case Instruction::SRem: 7172 case Instruction::URem: 7173 return true; 7174 case Instruction::FDiv: 7175 case Instruction::FRem: 7176 return !Use->hasNoNaNs(); 7177 } 7178 llvm_unreachable(nullptr); 7179 } 7180 7181 public: 7182 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 7183 const TargetTransformInfo &TTI, Instruction *Transition, 7184 unsigned CombineCost) 7185 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 7186 StoreExtractCombineCost(CombineCost) { 7187 assert(Transition && "Do not know how to promote null"); 7188 } 7189 7190 /// Check if we can promote \p ToBePromoted to \p Type. 7191 bool canPromote(const Instruction *ToBePromoted) const { 7192 // We could support CastInst too. 7193 return isa<BinaryOperator>(ToBePromoted); 7194 } 7195 7196 /// Check if it is profitable to promote \p ToBePromoted 7197 /// by moving downward the transition through. 
7198 bool shouldPromote(const Instruction *ToBePromoted) const { 7199 // Promote only if all the operands can be statically expanded. 7200 // Indeed, we do not want to introduce any new kind of transitions. 7201 for (const Use &U : ToBePromoted->operands()) { 7202 const Value *Val = U.get(); 7203 if (Val == getEndOfTransition()) { 7204 // If the use is a division and the transition is on the rhs, 7205 // we cannot promote the operation, otherwise we may create a 7206 // division by zero. 7207 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 7208 return false; 7209 continue; 7210 } 7211 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 7212 !isa<ConstantFP>(Val)) 7213 return false; 7214 } 7215 // Check that the resulting operation is legal. 7216 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 7217 if (!ISDOpcode) 7218 return false; 7219 return StressStoreExtract || 7220 TLI.isOperationLegalOrCustom( 7221 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); 7222 } 7223 7224 /// Check whether or not \p Use can be combined 7225 /// with the transition. 7226 /// I.e., is it possible to do Use(Transition) => AnotherUse? 7227 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } 7228 7229 /// Record \p ToBePromoted as part of the chain to be promoted. 7230 void enqueueForPromotion(Instruction *ToBePromoted) { 7231 InstsToBePromoted.push_back(ToBePromoted); 7232 } 7233 7234 /// Set the instruction that will be combined with the transition. 7235 void recordCombineInstruction(Instruction *ToBeCombined) { 7236 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); 7237 CombineInst = ToBeCombined; 7238 } 7239 7240 /// Promote all the instructions enqueued for promotion if it is 7241 /// is profitable. 7242 /// \return True if the promotion happened, false otherwise. 7243 bool promote() { 7244 // Check if there is something to promote. 7245 // Right now, if we do not have anything to combine with, 7246 // we assume the promotion is not profitable. 7247 if (InstsToBePromoted.empty() || !CombineInst) 7248 return false; 7249 7250 // Check cost. 7251 if (!StressStoreExtract && !isProfitableToPromote()) 7252 return false; 7253 7254 // Promote. 7255 for (auto &ToBePromoted : InstsToBePromoted) 7256 promoteImpl(ToBePromoted); 7257 InstsToBePromoted.clear(); 7258 return true; 7259 } 7260 }; 7261 7262 } // end anonymous namespace 7263 7264 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { 7265 // At this point, we know that all the operands of ToBePromoted but Def 7266 // can be statically promoted. 7267 // For Def, we need to use its parameter in ToBePromoted: 7268 // b = ToBePromoted ty1 a 7269 // Def = Transition ty1 b to ty2 7270 // Move the transition down. 7271 // 1. Replace all uses of the promoted operation by the transition. 7272 // = ... b => = ... Def. 7273 assert(ToBePromoted->getType() == Transition->getType() && 7274 "The type of the result of the transition does not match " 7275 "the final type"); 7276 ToBePromoted->replaceAllUsesWith(Transition); 7277 // 2. Update the type of the uses. 7278 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. 7279 Type *TransitionTy = getTransitionType(); 7280 ToBePromoted->mutateType(TransitionTy); 7281 // 3. Update all the operands of the promoted operation with promoted 7282 // operands. 7283 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. 
7284 for (Use &U : ToBePromoted->operands()) { 7285 Value *Val = U.get(); 7286 Value *NewVal = nullptr; 7287 if (Val == Transition) 7288 NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); 7289 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || 7290 isa<ConstantFP>(Val)) { 7291 // Use a splat constant if it is not safe to use undef. 7292 NewVal = getConstantVector( 7293 cast<Constant>(Val), 7294 isa<UndefValue>(Val) || 7295 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); 7296 } else 7297 llvm_unreachable("Did you modified shouldPromote and forgot to update " 7298 "this?"); 7299 ToBePromoted->setOperand(U.getOperandNo(), NewVal); 7300 } 7301 Transition->moveAfter(ToBePromoted); 7302 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); 7303 } 7304 7305 /// Some targets can do store(extractelement) with one instruction. 7306 /// Try to push the extractelement towards the stores when the target 7307 /// has this feature and this is profitable. 7308 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { 7309 unsigned CombineCost = std::numeric_limits<unsigned>::max(); 7310 if (DisableStoreExtract || 7311 (!StressStoreExtract && 7312 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 7313 Inst->getOperand(1), CombineCost))) 7314 return false; 7315 7316 // At this point we know that Inst is a vector to scalar transition. 7317 // Try to move it down the def-use chain, until: 7318 // - We can combine the transition with its single use 7319 // => we got rid of the transition. 7320 // - We escape the current basic block 7321 // => we would need to check that we are moving it at a cheaper place and 7322 // we do not do that for now. 7323 BasicBlock *Parent = Inst->getParent(); 7324 LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 7325 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); 7326 // If the transition has more than one use, assume this is not going to be 7327 // beneficial. 7328 while (Inst->hasOneUse()) { 7329 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 7330 LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 7331 7332 if (ToBePromoted->getParent() != Parent) { 7333 LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" 7334 << ToBePromoted->getParent()->getName() 7335 << ") than the transition (" << Parent->getName() 7336 << ").\n"); 7337 return false; 7338 } 7339 7340 if (VPH.canCombine(ToBePromoted)) { 7341 LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' 7342 << "will be combined with: " << *ToBePromoted << '\n'); 7343 VPH.recordCombineInstruction(ToBePromoted); 7344 bool Changed = VPH.promote(); 7345 NumStoreExtractExposed += Changed; 7346 return Changed; 7347 } 7348 7349 LLVM_DEBUG(dbgs() << "Try promoting.\n"); 7350 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 7351 return false; 7352 7353 LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 7354 7355 VPH.enqueueForPromotion(ToBePromoted); 7356 Inst = ToBePromoted; 7357 } 7358 return false; 7359 } 7360 7361 /// For the instruction sequence of store below, F and I values 7362 /// are bundled together as an i64 value before being stored into memory. 7363 /// Sometimes it is more efficient to generate separate stores for F and I, 7364 /// which can remove the bitwise instructions or sink them to colder places. 
7365 ///
7366 /// (store (or (zext (bitcast F to i32) to i64),
7367 /// (shl (zext I to i64), 32)), addr) -->
7368 /// (store F, addr) and (store I, addr+4)
7369 ///
7370 /// Similarly, splitting other merged stores can also be beneficial, e.g.:
7371 /// For pair of {i32, i32}, i64 store --> two i32 stores.
7372 /// For pair of {i32, i16}, i64 store --> two i32 stores.
7373 /// For pair of {i16, i16}, i32 store --> two i16 stores.
7374 /// For pair of {i16, i8}, i32 store --> two i16 stores.
7375 /// For pair of {i8, i8}, i16 store --> two i8 stores.
7376 ///
7377 /// We allow each target to determine specifically which kind of splitting is
7378 /// supported.
7379 ///
7380 /// The store patterns are commonly seen in the simple code snippet below
7381 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
7382 /// void goo(const std::pair<int, float> &);
7383 /// hoo() {
7384 /// ...
7385 /// goo(std::make_pair(tmp, ftmp));
7386 /// ...
7387 /// }
7388 ///
7389 /// Although we already have similar splitting in DAG Combine, we duplicate
7390 /// it in CodeGenPrepare to catch the case in which the pattern spans
7391 /// multiple BBs. The logic in DAG Combine is kept to catch cases generated
7392 /// during code expansion.
7393 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
7394 const TargetLowering &TLI) {
7395 // Handle simple but common cases only.
7396 Type *StoreType = SI.getValueOperand()->getType();
7397
7398 // The code below assumes shifting a value by <number of bits>,
7399 // whereas scalable vectors would have to be shifted by
7400 // <2log(vscale) + number of bits> in order to store the
7401 // low/high parts. Bailing out for now.
7402 if (isa<ScalableVectorType>(StoreType))
7403 return false;
7404
7405 if (!DL.typeSizeEqualsStoreSize(StoreType) ||
7406 DL.getTypeSizeInBits(StoreType) == 0)
7407 return false;
7408
7409 unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
7410 Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
7411 if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
7412 return false;
7413
7414 // Don't split the store if it is volatile.
7415 if (SI.isVolatile())
7416 return false;
7417
7418 // Match the following patterns:
7419 // (store (or (zext LValue to i64),
7420 // (shl (zext HValue to i64), HalfValBitSize)), addr)
7421 // or
7422 // (store (or (shl (zext HValue to i64), HalfValBitSize),
7423 // (zext LValue to i64)), addr)
7424 // Expect both operands of the OR and the first operand of the SHL to have
7425 // only one use.
7426 Value *LValue, *HValue;
7427 if (!match(SI.getValueOperand(),
7428 m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
7429 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
7430 m_SpecificInt(HalfValBitSize))))))
7431 return false;
7432
7433 // Check that LValue and HValue are integers no wider than HalfValBitSize.
7434 if (!LValue->getType()->isIntegerTy() ||
7435 DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
7436 !HValue->getType()->isIntegerTy() ||
7437 DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
7438 return false;
7439
7440 // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
7441 // as the input of the target query.
7442 auto *LBC = dyn_cast<BitCastInst>(LValue);
7443 auto *HBC = dyn_cast<BitCastInst>(HValue);
7444 EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
7445 : EVT::getEVT(LValue->getType());
7446 EVT HighTy = HBC ?
EVT::getEVT(HBC->getOperand(0)->getType()) 7447 : EVT::getEVT(HValue->getType()); 7448 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) 7449 return false; 7450 7451 // Start to split store. 7452 IRBuilder<> Builder(SI.getContext()); 7453 Builder.SetInsertPoint(&SI); 7454 7455 // If LValue/HValue is a bitcast in another BB, create a new one in current 7456 // BB so it may be merged with the splitted stores by dag combiner. 7457 if (LBC && LBC->getParent() != SI.getParent()) 7458 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); 7459 if (HBC && HBC->getParent() != SI.getParent()) 7460 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); 7461 7462 bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); 7463 auto CreateSplitStore = [&](Value *V, bool Upper) { 7464 V = Builder.CreateZExtOrBitCast(V, SplitStoreType); 7465 Value *Addr = Builder.CreateBitCast( 7466 SI.getOperand(1), 7467 SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); 7468 Align Alignment = SI.getAlign(); 7469 const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper); 7470 if (IsOffsetStore) { 7471 Addr = Builder.CreateGEP( 7472 SplitStoreType, Addr, 7473 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); 7474 7475 // When splitting the store in half, naturally one half will retain the 7476 // alignment of the original wider store, regardless of whether it was 7477 // over-aligned or not, while the other will require adjustment. 7478 Alignment = commonAlignment(Alignment, HalfValBitSize / 8); 7479 } 7480 Builder.CreateAlignedStore(V, Addr, Alignment); 7481 }; 7482 7483 CreateSplitStore(LValue, false); 7484 CreateSplitStore(HValue, true); 7485 7486 // Delete the old store. 7487 SI.eraseFromParent(); 7488 return true; 7489 } 7490 7491 // Return true if the GEP has two operands, the first operand is of a sequential 7492 // type, and the second operand is a constant. 7493 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { 7494 gep_type_iterator I = gep_type_begin(*GEP); 7495 return GEP->getNumOperands() == 2 && 7496 I.isSequential() && 7497 isa<ConstantInt>(GEP->getOperand(1)); 7498 } 7499 7500 // Try unmerging GEPs to reduce liveness interference (register pressure) across 7501 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, 7502 // reducing liveness interference across those edges benefits global register 7503 // allocation. Currently handles only certain cases. 7504 // 7505 // For example, unmerge %GEPI and %UGEPI as below. 7506 // 7507 // ---------- BEFORE ---------- 7508 // SrcBlock: 7509 // ... 7510 // %GEPIOp = ... 7511 // ... 7512 // %GEPI = gep %GEPIOp, Idx 7513 // ... 7514 // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] 7515 // (* %GEPI is alive on the indirectbr edges due to other uses ahead) 7516 // (* %GEPIOp is alive on the indirectbr edges only because of it's used by 7517 // %UGEPI) 7518 // 7519 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) 7520 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) 7521 // ... 7522 // 7523 // DstBi: 7524 // ... 7525 // %UGEPI = gep %GEPIOp, UIdx 7526 // ... 7527 // --------------------------- 7528 // 7529 // ---------- AFTER ---------- 7530 // SrcBlock: 7531 // ... (same as above) 7532 // (* %GEPI is still alive on the indirectbr edges) 7533 // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the 7534 // unmerging) 7535 // ... 7536 // 7537 // DstBi: 7538 // ... 
//   %UGEPI = gep %GEPI, (UIdx-Idx)
//   ...
// ---------------------------
//
// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
// no longer alive on them.
//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting the
// merging of GEPs in the first place in InstCombiner::visitGetElementPtrInst(),
// so as not to disable further simplifications and optimizations that result
// from GEP merging.
//
// Note that this unmerging may increase the length of the data-flow critical
// path (the path from %GEPIOp to %UGEPI now goes through %GEPI), so it is a
// tradeoff between register pressure and critical-path length. Restricting it
// to the uncommon IndirectBr case minimizes the impact of a potentially longer
// critical path, if any, as well as the impact on compile time.
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
                         TargetTransformInfo::TCK_SizeAndLatency)
      > TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEPI is used outside SrcBlock, meaning it is alive on the
  // IndirectBr edge(s).
  if (find_if(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock) {
            return true;
          }
        }
        return false;
      }) == GEPI->users().end())
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to see whether unmerging would make GEPIOp no
  // longer alive on the IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI) continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that is fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp
    // is the pointer operand to it. If so, record it in the vector. If not,
    // give up.
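    // (The index types must also match so that the (UIdx-Idx) replacement
    // index computed below is well defined.)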
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
                           TargetTransformInfo::TCK_SizeAndLatency)
        > TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.size() == 0)
    return false;
  // Check the cost of materializing (UIdx-Idx).
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    unsigned ImmCost =
        TTI->getIntImmCost(NewIdx, GEPIIdx->getType(),
                           TargetTransformInfo::TCK_SizeAndLatency);
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge the UGEPIs by rebasing them on GEPI.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx =
        ConstantInt::get(GEPIIdx->getType(),
                         UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on IndirectBr edges).
  assert(find_if(GEPIOp->users(), [&](User *Usr) {
           return cast<Instruction>(Usr)->getParent() != SrcBlock;
         }) == GEPIOp->users().end() &&
         "GEPIOp is used outside SrcBlock");
  return true;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  // TODO: Move this into the switch on opcode below.
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
      LargeOffsetGEPMap.erase(P);
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
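    // (For example, a cast of a global's address that was hoisted out of a
    // loop into its preheader.)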
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      /// Sink a zext or sext into its user blocks if the target type doesn't
      /// fit in one register
      if (TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
          TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (auto *Cmp = dyn_cast<CmpInst>(I))
    if (optimizeCmp(Cmp, ModifiedDT))
      return true;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    bool Modified = optimizeLoadExt(LI);
    unsigned AS = LI->getPointerAddressSpace();
    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
    return Modified;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    unsigned AS = SI->getPointerAddressSpace();
    return optimizeMemoryInst(I, SI->getOperand(1),
                              SI->getOperand(0)->getType(), AS);
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  // TODO: Move this into the switch on opcode - it handles shifts already.
  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (CI && TLI->hasExtractBitsInsn())
      if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
        return true;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      NC->setDebugLoc(GEPI->getDebugLoc());
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
    return false;
  }

  if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    // freeze(icmp a, const) -> icmp (freeze a), const
    // This helps generate efficient conditional jumps.
    Instruction *CmpI = nullptr;
    if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
      CmpI = II;
    else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
      CmpI = F->getFastMathFlags().none() ?
                 F : nullptr;

    if (CmpI && CmpI->hasOneUse()) {
      auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
      bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
                    isa<ConstantPointerNull>(Op0);
      bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
                    isa<ConstantPointerNull>(Op1);
      if (Const0 || Const1) {
        if (!Const0 || !Const1) {
          auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
          F->takeName(FI);
          CmpI->setOperand(Const0 ? 1 : 0, F);
        }
        FI->replaceAllUsesWith(CmpI);
        FI->eraseFromParent();
        return true;
      }
    }
    return false;
  }

  if (tryToSinkFreeOperands(I))
    return true;

  switch (I->getOpcode()) {
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return optimizeShiftInst(cast<BinaryOperator>(I));
  case Instruction::Call:
    return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
  case Instruction::Select:
    return optimizeSelectInst(cast<SelectInst>(I));
  case Instruction::ShuffleVector:
    return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
  case Instruction::Switch:
    return optimizeSwitchInst(cast<SwitchInst>(I));
  case Instruction::ExtractElement:
    return optimizeExtractElementInst(cast<ExtractElementInst>(I));
  }

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
bool CodeGenPrepare::makeBitReverse(Instruction &I) {
  if (!I.getType()->isIntegerTy() ||
      !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
                                     TLI->getValueType(*DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(
      &I, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I)) {
        MadeBitReverse = MadeChange = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);

  return MadeChange;
}

// Some CGP optimizations may move or alter what's computed in a block. Check
// whether a dbg.value intrinsic could be pointed at a more appropriate operand.
bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
  assert(isa<DbgValueInst>(I));
  DbgValueInst &DVI = *cast<DbgValueInst>(I);

  // Does this dbg.value refer to a sunk address calculation?
  Value *Location = DVI.getVariableLocationOp(0);
  WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ?
                        SunkAddrVH : nullptr;
  if (SunkAddr) {
    // Point dbg.value at locally computed address, which should give the best
    // opportunity to be accurately lowered. This update may change the type of
    // pointer being referred to; however this makes no difference to debugging
    // information, and we can't generate bitcasts that may affect codegen.
    DVI.replaceVariableLocationOp(Location, SunkAddr);
    return true;
  }
  return false;
}

// A llvm.dbg.value may be using a value before its definition, due to
// optimizations in this pass and others. Scan for such dbg.values, and rescue
// them by moving the dbg.value to immediately after the value definition.
// FIXME: Ideally this should never be necessary, and this has the potential
// to re-order dbg.value intrinsics.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  DominatorTree DT(F);

  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI)
        continue;

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());

      if (!VI || VI->isTerminator())
        continue;

      // If VI is a phi in a block with an EHPad terminator, we can't insert
      // after it.
      if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
        continue;

      // If the defining instruction dominates the dbg.value, we do not need
      // to move the dbg.value.
      if (DT.dominates(VI, DVI))
        continue;

      LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
                        << *DVI << ' ' << *VI);
      DVI->removeFromParent();
      if (isa<PHINode>(VI))
        DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
      else
        DVI->insertAfter(VI);
      MadeChange = true;
      ++NumDbgValueMoved;
    }
  }
  return MadeChange;
}

/// Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    Instruction *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(),
               m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    // The merging of mostly empty BBs can cause a degenerate branch.
    if (TBB == FBB)
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp,
              m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
                                        m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    auto IsGoodCond = [](Value *Cond) {
      return match(
          Cond,
          m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
                                           m_LogicalOr(m_Value(), m_Value()))));
    };
    if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block: use the first condition directly in the
    // branch instruction and remove the no longer needed and/or instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes from
    // the newly generated BB (TmpBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions now
    // target the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just-created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    TBB->replacePhiUsesWith(&BB, TmpBB);

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));
      }
    }

    ModifiedDT = true;
    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
               TmpBB->dump());
  }
  return MadeChange;
}