//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
"disable-cgp-select2branch", cl::Hidden, cl::init(false), 82 cl::desc("Disable select to branch conversion.")); 83 84 static cl::opt<bool> AddrSinkUsingGEPs( 85 "addr-sink-using-gep", cl::Hidden, cl::init(false), 86 cl::desc("Address sinking in CGP using GEPs.")); 87 88 static cl::opt<bool> EnableAndCmpSinking( 89 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 90 cl::desc("Enable sinkinig and/cmp into branches.")); 91 92 static cl::opt<bool> DisableStoreExtract( 93 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 94 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 95 96 static cl::opt<bool> StressStoreExtract( 97 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 98 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 99 100 static cl::opt<bool> DisableExtLdPromotion( 101 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 102 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 103 "CodeGenPrepare")); 104 105 static cl::opt<bool> StressExtLdPromotion( 106 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 107 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 108 "optimization in CodeGenPrepare")); 109 110 namespace { 111 typedef SmallPtrSet<Instruction *, 16> SetOfInstrs; 112 struct TypeIsSExt { 113 Type *Ty; 114 bool IsSExt; 115 TypeIsSExt(Type *Ty, bool IsSExt) : Ty(Ty), IsSExt(IsSExt) {} 116 }; 117 typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy; 118 class TypePromotionTransaction; 119 120 class CodeGenPrepare : public FunctionPass { 121 /// TLI - Keep a pointer of a TargetLowering to consult for determining 122 /// transformation profitability. 123 const TargetMachine *TM; 124 const TargetLowering *TLI; 125 const TargetTransformInfo *TTI; 126 const TargetLibraryInfo *TLInfo; 127 128 /// CurInstIterator - As we scan instructions optimizing them, this is the 129 /// next instruction to optimize. Xforms that can invalidate this should 130 /// update it. 131 BasicBlock::iterator CurInstIterator; 132 133 /// Keeps track of non-local addresses that have been sunk into a block. 134 /// This allows us to avoid inserting duplicate code for blocks with 135 /// multiple load/stores of the same address. 136 ValueMap<Value*, Value*> SunkAddrs; 137 138 /// Keeps track of all truncates inserted for the current function. 139 SetOfInstrs InsertedTruncsSet; 140 /// Keeps track of the type of the related instruction before their 141 /// promotion for the current function. 142 InstrToOrigTy PromotedInsts; 143 144 /// ModifiedDT - If CFG is modified in anyway. 145 bool ModifiedDT; 146 147 /// OptSize - True if optimizing for size. 
  bool OptSize;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  const char *getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

private:
  bool EliminateFallThrough(Function &F);
  bool EliminateMostlyEmptyBlocks(Function &F);
  bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void EliminateMostlyEmptyBlock(BasicBlock *BB);
  bool OptimizeBlock(BasicBlock &BB, bool& ModifiedDT);
  bool OptimizeInst(Instruction *I, bool& ModifiedDT);
  bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
  bool OptimizeInlineAsmInst(CallInst *CS);
  bool OptimizeCallInst(CallInst *CI, bool& ModifiedDT);
  bool MoveExtToFormExtLoad(Instruction *&I);
  bool OptimizeExtUses(Instruction *I);
  bool OptimizeSelectInst(SelectInst *SI);
  bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool OptimizeExtractElementInst(Instruction *Inst);
  bool DupRetToEnableTailCallOpts(BasicBlock *BB);
  bool PlaceDbgValues(Function &F);
  bool sinkAndCmp(Function &F);
  bool ExtLdPromotion(TypePromotionTransaction &TPT, LoadInst *&LI,
                      Instruction *&Inst,
                      const SmallVectorImpl<Instruction *> &Exts,
                      unsigned CreatedInstCost);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
};
}

char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
                   "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedTruncsSet.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (TM)
    TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  OptSize = F.hasFnAttribute(Attribute::OptimizeForSize);

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); I++)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then ISel may not be able
  // to handle it properly. ISel will drop llvm.dbg.value if it cannot
  // find a node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  // If there is a mask, compare against zero, and branch that can be combined
  // into a single target instruction, push the mask and compare into branch
  // users. Do this before OptimizeBlock -> OptimizeInst ->
  // OptimizeCmpExpression, which perturbs the pattern being searched for.
  if (!DisableBranchOpts) {
    EverMadeChange |= sinkAndCmp(F);
    EverMadeChange |= splitBranchCondition(F);
  }

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= OptimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= EliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB;
    }
  }
  return Changed;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<User *> &AllRelocateCalls,
    DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> &
        RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls
  DenseMap<std::pair<unsigned, unsigned>, IntrinsicInst *> RelocateIdxMap;
  for (auto &U : AllRelocateCalls) {
    GCRelocateOperands ThisRelocate(U);
    IntrinsicInst *I = cast<IntrinsicInst>(U);
    auto K = std::make_pair(ThisRelocate.getBasePtrIndex(),
                            ThisRelocate.getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, I));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    IntrinsicInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(IntrinsicInst *RelocatedBase,
                          const SmallVectorImpl<IntrinsicInst *> &Targets) {
  bool MadeChange = false;
  for (auto &ToReplace : Targets) {
    GCRelocateOperands MasterRelocate(RelocatedBase);
    GCRelocateOperands ThisRelocate(ToReplace);

    assert(ThisRelocate.getBasePtrIndex() == MasterRelocate.getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ThisRelocate.getBasePtrIndex() == ThisRelocate.getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    Value *Base = ThisRelocate.getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ThisRelocate.getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // it could be cases like this:
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast no matter whether there is already one or not. In this way, we
    // can handle all cases, and the extra bitcast should be optimized away in
    // later passes.
    Instruction *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          cast<Instruction>(Builder.CreateBitCast(RelocatedBase, Base->getType()));
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
    Instruction *ReplacementInst = cast<Instruction>(Replacement);
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the original
    // derived pointer's type, cast the new derived pointer to match it. Same
    // reasoning as above.
    Instruction *ActualReplacement = ReplacementInst;
    if (ReplacementInst->getType() != ToReplace->getType()) {
      ActualReplacement =
          cast<Instruction>(Builder.CreateBitCast(ReplacementInst, ToReplace->getType()));
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<User *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (isGCRelocate(dyn_cast<Instruction>(U)))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(U);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// SinkCast - Sink the specified cast instruction into its user blocks
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
          CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                           InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI) {
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// CombineUAddWithOverflow - try to combine CI into a call to the
/// llvm.uadd.with.overflow intrinsic if possible.
///
/// Return true if any changes were made.
static bool CombineUAddWithOverflow(CmpInst *CI) {
  Value *A, *B;
  Instruction *AddI;
  if (!match(CI,
             m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
    return false;

  Type *Ty = AddI->getType();
  if (!isa<IntegerType>(Ty))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp:

  if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
    return false;

#ifndef NDEBUG
  // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  // for now:
  if (AddI->hasOneUse())
    assert(*AddI->user_begin() == CI && "expected!");
#endif

  Module *M = CI->getParent()->getParent()->getParent();
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);

  auto *InsertPt = AddI->hasOneUse() ? CI : AddI;

  auto *UAddWithOverflow =
      CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
  auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
  auto *Overflow =
      ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);

  CI->replaceAllUsesWith(Overflow);
  AddI->replaceAllUsesWith(UAdd);
  CI->eraseFromParent();
  AddI->eraseFromParent();
  return true;
}

/// SinkCmpExpression - Sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool SinkCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
          CmpInst::Create(CI->getOpcode(),
                          CI->getPredicate(), CI->getOperand(0),
                          CI->getOperand(1), "", InsertPt);
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

static bool OptimizeCmpExpression(CmpInst *CI) {
  if (SinkCmpExpression(CI))
    return true;

  if (CombineUAddWithOverflow(CI))
    return true;

  return false;
}

/// isExtractBitsCandidateUse - Check if the candidates could
/// be combined with shift instruction, which includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// SinkShiftAndTruncate - sink both shift and truncate instruction
/// to the use of truncate's BB.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.

    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// OptimizeExtractBits - sink the shift *right* instruction into user blocks if
/// the uses could potentially be combined with this shift instruction and
/// generate BitExtract instruction. It will only be applied if the architecture
/// supports BitExtract instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instruction are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if not
      // legal. In this case, we would like to sink both shift and truncate
      // instruction to the BB of TruncUse.
      // For example:
      // BB1:
      //   i64 shift.result = lshr i64 opnd, imm
      //   trunc.result = trunc shift.result to i16
      //
      // BB2:
      //   ----> We will have an implicit truncate here if the architecture
      //   does not have i16 compare.
      //   cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
  if (ShiftI->use_empty())
    ShiftI->eraseFromParent();

  return MadeChange;
}

// ScalarizeMaskedLoad() translates masked load intrinsic, like
//   <16 x i32> @llvm.masked.load(<16 x i32>* %addr, i32 align,
//                                <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks, loading the elements one-by-one if
// the appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  %3 = icmp eq i1 %2, true
//  br i1 %3, label %cond.load, label %else
//
// cond.load:                                        ; preds = %0
//  %4 = getelementptr i32* %1, i32 0
//  %5 = load i32* %4
//  %6 = insertelement <16 x i32> undef, i32 %5, i32 0
//  br label %else
//
// else:                                             ; preds = %0, %cond.load
//  %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ]
//  %7 = extractelement <16 x i1> %mask, i32 1
//  %8 = icmp eq i1 %7, true
//  br i1 %8, label %cond.load1, label %else2
//
// cond.load1:                                       ; preds = %else
//  %9 = getelementptr i32* %1, i32 1
//  %10 = load i32* %9
//  %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1
//  br label %else2
//
// else2:                                            ; preds = %else, %cond.load1
//  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
//  %12 = extractelement <16 x i1> %mask, i32 2
//  %13 = icmp eq i1 %12, true
//  br i1 %13, label %cond.load4, label %else5
//
static void ScalarizeMaskedLoad(CallInst *CI) {
  Value *Ptr  = CI->getArgOperand(0);
  Value *Src0 = CI->getArgOperand(3);
  Value *Mask = CI->getArgOperand(2);
  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
  Type *EltTy = VecType->getElementType();

  assert(VecType && "Unexpected return type of masked load intrinsic");

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  BasicBlock *CondBlock = nullptr;
  BasicBlock *PrevIfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Bitcast %addr from i8* to EltTy*
  Type *NewPtrType =
      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  Value *UndefVal = UndefValue::get(VecType);

  // The result vector
  Value *VResult = UndefVal;

  PHINode *Phi = nullptr;
  Value *PrevPhi = UndefVal;

  unsigned VectorWidth = VecType->getNumElements();
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
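    // Each iteration splits the current block twice (a "cond.load" block that
    // loads one element and an empty "else" block for the next element) and
    // rewires the old terminator into a conditional branch on the mask bit,
    // so the intrinsic becomes a linear chain of per-element blocks.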

    // Fill the "else" block, created in the previous iteration
    //
    //  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_load = icmp eq i1 %mask_1, true
    //  br i1 %to_load, label %cond.load, label %else
    //
    if (Idx > 0) {
      Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
      Phi->addIncoming(VResult, CondBlock);
      Phi->addIncoming(PrevPhi, PrevIfBlock);
      PrevPhi = Phi;
      VResult = Phi;
    }

    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
    Builder.SetInsertPoint(InsertPt);

    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    LoadInst *Load = Builder.CreateLoad(Gep, false);
    VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;
  }

  Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
  Phi->addIncoming(VResult, CondBlock);
  Phi->addIncoming(PrevPhi, PrevIfBlock);
  Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  CI->replaceAllUsesWith(NewI);
  CI->eraseFromParent();
}

// ScalarizeMaskedStore() translates masked store intrinsic, like
//   void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
//                           <16 x i1> %mask)
// to a chain of basic blocks that store the elements one-by-one if
// the appropriate mask bit is set
//
//   %1 = bitcast i8* %addr to i32*
//   %2 = extractelement <16 x i1> %mask, i32 0
//   %3 = icmp eq i1 %2, true
//   br i1 %3, label %cond.store, label %else
//
// cond.store:                                       ; preds = %0
//   %4 = extractelement <16 x i32> %val, i32 0
//   %5 = getelementptr i32* %1, i32 0
//   store i32 %4, i32* %5
//   br label %else
//
// else:                                             ; preds = %0, %cond.store
//   %6 = extractelement <16 x i1> %mask, i32 1
//   %7 = icmp eq i1 %6, true
//   br i1 %7, label %cond.store1, label %else2
//
// cond.store1:                                      ; preds = %else
//   %8 = extractelement <16 x i32> %val, i32 1
//   %9 = getelementptr i32* %1, i32 1
//   store i32 %8, i32* %9
//   br label %else2
//   . . .
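// The lowering mirrors ScalarizeMaskedLoad above, except that no result value
// is threaded between iterations, so no phi nodes or final select are needed.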
static void ScalarizeMaskedStore(CallInst *CI) {
  Value *Ptr  = CI->getArgOperand(1);
  Value *Src  = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(3);

  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
  Type *EltTy = VecType->getElementType();

  assert(VecType && "Unexpected data type in masked store intrinsic");

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Bitcast %addr from i8* to EltTy*
  Type *NewPtrType =
      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);

  unsigned VectorWidth = VecType->getNumElements();
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block, created in the previous iteration
    //
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_store = icmp eq i1 %mask_1, true
    //  br i1 %to_store, label %cond.store, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create "cond" block
    //
    //  %OneElt = extractelement <16 x i32> %Src, i32 Idx
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  store i32 %OneElt, i32* %EltAddr
    //
    BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    Builder.CreateStore(OneElt, Gep);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }
  CI->eraseFromParent();
}

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  const DataLayout *TD = TLI ?
      TLI->getDataLayout() : nullptr;

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea
  unsigned MinSize, PrefAlign;
  if (TLI && TD && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->arg_operands()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(TD->getPointerSizeInBits(
                     cast<PointerType>(Arg->getType())->getAddressSpace()), 0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*TD, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if ((Offset2 & (PrefAlign-1)) != 0)
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) &&
          AI->getAlignment() < PrefAlign &&
          TD->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(PrefAlign);
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) &&
          GV->hasUniqueInitializer() &&
          !GV->hasSection() &&
          GV->getAlignment() < PrefAlign &&
          TD->getTypeAllocSize(
            GV->getType()->getElementType()) >= MinSize + Offset2)
        GV->setAlignment(PrefAlign);
    }
    // If this is a memcpy (or similar) then we may be able to improve the
    // alignment
    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
      unsigned Align = getKnownAlignment(MI->getDest(), *TD);
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
        Align = std::min(Align, getKnownAlignment(MTI->getSource(), *TD));
      if (Align > MI->getAlignment())
        MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
    }
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::objectsize: {
      // Lower all uses of llvm.objectsize.*
      bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
      Type *ReturnTy = CI->getType();
      Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

      // Substituting this can cause recursive simplifications, which can
      // invalidate our iterator.  Use a WeakVH to hold onto it in case this
      // happens.
      WeakVH IterHandle(CurInstIterator);

      replaceAndRecursivelySimplify(CI, RetVal,
                                    TLInfo, nullptr);

      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      if (IterHandle != CurInstIterator) {
        CurInstIterator = BB->begin();
        SunkAddrs.clear();
      }
      return true;
    }
    case Intrinsic::masked_load: {
      // Scalarize unsupported vector masked load
      if (!TTI->isLegalMaskedLoad(CI->getType(), 1)) {
        ScalarizeMaskedLoad(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    case Intrinsic::masked_store: {
      if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(), 1)) {
        ScalarizeMaskedStore(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    }

    if (TLI) {
      SmallVector<Value*, 2> PtrOps;
      Type *AccessTy;
      if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
        while (!PtrOps.empty())
          if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
            return true;
    }
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction()) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize.  Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  if (Value *V = Simplifier.optimizeCall(CI)) {
    CI->replaceAllUsesWith(V);
    CI->eraseFromParent();
    return true;
  }
  return false;
}

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that the
  // return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI).second)
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
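/// BaseReg and ScaledReg are the IR values standing behind the HasBaseReg and
/// Scale fields inherited from TargetLowering::AddrMode.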
1580 struct ExtAddrMode : public TargetLowering::AddrMode { 1581 Value *BaseReg; 1582 Value *ScaledReg; 1583 ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {} 1584 void print(raw_ostream &OS) const; 1585 void dump() const; 1586 1587 bool operator==(const ExtAddrMode& O) const { 1588 return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) && 1589 (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) && 1590 (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale); 1591 } 1592 }; 1593 1594 #ifndef NDEBUG 1595 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 1596 AM.print(OS); 1597 return OS; 1598 } 1599 #endif 1600 1601 void ExtAddrMode::print(raw_ostream &OS) const { 1602 bool NeedPlus = false; 1603 OS << "["; 1604 if (BaseGV) { 1605 OS << (NeedPlus ? " + " : "") 1606 << "GV:"; 1607 BaseGV->printAsOperand(OS, /*PrintType=*/false); 1608 NeedPlus = true; 1609 } 1610 1611 if (BaseOffs) { 1612 OS << (NeedPlus ? " + " : "") 1613 << BaseOffs; 1614 NeedPlus = true; 1615 } 1616 1617 if (BaseReg) { 1618 OS << (NeedPlus ? " + " : "") 1619 << "Base:"; 1620 BaseReg->printAsOperand(OS, /*PrintType=*/false); 1621 NeedPlus = true; 1622 } 1623 if (Scale) { 1624 OS << (NeedPlus ? " + " : "") 1625 << Scale << "*"; 1626 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 1627 } 1628 1629 OS << ']'; 1630 } 1631 1632 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1633 void ExtAddrMode::dump() const { 1634 print(dbgs()); 1635 dbgs() << '\n'; 1636 } 1637 #endif 1638 1639 /// \brief This class provides transaction based operation on the IR. 1640 /// Every change made through this class is recorded in the internal state and 1641 /// can be undone (rollback) until commit is called. 1642 class TypePromotionTransaction { 1643 1644 /// \brief This represents the common interface of the individual transaction. 1645 /// Each class implements the logic for doing one specific modification on 1646 /// the IR via the TypePromotionTransaction. 1647 class TypePromotionAction { 1648 protected: 1649 /// The Instruction modified. 1650 Instruction *Inst; 1651 1652 public: 1653 /// \brief Constructor of the action. 1654 /// The constructor performs the related action on the IR. 1655 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 1656 1657 virtual ~TypePromotionAction() {} 1658 1659 /// \brief Undo the modification done by this action. 1660 /// When this method is called, the IR must be in the same state as it was 1661 /// before this action was applied. 1662 /// \pre Undoing the action works if and only if the IR is in the exact same 1663 /// state as it was directly after this action was applied. 1664 virtual void undo() = 0; 1665 1666 /// \brief Advocate every change made by this action. 1667 /// When the results on the IR of the action are to be kept, it is important 1668 /// to call this function, otherwise hidden information may be kept forever. 1669 virtual void commit() { 1670 // Nothing to be done, this action is not doing anything. 1671 } 1672 }; 1673 1674 /// \brief Utility to remember the position of an instruction. 1675 class InsertionHandler { 1676 /// Position of an instruction. 1677 /// Either an instruction: 1678 /// - Is the first in a basic block: BB is used. 1679 /// - Has a previous instructon: PrevInst is used. 1680 union { 1681 Instruction *PrevInst; 1682 BasicBlock *BB; 1683 } Point; 1684 /// Remember whether or not the instruction had a previous instruction. 1685 bool HasPrevInstruction; 1686 1687 public: 1688 /// \brief Record the position of \p Inst. 
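    /// If \p Inst is the first instruction of its block, only the basic block
    /// is recorded; otherwise the instruction immediately preceding \p Inst is
    /// recorded, so that insert() can restore the original position even after
    /// \p Inst has been moved or removed.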
1689 InsertionHandler(Instruction *Inst) { 1690 BasicBlock::iterator It = Inst; 1691 HasPrevInstruction = (It != (Inst->getParent()->begin())); 1692 if (HasPrevInstruction) 1693 Point.PrevInst = --It; 1694 else 1695 Point.BB = Inst->getParent(); 1696 } 1697 1698 /// \brief Insert \p Inst at the recorded position. 1699 void insert(Instruction *Inst) { 1700 if (HasPrevInstruction) { 1701 if (Inst->getParent()) 1702 Inst->removeFromParent(); 1703 Inst->insertAfter(Point.PrevInst); 1704 } else { 1705 Instruction *Position = Point.BB->getFirstInsertionPt(); 1706 if (Inst->getParent()) 1707 Inst->moveBefore(Position); 1708 else 1709 Inst->insertBefore(Position); 1710 } 1711 } 1712 }; 1713 1714 /// \brief Move an instruction before another. 1715 class InstructionMoveBefore : public TypePromotionAction { 1716 /// Original position of the instruction. 1717 InsertionHandler Position; 1718 1719 public: 1720 /// \brief Move \p Inst before \p Before. 1721 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 1722 : TypePromotionAction(Inst), Position(Inst) { 1723 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); 1724 Inst->moveBefore(Before); 1725 } 1726 1727 /// \brief Move the instruction back to its original position. 1728 void undo() override { 1729 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 1730 Position.insert(Inst); 1731 } 1732 }; 1733 1734 /// \brief Set the operand of an instruction with a new value. 1735 class OperandSetter : public TypePromotionAction { 1736 /// Original operand of the instruction. 1737 Value *Origin; 1738 /// Index of the modified instruction. 1739 unsigned Idx; 1740 1741 public: 1742 /// \brief Set \p Idx operand of \p Inst with \p NewVal. 1743 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 1744 : TypePromotionAction(Inst), Idx(Idx) { 1745 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 1746 << "for:" << *Inst << "\n" 1747 << "with:" << *NewVal << "\n"); 1748 Origin = Inst->getOperand(Idx); 1749 Inst->setOperand(Idx, NewVal); 1750 } 1751 1752 /// \brief Restore the original value of the instruction. 1753 void undo() override { 1754 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 1755 << "for: " << *Inst << "\n" 1756 << "with: " << *Origin << "\n"); 1757 Inst->setOperand(Idx, Origin); 1758 } 1759 }; 1760 1761 /// \brief Hide the operands of an instruction. 1762 /// Do as if this instruction was not using any of its operands. 1763 class OperandsHider : public TypePromotionAction { 1764 /// The list of original operands. 1765 SmallVector<Value *, 4> OriginalValues; 1766 1767 public: 1768 /// \brief Remove \p Inst from the uses of the operands of \p Inst. 1769 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 1770 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 1771 unsigned NumOpnds = Inst->getNumOperands(); 1772 OriginalValues.reserve(NumOpnds); 1773 for (unsigned It = 0; It < NumOpnds; ++It) { 1774 // Save the current operand. 1775 Value *Val = Inst->getOperand(It); 1776 OriginalValues.push_back(Val); 1777 // Set a dummy one. 1778 // We could use OperandSetter here, but that would implied an overhead 1779 // that we are not willing to pay. 1780 Inst->setOperand(It, UndefValue::get(Val->getType())); 1781 } 1782 } 1783 1784 /// \brief Restore the original list of uses. 
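    /// The operands are written back from OriginalValues, replacing the
    /// UndefValue placeholders installed by the constructor.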
1785 void undo() override { 1786 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 1787 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 1788 Inst->setOperand(It, OriginalValues[It]); 1789 } 1790 }; 1791 1792 /// \brief Build a truncate instruction. 1793 class TruncBuilder : public TypePromotionAction { 1794 Value *Val; 1795 public: 1796 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty 1797 /// result. 1798 /// trunc Opnd to Ty. 1799 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 1800 IRBuilder<> Builder(Opnd); 1801 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 1802 DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 1803 } 1804 1805 /// \brief Get the built value. 1806 Value *getBuiltValue() { return Val; } 1807 1808 /// \brief Remove the built instruction. 1809 void undo() override { 1810 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 1811 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 1812 IVal->eraseFromParent(); 1813 } 1814 }; 1815 1816 /// \brief Build a sign extension instruction. 1817 class SExtBuilder : public TypePromotionAction { 1818 Value *Val; 1819 public: 1820 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty 1821 /// result. 1822 /// sext Opnd to Ty. 1823 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 1824 : TypePromotionAction(InsertPt) { 1825 IRBuilder<> Builder(InsertPt); 1826 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 1827 DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 1828 } 1829 1830 /// \brief Get the built value. 1831 Value *getBuiltValue() { return Val; } 1832 1833 /// \brief Remove the built instruction. 1834 void undo() override { 1835 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 1836 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 1837 IVal->eraseFromParent(); 1838 } 1839 }; 1840 1841 /// \brief Build a zero extension instruction. 1842 class ZExtBuilder : public TypePromotionAction { 1843 Value *Val; 1844 public: 1845 /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty 1846 /// result. 1847 /// zext Opnd to Ty. 1848 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 1849 : TypePromotionAction(InsertPt) { 1850 IRBuilder<> Builder(InsertPt); 1851 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 1852 DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 1853 } 1854 1855 /// \brief Get the built value. 1856 Value *getBuiltValue() { return Val; } 1857 1858 /// \brief Remove the built instruction. 1859 void undo() override { 1860 DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 1861 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 1862 IVal->eraseFromParent(); 1863 } 1864 }; 1865 1866 /// \brief Mutate an instruction to another type. 1867 class TypeMutator : public TypePromotionAction { 1868 /// Record the original type. 1869 Type *OrigTy; 1870 1871 public: 1872 /// \brief Mutate the type of \p Inst into \p NewTy. 1873 TypeMutator(Instruction *Inst, Type *NewTy) 1874 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 1875 DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 1876 << "\n"); 1877 Inst->mutateType(NewTy); 1878 } 1879 1880 /// \brief Mutate the instruction back to its original type. 1881 void undo() override { 1882 DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 1883 << "\n"); 1884 Inst->mutateType(OrigTy); 1885 } 1886 }; 1887 1888 /// \brief Replace the uses of an instruction by another instruction. 
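  /// The original (user, operand index) pairs are recorded so that undo() can
  /// point every user back at the original instruction.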
1889 class UsesReplacer : public TypePromotionAction { 1890 /// Helper structure to keep track of the replaced uses. 1891 struct InstructionAndIdx { 1892 /// The instruction using the instruction. 1893 Instruction *Inst; 1894 /// The index where this instruction is used for Inst. 1895 unsigned Idx; 1896 InstructionAndIdx(Instruction *Inst, unsigned Idx) 1897 : Inst(Inst), Idx(Idx) {} 1898 }; 1899 1900 /// Keep track of the original uses (pair Instruction, Index). 1901 SmallVector<InstructionAndIdx, 4> OriginalUses; 1902 typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator; 1903 1904 public: 1905 /// \brief Replace all the use of \p Inst by \p New. 1906 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 1907 DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 1908 << "\n"); 1909 // Record the original uses. 1910 for (Use &U : Inst->uses()) { 1911 Instruction *UserI = cast<Instruction>(U.getUser()); 1912 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 1913 } 1914 // Now, we can replace the uses. 1915 Inst->replaceAllUsesWith(New); 1916 } 1917 1918 /// \brief Reassign the original uses of Inst to Inst. 1919 void undo() override { 1920 DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 1921 for (use_iterator UseIt = OriginalUses.begin(), 1922 EndIt = OriginalUses.end(); 1923 UseIt != EndIt; ++UseIt) { 1924 UseIt->Inst->setOperand(UseIt->Idx, Inst); 1925 } 1926 } 1927 }; 1928 1929 /// \brief Remove an instruction from the IR. 1930 class InstructionRemover : public TypePromotionAction { 1931 /// Original position of the instruction. 1932 InsertionHandler Inserter; 1933 /// Helper structure to hide all the link to the instruction. In other 1934 /// words, this helps to do as if the instruction was removed. 1935 OperandsHider Hider; 1936 /// Keep track of the uses replaced, if any. 1937 UsesReplacer *Replacer; 1938 1939 public: 1940 /// \brief Remove all reference of \p Inst and optinally replace all its 1941 /// uses with New. 1942 /// \pre If !Inst->use_empty(), then New != nullptr 1943 InstructionRemover(Instruction *Inst, Value *New = nullptr) 1944 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 1945 Replacer(nullptr) { 1946 if (New) 1947 Replacer = new UsesReplacer(Inst, New); 1948 DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 1949 Inst->removeFromParent(); 1950 } 1951 1952 ~InstructionRemover() override { delete Replacer; } 1953 1954 /// \brief Really remove the instruction. 1955 void commit() override { delete Inst; } 1956 1957 /// \brief Resurrect the instruction and reassign it to the proper uses if 1958 /// new value was provided when build this action. 1959 void undo() override { 1960 DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 1961 Inserter.insert(Inst); 1962 if (Replacer) 1963 Replacer->undo(); 1964 Hider.undo(); 1965 } 1966 }; 1967 1968 public: 1969 /// Restoration point. 1970 /// The restoration point is a pointer to an action instead of an iterator 1971 /// because the iterator may be invalidated but not the pointer. 1972 typedef const TypePromotionAction *ConstRestorationPt; 1973 /// Advocate every changes made in that transaction. 1974 void commit(); 1975 /// Undo all the changes made after the given point. 1976 void rollback(ConstRestorationPt Point); 1977 /// Get the current restoration point. 1978 ConstRestorationPt getRestorationPoint() const; 1979 1980 /// \name API for IR modification with state keeping to support rollback. 
1981 /// @{ 1982 /// Same as Instruction::setOperand. 1983 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 1984 /// Same as Instruction::eraseFromParent. 1985 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 1986 /// Same as Value::replaceAllUsesWith. 1987 void replaceAllUsesWith(Instruction *Inst, Value *New); 1988 /// Same as Value::mutateType. 1989 void mutateType(Instruction *Inst, Type *NewTy); 1990 /// Same as IRBuilder::createTrunc. 1991 Value *createTrunc(Instruction *Opnd, Type *Ty); 1992 /// Same as IRBuilder::createSExt. 1993 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 1994 /// Same as IRBuilder::createZExt. 1995 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 1996 /// Same as Instruction::moveBefore. 1997 void moveBefore(Instruction *Inst, Instruction *Before); 1998 /// @} 1999 2000 private: 2001 /// The ordered list of actions made so far. 2002 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2003 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt; 2004 }; 2005 2006 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2007 Value *NewVal) { 2008 Actions.push_back( 2009 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal)); 2010 } 2011 2012 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2013 Value *NewVal) { 2014 Actions.push_back( 2015 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal)); 2016 } 2017 2018 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2019 Value *New) { 2020 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2021 } 2022 2023 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2024 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2025 } 2026 2027 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2028 Type *Ty) { 2029 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2030 Value *Val = Ptr->getBuiltValue(); 2031 Actions.push_back(std::move(Ptr)); 2032 return Val; 2033 } 2034 2035 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2036 Value *Opnd, Type *Ty) { 2037 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2038 Value *Val = Ptr->getBuiltValue(); 2039 Actions.push_back(std::move(Ptr)); 2040 return Val; 2041 } 2042 2043 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2044 Value *Opnd, Type *Ty) { 2045 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2046 Value *Val = Ptr->getBuiltValue(); 2047 Actions.push_back(std::move(Ptr)); 2048 return Val; 2049 } 2050 2051 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2052 Instruction *Before) { 2053 Actions.push_back( 2054 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before)); 2055 } 2056 2057 TypePromotionTransaction::ConstRestorationPt 2058 TypePromotionTransaction::getRestorationPoint() const { 2059 return !Actions.empty() ? 
Actions.back().get() : nullptr; 2060 } 2061 2062 void TypePromotionTransaction::commit() { 2063 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2064 ++It) 2065 (*It)->commit(); 2066 Actions.clear(); 2067 } 2068 2069 void TypePromotionTransaction::rollback( 2070 TypePromotionTransaction::ConstRestorationPt Point) { 2071 while (!Actions.empty() && Point != Actions.back().get()) { 2072 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2073 Curr->undo(); 2074 } 2075 } 2076 2077 /// \brief A helper class for matching addressing modes. 2078 /// 2079 /// This encapsulates the logic for matching the target-legal addressing modes. 2080 class AddressingModeMatcher { 2081 SmallVectorImpl<Instruction*> &AddrModeInsts; 2082 const TargetMachine &TM; 2083 const TargetLowering &TLI; 2084 2085 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2086 /// the memory instruction that we're computing this address for. 2087 Type *AccessTy; 2088 Instruction *MemoryInst; 2089 2090 /// AddrMode - This is the addressing mode that we're building up. This is 2091 /// part of the return value of this addressing mode matching stuff. 2092 ExtAddrMode &AddrMode; 2093 2094 /// The truncate instruction inserted by other CodeGenPrepare optimizations. 2095 const SetOfInstrs &InsertedTruncs; 2096 /// A map from the instructions to their type before promotion. 2097 InstrToOrigTy &PromotedInsts; 2098 /// The ongoing transaction where every action should be registered. 2099 TypePromotionTransaction &TPT; 2100 2101 /// IgnoreProfitability - This is set to true when we should not do 2102 /// profitability checks. When true, IsProfitableToFoldIntoAddressingMode 2103 /// always returns true. 2104 bool IgnoreProfitability; 2105 2106 AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI, 2107 const TargetMachine &TM, Type *AT, Instruction *MI, 2108 ExtAddrMode &AM, const SetOfInstrs &InsertedTruncs, 2109 InstrToOrigTy &PromotedInsts, 2110 TypePromotionTransaction &TPT) 2111 : AddrModeInsts(AMI), TM(TM), 2112 TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent()) 2113 ->getTargetLowering()), 2114 AccessTy(AT), MemoryInst(MI), AddrMode(AM), 2115 InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) { 2116 IgnoreProfitability = false; 2117 } 2118 public: 2119 2120 /// Match - Find the maximal addressing mode that a load/store of V can fold, 2121 /// give an access type of AccessTy. This returns a list of involved 2122 /// instructions in AddrModeInsts. 2123 /// \p InsertedTruncs The truncate instruction inserted by other 2124 /// CodeGenPrepare 2125 /// optimizations. 2126 /// \p PromotedInsts maps the instructions to their type before promotion. 2127 /// \p The ongoing transaction where every action should be registered. 
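  /// For example (illustrative only), given a load whose pointer operand is
  /// computed as
  /// @code
  ///   %idx  = shl i64 %i, 2
  ///   %sum  = add i64 %base, %idx
  ///   %addr = inttoptr i64 %sum to i32*
  /// @endcode
  /// the returned ExtAddrMode may describe [Base:%base + 4*%i], with the shl,
  /// add and inttoptr instructions recorded in AddrModeInsts.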
2128 static ExtAddrMode Match(Value *V, Type *AccessTy, 2129 Instruction *MemoryInst, 2130 SmallVectorImpl<Instruction*> &AddrModeInsts, 2131 const TargetMachine &TM, 2132 const SetOfInstrs &InsertedTruncs, 2133 InstrToOrigTy &PromotedInsts, 2134 TypePromotionTransaction &TPT) { 2135 ExtAddrMode Result; 2136 2137 bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, 2138 MemoryInst, Result, InsertedTruncs, 2139 PromotedInsts, TPT).MatchAddr(V, 0); 2140 (void)Success; assert(Success && "Couldn't select *anything*?"); 2141 return Result; 2142 } 2143 private: 2144 bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 2145 bool MatchAddr(Value *V, unsigned Depth); 2146 bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, 2147 bool *MovedAway = nullptr); 2148 bool IsProfitableToFoldIntoAddressingMode(Instruction *I, 2149 ExtAddrMode &AMBefore, 2150 ExtAddrMode &AMAfter); 2151 bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 2152 bool IsPromotionProfitable(unsigned NewCost, unsigned OldCost, 2153 Value *PromotedOperand) const; 2154 }; 2155 2156 /// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode. 2157 /// Return true and update AddrMode if this addr mode is legal for the target, 2158 /// false if not. 2159 bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale, 2160 unsigned Depth) { 2161 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 2162 // mode. Just process that directly. 2163 if (Scale == 1) 2164 return MatchAddr(ScaleReg, Depth); 2165 2166 // If the scale is 0, it takes nothing to add this. 2167 if (Scale == 0) 2168 return true; 2169 2170 // If we already have a scale of this value, we can add to it, otherwise, we 2171 // need an available scale field. 2172 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 2173 return false; 2174 2175 ExtAddrMode TestAddrMode = AddrMode; 2176 2177 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 2178 // [A+B + A*7] -> [B+A*8]. 2179 TestAddrMode.Scale += Scale; 2180 TestAddrMode.ScaledReg = ScaleReg; 2181 2182 // If the new address isn't legal, bail out. 2183 if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) 2184 return false; 2185 2186 // It was legal, so commit it. 2187 AddrMode = TestAddrMode; 2188 2189 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 2190 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 2191 // X*Scale + C*Scale to addr mode. 2192 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 2193 if (isa<Instruction>(ScaleReg) && // not a constant expr. 2194 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { 2195 TestAddrMode.ScaledReg = AddLHS; 2196 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; 2197 2198 // If this addressing mode is legal, commit it and remember that we folded 2199 // this instruction. 2200 if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) { 2201 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 2202 AddrMode = TestAddrMode; 2203 return true; 2204 } 2205 } 2206 2207 // Otherwise, not (x+c)*scale, just return what we have. 2208 return true; 2209 } 2210 2211 /// MightBeFoldableInst - This is a little filter, which returns true if an 2212 /// addressing computation involving I might be folded into a load/store 2213 /// accessing it. This doesn't need to be perfect, but needs to accept at least 2214 /// the set of instructions that MatchOperationAddr can. 
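/// In practice it accepts pointer/integer casts, adds, multiplies and shifts
/// by a constant, and GEPs; everything else is conservatively rejected.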
2215 static bool MightBeFoldableInst(Instruction *I) { 2216 switch (I->getOpcode()) { 2217 case Instruction::BitCast: 2218 case Instruction::AddrSpaceCast: 2219 // Don't touch identity bitcasts. 2220 if (I->getType() == I->getOperand(0)->getType()) 2221 return false; 2222 return I->getType()->isPointerTy() || I->getType()->isIntegerTy(); 2223 case Instruction::PtrToInt: 2224 // PtrToInt is always a noop, as we know that the int type is pointer sized. 2225 return true; 2226 case Instruction::IntToPtr: 2227 // We know the input is intptr_t, so this is foldable. 2228 return true; 2229 case Instruction::Add: 2230 return true; 2231 case Instruction::Mul: 2232 case Instruction::Shl: 2233 // Can only handle X*C and X << C. 2234 return isa<ConstantInt>(I->getOperand(1)); 2235 case Instruction::GetElementPtr: 2236 return true; 2237 default: 2238 return false; 2239 } 2240 } 2241 2242 /// \brief Check whether or not \p Val is a legal instruction for \p TLI. 2243 /// \note \p Val is assumed to be the product of some type promotion. 2244 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed 2245 /// to be legal, as the non-promoted value would have had the same state. 2246 static bool isPromotedInstructionLegal(const TargetLowering &TLI, Value *Val) { 2247 Instruction *PromotedInst = dyn_cast<Instruction>(Val); 2248 if (!PromotedInst) 2249 return false; 2250 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); 2251 // If the ISDOpcode is undefined, it was undefined before the promotion. 2252 if (!ISDOpcode) 2253 return true; 2254 // Otherwise, check if the promoted instruction is legal or not. 2255 return TLI.isOperationLegalOrCustom( 2256 ISDOpcode, TLI.getValueType(PromotedInst->getType())); 2257 } 2258 2259 /// \brief Hepler class to perform type promotion. 2260 class TypePromotionHelper { 2261 /// \brief Utility function to check whether or not a sign or zero extension 2262 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 2263 /// either using the operands of \p Inst or promoting \p Inst. 2264 /// The type of the extension is defined by \p IsSExt. 2265 /// In other words, check if: 2266 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 2267 /// #1 Promotion applies: 2268 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 2269 /// #2 Operand reuses: 2270 /// ext opnd1 to ConsideredExtType. 2271 /// \p PromotedInsts maps the instructions to their type before promotion. 2272 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 2273 const InstrToOrigTy &PromotedInsts, bool IsSExt); 2274 2275 /// \brief Utility function to determine if \p OpIdx should be promoted when 2276 /// promoting \p Inst. 2277 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 2278 if (isa<SelectInst>(Inst) && OpIdx == 0) 2279 return false; 2280 return true; 2281 } 2282 2283 /// \brief Utility function to promote the operand of \p Ext when this 2284 /// operand is a promotable trunc or sext or zext. 2285 /// \p PromotedInsts maps the instructions to their type before promotion. 2286 /// \p CreatedInstsCost[out] contains the cost of all instructions 2287 /// created to promote the operand of Ext. 2288 /// Newly added extensions are inserted in \p Exts. 2289 /// Newly added truncates are inserted in \p Truncs. 2290 /// Should never be called directly. 2291 /// \return The promoted value which is used instead of Ext. 
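  /// For example (illustrative only), when \p Ext is the outer extension in
  /// @code
  ///   %t = zext i8 %a to i16
  ///   %e = zext i16 %t to i32
  /// @endcode
  /// the two extensions are merged into a single 'zext i8 %a to i32', which
  /// becomes the returned value.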
2292 static Value *promoteOperandForTruncAndAnyExt( 2293 Instruction *Ext, TypePromotionTransaction &TPT, 2294 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2295 SmallVectorImpl<Instruction *> *Exts, 2296 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 2297 2298 /// \brief Utility function to promote the operand of \p Ext when this 2299 /// operand is promotable and is not a supported trunc or sext. 2300 /// \p PromotedInsts maps the instructions to their type before promotion. 2301 /// \p CreatedInstsCost[out] contains the cost of all the instructions 2302 /// created to promote the operand of Ext. 2303 /// Newly added extensions are inserted in \p Exts. 2304 /// Newly added truncates are inserted in \p Truncs. 2305 /// Should never be called directly. 2306 /// \return The promoted value which is used instead of Ext. 2307 static Value *promoteOperandForOther(Instruction *Ext, 2308 TypePromotionTransaction &TPT, 2309 InstrToOrigTy &PromotedInsts, 2310 unsigned &CreatedInstsCost, 2311 SmallVectorImpl<Instruction *> *Exts, 2312 SmallVectorImpl<Instruction *> *Truncs, 2313 const TargetLowering &TLI, bool IsSExt); 2314 2315 /// \see promoteOperandForOther. 2316 static Value *signExtendOperandForOther( 2317 Instruction *Ext, TypePromotionTransaction &TPT, 2318 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2319 SmallVectorImpl<Instruction *> *Exts, 2320 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 2321 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 2322 Exts, Truncs, TLI, true); 2323 } 2324 2325 /// \see promoteOperandForOther. 2326 static Value *zeroExtendOperandForOther( 2327 Instruction *Ext, TypePromotionTransaction &TPT, 2328 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2329 SmallVectorImpl<Instruction *> *Exts, 2330 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 2331 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 2332 Exts, Truncs, TLI, false); 2333 } 2334 2335 public: 2336 /// Type for the utility function that promotes the operand of Ext. 2337 typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT, 2338 InstrToOrigTy &PromotedInsts, 2339 unsigned &CreatedInstsCost, 2340 SmallVectorImpl<Instruction *> *Exts, 2341 SmallVectorImpl<Instruction *> *Truncs, 2342 const TargetLowering &TLI); 2343 /// \brief Given a sign/zero extend instruction \p Ext, return the approriate 2344 /// action to promote the operand of \p Ext instead of using Ext. 2345 /// \return NULL if no promotable action is possible with the current 2346 /// sign extension. 2347 /// \p InsertedTruncs keeps track of all the truncate instructions inserted by 2348 /// the others CodeGenPrepare optimizations. This information is important 2349 /// because we do not want to promote these instructions as CodeGenPrepare 2350 /// will reinsert them later. Thus creating an infinite loop: create/remove. 2351 /// \p PromotedInsts maps the instructions to their type before promotion. 2352 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedTruncs, 2353 const TargetLowering &TLI, 2354 const InstrToOrigTy &PromotedInsts); 2355 }; 2356 2357 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 2358 Type *ConsideredExtType, 2359 const InstrToOrigTy &PromotedInsts, 2360 bool IsSExt) { 2361 // The promotion helper does not know how to deal with vector types yet. 
2362 // To be able to fix that, we would need to fix the places where we 2363 // statically extend, e.g., constants and such. 2364 if (Inst->getType()->isVectorTy()) 2365 return false; 2366 2367 // We can always get through zext. 2368 if (isa<ZExtInst>(Inst)) 2369 return true; 2370 2371 // sext(sext) is ok too. 2372 if (IsSExt && isa<SExtInst>(Inst)) 2373 return true; 2374 2375 // We can get through binary operator, if it is legal. In other words, the 2376 // binary operator must have a nuw or nsw flag. 2377 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 2378 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && 2379 ((!IsSExt && BinOp->hasNoUnsignedWrap()) || 2380 (IsSExt && BinOp->hasNoSignedWrap()))) 2381 return true; 2382 2383 // Check if we can do the following simplification. 2384 // ext(trunc(opnd)) --> ext(opnd) 2385 if (!isa<TruncInst>(Inst)) 2386 return false; 2387 2388 Value *OpndVal = Inst->getOperand(0); 2389 // Check if we can use this operand in the extension. 2390 // If the type is larger than the result type of the extension, 2391 // we cannot. 2392 if (!OpndVal->getType()->isIntegerTy() || 2393 OpndVal->getType()->getIntegerBitWidth() > 2394 ConsideredExtType->getIntegerBitWidth()) 2395 return false; 2396 2397 // If the operand of the truncate is not an instruction, we will not have 2398 // any information on the dropped bits. 2399 // (Actually we could for constant but it is not worth the extra logic). 2400 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 2401 if (!Opnd) 2402 return false; 2403 2404 // Check if the source of the type is narrow enough. 2405 // I.e., check that trunc just drops extended bits of the same kind of 2406 // the extension. 2407 // #1 get the type of the operand and check the kind of the extended bits. 2408 const Type *OpndType; 2409 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 2410 if (It != PromotedInsts.end() && It->second.IsSExt == IsSExt) 2411 OpndType = It->second.Ty; 2412 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) 2413 OpndType = Opnd->getOperand(0)->getType(); 2414 else 2415 return false; 2416 2417 // #2 check that the truncate just drop extended bits. 2418 if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth()) 2419 return true; 2420 2421 return false; 2422 } 2423 2424 TypePromotionHelper::Action TypePromotionHelper::getAction( 2425 Instruction *Ext, const SetOfInstrs &InsertedTruncs, 2426 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 2427 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 2428 "Unexpected instruction type"); 2429 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); 2430 Type *ExtTy = Ext->getType(); 2431 bool IsSExt = isa<SExtInst>(Ext); 2432 // If the operand of the extension is not an instruction, we cannot 2433 // get through. 2434 // If it, check we can get through. 2435 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) 2436 return nullptr; 2437 2438 // Do not promote if the operand has been added by codegenprepare. 2439 // Otherwise, it means we are undoing an optimization that is likely to be 2440 // redone, thus causing potential infinite loop. 2441 if (isa<TruncInst>(ExtOpnd) && InsertedTruncs.count(ExtOpnd)) 2442 return nullptr; 2443 2444 // SExt or Trunc instructions. 2445 // Return the related handler. 
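  // For instance, ext(trunc(%x)), sext(sext(%x)) and s|zext(zext(%x)) all fall
  // into this bucket and can be handled without promoting any other
  // instruction.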
2446 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || 2447 isa<ZExtInst>(ExtOpnd)) 2448 return promoteOperandForTruncAndAnyExt; 2449 2450 // Regular instruction. 2451 // Abort early if we will have to insert non-free instructions. 2452 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) 2453 return nullptr; 2454 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; 2455 } 2456 2457 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 2458 llvm::Instruction *SExt, TypePromotionTransaction &TPT, 2459 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2460 SmallVectorImpl<Instruction *> *Exts, 2461 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 2462 // By construction, the operand of SExt is an instruction. Otherwise we cannot 2463 // get through it and this method should not be called. 2464 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 2465 Value *ExtVal = SExt; 2466 bool HasMergedNonFreeExt = false; 2467 if (isa<ZExtInst>(SExtOpnd)) { 2468 // Replace s|zext(zext(opnd)) 2469 // => zext(opnd). 2470 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); 2471 Value *ZExt = 2472 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 2473 TPT.replaceAllUsesWith(SExt, ZExt); 2474 TPT.eraseInstruction(SExt); 2475 ExtVal = ZExt; 2476 } else { 2477 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) 2478 // => z|sext(opnd). 2479 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 2480 } 2481 CreatedInstsCost = 0; 2482 2483 // Remove dead code. 2484 if (SExtOpnd->use_empty()) 2485 TPT.eraseInstruction(SExtOpnd); 2486 2487 // Check if the extension is still needed. 2488 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 2489 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 2490 if (ExtInst) { 2491 if (Exts) 2492 Exts->push_back(ExtInst); 2493 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 2494 } 2495 return ExtVal; 2496 } 2497 2498 // At this point we have: ext ty opnd to ty. 2499 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 2500 Value *NextVal = ExtInst->getOperand(0); 2501 TPT.eraseInstruction(ExtInst, NextVal); 2502 return NextVal; 2503 } 2504 2505 Value *TypePromotionHelper::promoteOperandForOther( 2506 Instruction *Ext, TypePromotionTransaction &TPT, 2507 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2508 SmallVectorImpl<Instruction *> *Exts, 2509 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 2510 bool IsSExt) { 2511 // By construction, the operand of Ext is an instruction. Otherwise we cannot 2512 // get through it and this method should not be called. 2513 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 2514 CreatedInstsCost = 0; 2515 if (!ExtOpnd->hasOneUse()) { 2516 // ExtOpnd will be promoted. 2517 // All its uses, but Ext, will need to use a truncated value of the 2518 // promoted version. 2519 // Create the truncate now. 2520 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 2521 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 2522 ITrunc->removeFromParent(); 2523 // Insert it just after the definition. 2524 ITrunc->insertAfter(ExtOpnd); 2525 if (Truncs) 2526 Truncs->push_back(ITrunc); 2527 } 2528 2529 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 2530 // Restore the operand of Ext (which has been replace by the previous call 2531 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 
2532 TPT.setOperand(Ext, 0, ExtOpnd); 2533 } 2534 2535 // Get through the Instruction: 2536 // 1. Update its type. 2537 // 2. Replace the uses of Ext by Inst. 2538 // 3. Extend each operand that needs to be extended. 2539 2540 // Remember the original type of the instruction before promotion. 2541 // This is useful to know that the high bits are sign extended bits. 2542 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>( 2543 ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt))); 2544 // Step #1. 2545 TPT.mutateType(ExtOpnd, Ext->getType()); 2546 // Step #2. 2547 TPT.replaceAllUsesWith(Ext, ExtOpnd); 2548 // Step #3. 2549 Instruction *ExtForOpnd = Ext; 2550 2551 DEBUG(dbgs() << "Propagate Ext to operands\n"); 2552 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 2553 ++OpIdx) { 2554 DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 2555 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 2556 !shouldExtOperand(ExtOpnd, OpIdx)) { 2557 DEBUG(dbgs() << "No need to propagate\n"); 2558 continue; 2559 } 2560 // Check if we can statically extend the operand. 2561 Value *Opnd = ExtOpnd->getOperand(OpIdx); 2562 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 2563 DEBUG(dbgs() << "Statically extend\n"); 2564 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 2565 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 2566 : Cst->getValue().zext(BitWidth); 2567 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 2568 continue; 2569 } 2570 // UndefValue are typed, so we have to statically sign extend them. 2571 if (isa<UndefValue>(Opnd)) { 2572 DEBUG(dbgs() << "Statically extend\n"); 2573 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); 2574 continue; 2575 } 2576 2577 // Otherwise we have to explicity sign extend the operand. 2578 // Check if Ext was reused to extend an operand. 2579 if (!ExtForOpnd) { 2580 // If yes, create a new one. 2581 DEBUG(dbgs() << "More operands to ext\n"); 2582 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) 2583 : TPT.createZExt(Ext, Opnd, Ext->getType()); 2584 if (!isa<Instruction>(ValForExtOpnd)) { 2585 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); 2586 continue; 2587 } 2588 ExtForOpnd = cast<Instruction>(ValForExtOpnd); 2589 } 2590 if (Exts) 2591 Exts->push_back(ExtForOpnd); 2592 TPT.setOperand(ExtForOpnd, 0, Opnd); 2593 2594 // Move the sign extension before the insertion point. 2595 TPT.moveBefore(ExtForOpnd, ExtOpnd); 2596 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); 2597 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); 2598 // If more sext are required, new instructions will have to be created. 2599 ExtForOpnd = nullptr; 2600 } 2601 if (ExtForOpnd == Ext) { 2602 DEBUG(dbgs() << "Extension is useless now\n"); 2603 TPT.eraseInstruction(Ext); 2604 } 2605 return ExtOpnd; 2606 } 2607 2608 /// IsPromotionProfitable - Check whether or not promoting an instruction 2609 /// to a wider type was profitable. 2610 /// \p NewCost gives the cost of extension instructions created by the 2611 /// promotion. 2612 /// \p OldCost gives the cost of extension instructions before the promotion 2613 /// plus the number of instructions that have been 2614 /// matched in the addressing mode the promotion. 2615 /// \p PromotedOperand is the value that has been promoted. 2616 /// \return True if the promotion is profitable, false otherwise. 
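/// For example (illustrative only), if the promotion created one non-free
/// extension (NewCost == 1) while the original extension was non-free and one
/// additional instruction was folded into the addressing mode
/// (OldCost == 1 + 1), the promotion is deemed profitable.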
2617 bool AddressingModeMatcher::IsPromotionProfitable( 2618 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { 2619 DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n'); 2620 // The cost of the new extensions is greater than the cost of the 2621 // old extension plus what we folded. 2622 // This is not profitable. 2623 if (NewCost > OldCost) 2624 return false; 2625 if (NewCost < OldCost) 2626 return true; 2627 // The promotion is neutral but it may help folding the sign extension in 2628 // loads for instance. 2629 // Check that we did not create an illegal instruction. 2630 return isPromotedInstructionLegal(TLI, PromotedOperand); 2631 } 2632 2633 /// MatchOperationAddr - Given an instruction or constant expr, see if we can 2634 /// fold the operation into the addressing mode. If so, update the addressing 2635 /// mode and return true, otherwise return false without modifying AddrMode. 2636 /// If \p MovedAway is not NULL, it contains the information of whether or 2637 /// not AddrInst has to be folded into the addressing mode on success. 2638 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing 2639 /// because it has been moved away. 2640 /// Thus AddrInst must not be added in the matched instructions. 2641 /// This state can happen when AddrInst is a sext, since it may be moved away. 2642 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 2643 /// not be referenced anymore. 2644 bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode, 2645 unsigned Depth, 2646 bool *MovedAway) { 2647 // Avoid exponential behavior on extremely deep expression trees. 2648 if (Depth >= 5) return false; 2649 2650 // By default, all matched instructions stay in place. 2651 if (MovedAway) 2652 *MovedAway = false; 2653 2654 switch (Opcode) { 2655 case Instruction::PtrToInt: 2656 // PtrToInt is always a noop, as we know that the int type is pointer sized. 2657 return MatchAddr(AddrInst->getOperand(0), Depth); 2658 case Instruction::IntToPtr: 2659 // This inttoptr is a no-op if the integer type is pointer sized. 2660 if (TLI.getValueType(AddrInst->getOperand(0)->getType()) == 2661 TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace())) 2662 return MatchAddr(AddrInst->getOperand(0), Depth); 2663 return false; 2664 case Instruction::BitCast: 2665 case Instruction::AddrSpaceCast: 2666 // BitCast is always a noop, and we can handle it as long as it is 2667 // int->int or pointer->pointer (we don't want int<->fp or something). 2668 if ((AddrInst->getOperand(0)->getType()->isPointerTy() || 2669 AddrInst->getOperand(0)->getType()->isIntegerTy()) && 2670 // Don't touch identity bitcasts. These were probably put here by LSR, 2671 // and we don't want to mess around with them. Assume it knows what it 2672 // is doing. 2673 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 2674 return MatchAddr(AddrInst->getOperand(0), Depth); 2675 return false; 2676 case Instruction::Add: { 2677 // Check to see if we can merge in the RHS then the LHS. If so, we win. 2678 ExtAddrMode BackupAddrMode = AddrMode; 2679 unsigned OldSize = AddrModeInsts.size(); 2680 // Start a transaction at this point. 2681 // The LHS may match but not the RHS. 2682 // Therefore, we need a higher level restoration point to undo partially 2683 // matched operation. 
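    // For example, the RHS may match (possibly promoting instructions along
    // the way) while the LHS then fails; rolling back to LastKnownGood undoes
    // those promotions before the LHS-first attempt below.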
2684 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2685 TPT.getRestorationPoint(); 2686 2687 if (MatchAddr(AddrInst->getOperand(1), Depth+1) && 2688 MatchAddr(AddrInst->getOperand(0), Depth+1)) 2689 return true; 2690 2691 // Restore the old addr mode info. 2692 AddrMode = BackupAddrMode; 2693 AddrModeInsts.resize(OldSize); 2694 TPT.rollback(LastKnownGood); 2695 2696 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 2697 if (MatchAddr(AddrInst->getOperand(0), Depth+1) && 2698 MatchAddr(AddrInst->getOperand(1), Depth+1)) 2699 return true; 2700 2701 // Otherwise we definitely can't merge the ADD in. 2702 AddrMode = BackupAddrMode; 2703 AddrModeInsts.resize(OldSize); 2704 TPT.rollback(LastKnownGood); 2705 break; 2706 } 2707 //case Instruction::Or: 2708 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 2709 //break; 2710 case Instruction::Mul: 2711 case Instruction::Shl: { 2712 // Can only handle X*C and X << C. 2713 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 2714 if (!RHS) 2715 return false; 2716 int64_t Scale = RHS->getSExtValue(); 2717 if (Opcode == Instruction::Shl) 2718 Scale = 1LL << Scale; 2719 2720 return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth); 2721 } 2722 case Instruction::GetElementPtr: { 2723 // Scan the GEP. We check it if it contains constant offsets and at most 2724 // one variable offset. 2725 int VariableOperand = -1; 2726 unsigned VariableScale = 0; 2727 2728 int64_t ConstantOffset = 0; 2729 const DataLayout *TD = TLI.getDataLayout(); 2730 gep_type_iterator GTI = gep_type_begin(AddrInst); 2731 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 2732 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 2733 const StructLayout *SL = TD->getStructLayout(STy); 2734 unsigned Idx = 2735 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 2736 ConstantOffset += SL->getElementOffset(Idx); 2737 } else { 2738 uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType()); 2739 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 2740 ConstantOffset += CI->getSExtValue()*TypeSize; 2741 } else if (TypeSize) { // Scales of zero don't do anything. 2742 // We only allow one variable index at the moment. 2743 if (VariableOperand != -1) 2744 return false; 2745 2746 // Remember the variable index. 2747 VariableOperand = i; 2748 VariableScale = TypeSize; 2749 } 2750 } 2751 } 2752 2753 // A common case is for the GEP to only do a constant offset. In this case, 2754 // just add it to the disp field and check validity. 2755 if (VariableOperand == -1) { 2756 AddrMode.BaseOffs += ConstantOffset; 2757 if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){ 2758 // Check to see if we can fold the base pointer in too. 2759 if (MatchAddr(AddrInst->getOperand(0), Depth+1)) 2760 return true; 2761 } 2762 AddrMode.BaseOffs -= ConstantOffset; 2763 return false; 2764 } 2765 2766 // Save the valid addressing mode in case we can't match. 2767 ExtAddrMode BackupAddrMode = AddrMode; 2768 unsigned OldSize = AddrModeInsts.size(); 2769 2770 // See if the scale and offset amount is valid for this target. 2771 AddrMode.BaseOffs += ConstantOffset; 2772 2773 // Match the base operand of the GEP. 2774 if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) { 2775 // If it couldn't be matched, just stuff the value in a register. 
2776 if (AddrMode.HasBaseReg) { 2777 AddrMode = BackupAddrMode; 2778 AddrModeInsts.resize(OldSize); 2779 return false; 2780 } 2781 AddrMode.HasBaseReg = true; 2782 AddrMode.BaseReg = AddrInst->getOperand(0); 2783 } 2784 2785 // Match the remaining variable portion of the GEP. 2786 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 2787 Depth)) { 2788 // If it couldn't be matched, try stuffing the base into a register 2789 // instead of matching it, and retrying the match of the scale. 2790 AddrMode = BackupAddrMode; 2791 AddrModeInsts.resize(OldSize); 2792 if (AddrMode.HasBaseReg) 2793 return false; 2794 AddrMode.HasBaseReg = true; 2795 AddrMode.BaseReg = AddrInst->getOperand(0); 2796 AddrMode.BaseOffs += ConstantOffset; 2797 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), 2798 VariableScale, Depth)) { 2799 // If even that didn't work, bail. 2800 AddrMode = BackupAddrMode; 2801 AddrModeInsts.resize(OldSize); 2802 return false; 2803 } 2804 } 2805 2806 return true; 2807 } 2808 case Instruction::SExt: 2809 case Instruction::ZExt: { 2810 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 2811 if (!Ext) 2812 return false; 2813 2814 // Try to move this ext out of the way of the addressing mode. 2815 // Ask for a method for doing so. 2816 TypePromotionHelper::Action TPH = 2817 TypePromotionHelper::getAction(Ext, InsertedTruncs, TLI, PromotedInsts); 2818 if (!TPH) 2819 return false; 2820 2821 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2822 TPT.getRestorationPoint(); 2823 unsigned CreatedInstsCost = 0; 2824 unsigned ExtCost = !TLI.isExtFree(Ext); 2825 Value *PromotedOperand = 2826 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 2827 // SExt has been moved away. 2828 // Thus either it will be rematched later in the recursive calls or it is 2829 // gone. Anyway, we must not fold it into the addressing mode at this point. 2830 // E.g., 2831 // op = add opnd, 1 2832 // idx = ext op 2833 // addr = gep base, idx 2834 // is now: 2835 // promotedOpnd = ext opnd <- no match here 2836 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 2837 // addr = gep base, op <- match 2838 if (MovedAway) 2839 *MovedAway = true; 2840 2841 assert(PromotedOperand && 2842 "TypePromotionHelper should have filtered out those cases"); 2843 2844 ExtAddrMode BackupAddrMode = AddrMode; 2845 unsigned OldSize = AddrModeInsts.size(); 2846 2847 if (!MatchAddr(PromotedOperand, Depth) || 2848 // The total of the new cost is equals to the cost of the created 2849 // instructions. 2850 // The total of the old cost is equals to the cost of the extension plus 2851 // what we have saved in the addressing mode. 2852 !IsPromotionProfitable(CreatedInstsCost, 2853 ExtCost + (AddrModeInsts.size() - OldSize), 2854 PromotedOperand)) { 2855 AddrMode = BackupAddrMode; 2856 AddrModeInsts.resize(OldSize); 2857 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 2858 TPT.rollback(LastKnownGood); 2859 return false; 2860 } 2861 return true; 2862 } 2863 } 2864 return false; 2865 } 2866 2867 /// MatchAddr - If we can, try to add the value of 'Addr' into the current 2868 /// addressing mode. If Addr can't be added to AddrMode this returns false and 2869 /// leaves AddrMode unmodified. This assumes that Addr is either a pointer type 2870 /// or intptr_t for the target. 2871 /// 2872 bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) { 2873 // Start a transaction at this point that we will rollback if the matching 2874 // fails. 
2875 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2876 TPT.getRestorationPoint(); 2877 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 2878 // Fold in immediates if legal for the target. 2879 AddrMode.BaseOffs += CI->getSExtValue(); 2880 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2881 return true; 2882 AddrMode.BaseOffs -= CI->getSExtValue(); 2883 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 2884 // If this is a global variable, try to fold it into the addressing mode. 2885 if (!AddrMode.BaseGV) { 2886 AddrMode.BaseGV = GV; 2887 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2888 return true; 2889 AddrMode.BaseGV = nullptr; 2890 } 2891 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 2892 ExtAddrMode BackupAddrMode = AddrMode; 2893 unsigned OldSize = AddrModeInsts.size(); 2894 2895 // Check to see if it is possible to fold this operation. 2896 bool MovedAway = false; 2897 if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 2898 // This instruction may have been move away. If so, there is nothing 2899 // to check here. 2900 if (MovedAway) 2901 return true; 2902 // Okay, it's possible to fold this. Check to see if it is actually 2903 // *profitable* to do so. We use a simple cost model to avoid increasing 2904 // register pressure too much. 2905 if (I->hasOneUse() || 2906 IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 2907 AddrModeInsts.push_back(I); 2908 return true; 2909 } 2910 2911 // It isn't profitable to do this, roll back. 2912 //cerr << "NOT FOLDING: " << *I; 2913 AddrMode = BackupAddrMode; 2914 AddrModeInsts.resize(OldSize); 2915 TPT.rollback(LastKnownGood); 2916 } 2917 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 2918 if (MatchOperationAddr(CE, CE->getOpcode(), Depth)) 2919 return true; 2920 TPT.rollback(LastKnownGood); 2921 } else if (isa<ConstantPointerNull>(Addr)) { 2922 // Null pointer gets folded without affecting the addressing mode. 2923 return true; 2924 } 2925 2926 // Worse case, the target should support [reg] addressing modes. :) 2927 if (!AddrMode.HasBaseReg) { 2928 AddrMode.HasBaseReg = true; 2929 AddrMode.BaseReg = Addr; 2930 // Still check for legality in case the target supports [imm] but not [i+r]. 2931 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2932 return true; 2933 AddrMode.HasBaseReg = false; 2934 AddrMode.BaseReg = nullptr; 2935 } 2936 2937 // If the base register is already taken, see if we can do [r+r]. 2938 if (AddrMode.Scale == 0) { 2939 AddrMode.Scale = 1; 2940 AddrMode.ScaledReg = Addr; 2941 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2942 return true; 2943 AddrMode.Scale = 0; 2944 AddrMode.ScaledReg = nullptr; 2945 } 2946 // Couldn't match. 2947 TPT.rollback(LastKnownGood); 2948 return false; 2949 } 2950 2951 /// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified 2952 /// inline asm call are due to memory operands. If so, return true, otherwise 2953 /// return false. 
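/// In other words, a value that only appears as an indirect memory-constrained
/// asm operand can have address computation folded into it; a use as a
/// register or other non-memory operand makes folding unsafe.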
2954 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 2955 const TargetMachine &TM) { 2956 const Function *F = CI->getParent()->getParent(); 2957 const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering(); 2958 const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo(); 2959 TargetLowering::AsmOperandInfoVector TargetConstraints = 2960 TLI->ParseConstraints(TRI, ImmutableCallSite(CI)); 2961 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 2962 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 2963 2964 // Compute the constraint code and ConstraintType to use. 2965 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 2966 2967 // If this asm operand is our Value*, and if it isn't an indirect memory 2968 // operand, we can't fold it! 2969 if (OpInfo.CallOperandVal == OpVal && 2970 (OpInfo.ConstraintType != TargetLowering::C_Memory || 2971 !OpInfo.isIndirect)) 2972 return false; 2973 } 2974 2975 return true; 2976 } 2977 2978 /// FindAllMemoryUses - Recursively walk all the uses of I until we find a 2979 /// memory use. If we find an obviously non-foldable instruction, return true. 2980 /// Add the ultimately found memory instructions to MemoryUses. 2981 static bool FindAllMemoryUses( 2982 Instruction *I, 2983 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 2984 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetMachine &TM) { 2985 // If we already considered this instruction, we're done. 2986 if (!ConsideredInsts.insert(I).second) 2987 return false; 2988 2989 // If this is an obviously unfoldable instruction, bail out. 2990 if (!MightBeFoldableInst(I)) 2991 return true; 2992 2993 // Loop over all the uses, recursively processing them. 2994 for (Use &U : I->uses()) { 2995 Instruction *UserI = cast<Instruction>(U.getUser()); 2996 2997 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 2998 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 2999 continue; 3000 } 3001 3002 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 3003 unsigned opNo = U.getOperandNo(); 3004 if (opNo == 0) return true; // Storing addr, not into addr. 3005 MemoryUses.push_back(std::make_pair(SI, opNo)); 3006 continue; 3007 } 3008 3009 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 3010 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 3011 if (!IA) return true; 3012 3013 // If this is a memory operand, we're cool, otherwise bail out. 3014 if (!IsOperandAMemoryOperand(CI, IA, I, TM)) 3015 return true; 3016 continue; 3017 } 3018 3019 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM)) 3020 return true; 3021 } 3022 3023 return false; 3024 } 3025 3026 /// ValueAlreadyLiveAtInst - Retrn true if Val is already known to be live at 3027 /// the use site that we're folding it into. If so, there is no cost to 3028 /// include it in the addressing mode. KnownLive1 and KnownLive2 are two values 3029 /// that we know are live at the instruction already. 3030 bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 3031 Value *KnownLive2) { 3032 // If Val is either of the known-live values, we know it is live! 3033 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 3034 return true; 3035 3036 // All values other than instructions and arguments (e.g. constants) are live. 
3037 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 3038 3039 // If Val is a constant sized alloca in the entry block, it is live, this is 3040 // true because it is just a reference to the stack/frame pointer, which is 3041 // live for the whole function. 3042 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 3043 if (AI->isStaticAlloca()) 3044 return true; 3045 3046 // Check to see if this value is already used in the memory instruction's 3047 // block. If so, it's already live into the block at the very least, so we 3048 // can reasonably fold it. 3049 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 3050 } 3051 3052 /// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing 3053 /// mode of the machine to fold the specified instruction into a load or store 3054 /// that ultimately uses it. However, the specified instruction has multiple 3055 /// uses. Given this, it may actually increase register pressure to fold it 3056 /// into the load. For example, consider this code: 3057 /// 3058 /// X = ... 3059 /// Y = X+1 3060 /// use(Y) -> nonload/store 3061 /// Z = Y+1 3062 /// load Z 3063 /// 3064 /// In this case, Y has multiple uses, and can be folded into the load of Z 3065 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 3066 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 3067 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 3068 /// number of computations either. 3069 /// 3070 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 3071 /// X was live across 'load Z' for other reasons, we actually *would* want to 3072 /// fold the addressing mode in the Z case. This would make Y die earlier. 3073 bool AddressingModeMatcher:: 3074 IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 3075 ExtAddrMode &AMAfter) { 3076 if (IgnoreProfitability) return true; 3077 3078 // AMBefore is the addressing mode before this instruction was folded into it, 3079 // and AMAfter is the addressing mode after the instruction was folded. Get 3080 // the set of registers referenced by AMAfter and subtract out those 3081 // referenced by AMBefore: this is the set of values which folding in this 3082 // address extends the lifetime of. 3083 // 3084 // Note that there are only two potential values being referenced here, 3085 // BaseReg and ScaleReg (global addresses are always available, as are any 3086 // folded immediates). 3087 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 3088 3089 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 3090 // lifetime wasn't extended by adding this instruction. 3091 if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 3092 BaseReg = nullptr; 3093 if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 3094 ScaledReg = nullptr; 3095 3096 // If folding this instruction (and it's subexprs) didn't extend any live 3097 // ranges, we're ok with it. 3098 if (!BaseReg && !ScaledReg) 3099 return true; 3100 3101 // If all uses of this instruction are ultimately load/store/inlineasm's, 3102 // check to see if their addressing modes will include this instruction. If 3103 // so, we can fold it into all uses, so it doesn't matter if it has multiple 3104 // uses. 
3105 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 3106 SmallPtrSet<Instruction*, 16> ConsideredInsts; 3107 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM)) 3108 return false; // Has a non-memory, non-foldable use! 3109 3110 // Now that we know that all uses of this instruction are part of a chain of 3111 // computation involving only operations that could theoretically be folded 3112 // into a memory use, loop over each of these uses and see if they could 3113 // *actually* fold the instruction. 3114 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 3115 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 3116 Instruction *User = MemoryUses[i].first; 3117 unsigned OpNo = MemoryUses[i].second; 3118 3119 // Get the access type of this use. If the use isn't a pointer, we don't 3120 // know what it accesses. 3121 Value *Address = User->getOperand(OpNo); 3122 if (!Address->getType()->isPointerTy()) 3123 return false; 3124 Type *AddressAccessTy = Address->getType()->getPointerElementType(); 3125 3126 // Do a match against the root of this address, ignoring profitability. This 3127 // will tell us if the addressing mode for the memory operation will 3128 // *actually* cover the shared instruction. 3129 ExtAddrMode Result; 3130 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3131 TPT.getRestorationPoint(); 3132 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, 3133 MemoryInst, Result, InsertedTruncs, 3134 PromotedInsts, TPT); 3135 Matcher.IgnoreProfitability = true; 3136 bool Success = Matcher.MatchAddr(Address, 0); 3137 (void)Success; assert(Success && "Couldn't select *anything*?"); 3138 3139 // The match was to check the profitability, the changes made are not 3140 // part of the original matcher. Therefore, they should be dropped 3141 // otherwise the original matcher will not present the right state. 3142 TPT.rollback(LastKnownGood); 3143 3144 // If the match didn't cover I, then it won't be shared by it. 3145 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(), 3146 I) == MatchedAddrModeInsts.end()) 3147 return false; 3148 3149 MatchedAddrModeInsts.clear(); 3150 } 3151 3152 return true; 3153 } 3154 3155 } // end anonymous namespace 3156 3157 /// IsNonLocalValue - Return true if the specified values are defined in a 3158 /// different basic block than BB. 3159 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 3160 if (Instruction *I = dyn_cast<Instruction>(V)) 3161 return I->getParent() != BB; 3162 return false; 3163 } 3164 3165 /// OptimizeMemoryInst - Load and Store Instructions often have 3166 /// addressing modes that can do significant amounts of computation. As such, 3167 /// instruction selection will try to get the load or store to do as much 3168 /// computation as possible for the program. The problem is that isel can only 3169 /// see within a single block. As such, we sink as much legal addressing mode 3170 /// stuff into the block as possible. 3171 /// 3172 /// This method is used to optimize both load/store and inline asms with memory 3173 /// operands. 3174 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 3175 Type *AccessTy) { 3176 Value *Repl = Addr; 3177 3178 // Try to collapse single-value PHI nodes. This is necessary to undo 3179 // unprofitable PRE transformations. 
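  // For example (an illustrative sketch), PRE can leave behind a single-value
  // PHI such as
  //   %addr = phi i32* [ %gep, %bb1 ], [ %gep, %bb2 ]
  // Looking through such PHIs lets all the non-PHI roots agree on a single
  // addressing mode that can then be sunk next to the memory instruction.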
3180 SmallVector<Value*, 8> worklist; 3181 SmallPtrSet<Value*, 16> Visited; 3182 worklist.push_back(Addr); 3183 3184 // Use a worklist to iteratively look through PHI nodes, and ensure that 3185 // the addressing mode obtained from the non-PHI roots of the graph 3186 // are equivalent. 3187 Value *Consensus = nullptr; 3188 unsigned NumUsesConsensus = 0; 3189 bool IsNumUsesConsensusValid = false; 3190 SmallVector<Instruction*, 16> AddrModeInsts; 3191 ExtAddrMode AddrMode; 3192 TypePromotionTransaction TPT; 3193 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3194 TPT.getRestorationPoint(); 3195 while (!worklist.empty()) { 3196 Value *V = worklist.back(); 3197 worklist.pop_back(); 3198 3199 // Break use-def graph loops. 3200 if (!Visited.insert(V).second) { 3201 Consensus = nullptr; 3202 break; 3203 } 3204 3205 // For a PHI node, push all of its incoming values. 3206 if (PHINode *P = dyn_cast<PHINode>(V)) { 3207 for (Value *IncValue : P->incoming_values()) 3208 worklist.push_back(IncValue); 3209 continue; 3210 } 3211 3212 // For non-PHIs, determine the addressing mode being computed. 3213 SmallVector<Instruction*, 16> NewAddrModeInsts; 3214 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 3215 V, AccessTy, MemoryInst, NewAddrModeInsts, *TM, InsertedTruncsSet, 3216 PromotedInsts, TPT); 3217 3218 // This check is broken into two cases with very similar code to avoid using 3219 // getNumUses() as much as possible. Some values have a lot of uses, so 3220 // calling getNumUses() unconditionally caused a significant compile-time 3221 // regression. 3222 if (!Consensus) { 3223 Consensus = V; 3224 AddrMode = NewAddrMode; 3225 AddrModeInsts = NewAddrModeInsts; 3226 continue; 3227 } else if (NewAddrMode == AddrMode) { 3228 if (!IsNumUsesConsensusValid) { 3229 NumUsesConsensus = Consensus->getNumUses(); 3230 IsNumUsesConsensusValid = true; 3231 } 3232 3233 // Ensure that the obtained addressing mode is equivalent to that obtained 3234 // for all other roots of the PHI traversal. Also, when choosing one 3235 // such root as representative, select the one with the most uses in order 3236 // to keep the cost modeling heuristics in AddressingModeMatcher 3237 // applicable. 3238 unsigned NumUses = V->getNumUses(); 3239 if (NumUses > NumUsesConsensus) { 3240 Consensus = V; 3241 NumUsesConsensus = NumUses; 3242 AddrModeInsts = NewAddrModeInsts; 3243 } 3244 continue; 3245 } 3246 3247 Consensus = nullptr; 3248 break; 3249 } 3250 3251 // If the addressing mode couldn't be determined, or if multiple different 3252 // ones were determined, bail out now. 3253 if (!Consensus) { 3254 TPT.rollback(LastKnownGood); 3255 return false; 3256 } 3257 TPT.commit(); 3258 3259 // Check to see if any of the instructions supersumed by this addr mode are 3260 // non-local to I's BB. 3261 bool AnyNonLocal = false; 3262 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) { 3263 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) { 3264 AnyNonLocal = true; 3265 break; 3266 } 3267 } 3268 3269 // If all the instructions matched are already in this BB, don't do anything. 3270 if (!AnyNonLocal) { 3271 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); 3272 return false; 3273 } 3274 3275 // Insert this computation right after this user. Since our caller is 3276 // scanning from the top of the BB to the bottom, reuse of the expr are 3277 // guaranteed to happen later. 
3278 IRBuilder<> Builder(MemoryInst); 3279 3280 // Now that we determined the addressing expression we want to use and know 3281 // that we have to sink it into this block. Check to see if we have already 3282 // done this for some other load/store instr in this block. If so, reuse the 3283 // computation. 3284 Value *&SunkAddr = SunkAddrs[Addr]; 3285 if (SunkAddr) { 3286 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " 3287 << *MemoryInst << "\n"); 3288 if (SunkAddr->getType() != Addr->getType()) 3289 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 3290 } else if (AddrSinkUsingGEPs || 3291 (!AddrSinkUsingGEPs.getNumOccurrences() && TM && 3292 TM->getSubtargetImpl(*MemoryInst->getParent()->getParent()) 3293 ->useAA())) { 3294 // By default, we use the GEP-based method when AA is used later. This 3295 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 3296 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 3297 << *MemoryInst << "\n"); 3298 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); 3299 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 3300 3301 // First, find the pointer. 3302 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 3303 ResultPtr = AddrMode.BaseReg; 3304 AddrMode.BaseReg = nullptr; 3305 } 3306 3307 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 3308 // We can't add more than one pointer together, nor can we scale a 3309 // pointer (both of which seem meaningless). 3310 if (ResultPtr || AddrMode.Scale != 1) 3311 return false; 3312 3313 ResultPtr = AddrMode.ScaledReg; 3314 AddrMode.Scale = 0; 3315 } 3316 3317 if (AddrMode.BaseGV) { 3318 if (ResultPtr) 3319 return false; 3320 3321 ResultPtr = AddrMode.BaseGV; 3322 } 3323 3324 // If the real base value actually came from an inttoptr, then the matcher 3325 // will look through it and provide only the integer value. In that case, 3326 // use it here. 3327 if (!ResultPtr && AddrMode.BaseReg) { 3328 ResultPtr = 3329 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); 3330 AddrMode.BaseReg = nullptr; 3331 } else if (!ResultPtr && AddrMode.Scale == 1) { 3332 ResultPtr = 3333 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); 3334 AddrMode.Scale = 0; 3335 } 3336 3337 if (!ResultPtr && 3338 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 3339 SunkAddr = Constant::getNullValue(Addr->getType()); 3340 } else if (!ResultPtr) { 3341 return false; 3342 } else { 3343 Type *I8PtrTy = 3344 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 3345 Type *I8Ty = Builder.getInt8Ty(); 3346 3347 // Start with the base register. Do this first so that subsequent address 3348 // matching finds it last, which will prevent it from trying to match it 3349 // as the scaled value in case it happens to be a mul. That would be 3350 // problematic if we've sunk a different mul for the scale, because then 3351 // we'd end up sinking both muls. 3352 if (AddrMode.BaseReg) { 3353 Value *V = AddrMode.BaseReg; 3354 if (V->getType() != IntPtrTy) 3355 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 3356 3357 ResultIndex = V; 3358 } 3359 3360 // Add the scale value. 3361 if (AddrMode.Scale) { 3362 Value *V = AddrMode.ScaledReg; 3363 if (V->getType() == IntPtrTy) { 3364 // done. 
3365 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 3366 cast<IntegerType>(V->getType())->getBitWidth()) { 3367 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 3368 } else { 3369 // It is only safe to sign extend the BaseReg if we know that the math 3370 // required to create it did not overflow before we extend it. Since 3371 // the original IR value was tossed in favor of a constant back when 3372 // the AddrMode was created we need to bail out gracefully if widths 3373 // do not match instead of extending it. 3374 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); 3375 if (I && (ResultIndex != AddrMode.BaseReg)) 3376 I->eraseFromParent(); 3377 return false; 3378 } 3379 3380 if (AddrMode.Scale != 1) 3381 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 3382 "sunkaddr"); 3383 if (ResultIndex) 3384 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 3385 else 3386 ResultIndex = V; 3387 } 3388 3389 // Add in the Base Offset if present. 3390 if (AddrMode.BaseOffs) { 3391 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 3392 if (ResultIndex) { 3393 // We need to add this separately from the scale above to help with 3394 // SDAG consecutive load/store merging. 3395 if (ResultPtr->getType() != I8PtrTy) 3396 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 3397 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 3398 } 3399 3400 ResultIndex = V; 3401 } 3402 3403 if (!ResultIndex) { 3404 SunkAddr = ResultPtr; 3405 } else { 3406 if (ResultPtr->getType() != I8PtrTy) 3407 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 3408 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 3409 } 3410 3411 if (SunkAddr->getType() != Addr->getType()) 3412 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 3413 } 3414 } else { 3415 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 3416 << *MemoryInst << "\n"); 3417 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); 3418 Value *Result = nullptr; 3419 3420 // Start with the base register. Do this first so that subsequent address 3421 // matching finds it last, which will prevent it from trying to match it 3422 // as the scaled value in case it happens to be a mul. That would be 3423 // problematic if we've sunk a different mul for the scale, because then 3424 // we'd end up sinking both muls. 3425 if (AddrMode.BaseReg) { 3426 Value *V = AddrMode.BaseReg; 3427 if (V->getType()->isPointerTy()) 3428 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 3429 if (V->getType() != IntPtrTy) 3430 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 3431 Result = V; 3432 } 3433 3434 // Add the scale value. 3435 if (AddrMode.Scale) { 3436 Value *V = AddrMode.ScaledReg; 3437 if (V->getType() == IntPtrTy) { 3438 // done. 3439 } else if (V->getType()->isPointerTy()) { 3440 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 3441 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 3442 cast<IntegerType>(V->getType())->getBitWidth()) { 3443 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 3444 } else { 3445 // It is only safe to sign extend the BaseReg if we know that the math 3446 // required to create it did not overflow before we extend it. Since 3447 // the original IR value was tossed in favor of a constant back when 3448 // the AddrMode was created we need to bail out gracefully if widths 3449 // do not match instead of extending it. 
3450 Instruction *I = dyn_cast_or_null<Instruction>(Result); 3451 if (I && (Result != AddrMode.BaseReg)) 3452 I->eraseFromParent(); 3453 return false; 3454 } 3455 if (AddrMode.Scale != 1) 3456 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 3457 "sunkaddr"); 3458 if (Result) 3459 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3460 else 3461 Result = V; 3462 } 3463 3464 // Add in the BaseGV if present. 3465 if (AddrMode.BaseGV) { 3466 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 3467 if (Result) 3468 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3469 else 3470 Result = V; 3471 } 3472 3473 // Add in the Base Offset if present. 3474 if (AddrMode.BaseOffs) { 3475 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 3476 if (Result) 3477 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3478 else 3479 Result = V; 3480 } 3481 3482 if (!Result) 3483 SunkAddr = Constant::getNullValue(Addr->getType()); 3484 else 3485 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 3486 } 3487 3488 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 3489 3490 // If we have no uses, recursively delete the value and all dead instructions 3491 // using it. 3492 if (Repl->use_empty()) { 3493 // This can cause recursive deletion, which can invalidate our iterator. 3494 // Use a WeakVH to hold onto it in case this happens. 3495 WeakVH IterHandle(CurInstIterator); 3496 BasicBlock *BB = CurInstIterator->getParent(); 3497 3498 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 3499 3500 if (IterHandle != CurInstIterator) { 3501 // If the iterator instruction was recursively deleted, start over at the 3502 // start of the block. 3503 CurInstIterator = BB->begin(); 3504 SunkAddrs.clear(); 3505 } 3506 } 3507 ++NumMemoryInsts; 3508 return true; 3509 } 3510 3511 /// OptimizeInlineAsmInst - If there are any memory operands, use 3512 /// OptimizeMemoryInst to sink their address computing into the block when 3513 /// possible / profitable. 3514 bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) { 3515 bool MadeChange = false; 3516 3517 const TargetRegisterInfo *TRI = 3518 TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo(); 3519 TargetLowering::AsmOperandInfoVector 3520 TargetConstraints = TLI->ParseConstraints(TRI, CS); 3521 unsigned ArgNo = 0; 3522 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 3523 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 3524 3525 // Compute the constraint code and ConstraintType to use. 3526 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 3527 3528 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 3529 OpInfo.isIndirect) { 3530 Value *OpVal = CS->getArgOperand(ArgNo++); 3531 MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType()); 3532 } else if (OpInfo.Type == InlineAsm::isInput) 3533 ArgNo++; 3534 } 3535 3536 return MadeChange; 3537 } 3538 3539 /// \brief Check if all the uses of \p Inst are equivalent (or free) zero or 3540 /// sign extensions. 
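///
/// For instance (an illustrative sketch), with two zero-extension users:
/// \code
/// %a = add i8 %x, 1
/// %z1 = zext i8 %a to i32
/// %z2 = zext i8 %a to i16
/// \endcode
/// the uses of %a count as equivalent provided the target reports zext from
/// i16 to i32 as free; mixing sext and zext users, or having sext users of
/// different widths, is rejected.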
3541 static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) {
3542   assert(!Inst->use_empty() && "Input must have at least one use");
3543   const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin());
3544   bool IsSExt = isa<SExtInst>(FirstUser);
3545   Type *ExtTy = FirstUser->getType();
3546   for (const User *U : Inst->users()) {
3547     const Instruction *UI = cast<Instruction>(U);
3548     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
3549       return false;
3550     Type *CurTy = UI->getType();
3551     // Same input and output types: Same instruction after CSE.
3552     if (CurTy == ExtTy)
3553       continue;
3554
3555     // If IsSExt is true, we are in this situation:
3556     // a = Inst
3557     // b = sext ty1 a to ty2
3558     // c = sext ty1 a to ty3
3559     // Assuming ty2 is shorter than ty3, this could be turned into:
3560     // a = Inst
3561     // b = sext ty1 a to ty2
3562     // c = sext ty2 b to ty3
3563     // However, the last sext is not free.
3564     if (IsSExt)
3565       return false;
3566
3567     // This is a ZExt, maybe this is free to extend from one type to another.
3568     // In that case, we would not account for a different use.
3569     Type *NarrowTy;
3570     Type *LargeTy;
3571     if (ExtTy->getScalarType()->getIntegerBitWidth() >
3572         CurTy->getScalarType()->getIntegerBitWidth()) {
3573       NarrowTy = CurTy;
3574       LargeTy = ExtTy;
3575     } else {
3576       NarrowTy = ExtTy;
3577       LargeTy = CurTy;
3578     }
3579
3580     if (!TLI.isZExtFree(NarrowTy, LargeTy))
3581       return false;
3582   }
3583   // All uses are the same or can be derived from one another for free.
3584   return true;
3585 }
3586
3587 /// \brief Try to form ExtLd by promoting \p Exts until they reach a
3588 /// load instruction.
3589 /// If an ext(load) can be formed, it is returned via \p LI for the load
3590 /// and \p Inst for the extension.
3591 /// Otherwise LI == nullptr and Inst == nullptr.
3592 /// When some promotion happened, \p TPT contains the proper state to
3593 /// revert them.
3594 ///
3595 /// \return true when promoting was necessary to expose the ext(load)
3596 /// opportunity, false otherwise.
3597 ///
3598 /// Example:
3599 /// \code
3600 /// %ld = load i32* %addr
3601 /// %add = add nuw i32 %ld, 4
3602 /// %zext = zext i32 %add to i64
3603 /// \endcode
3604 /// =>
3605 /// \code
3606 /// %ld = load i32* %addr
3607 /// %zext = zext i32 %ld to i64
3608 /// %add = add nuw i64 %zext, 4
3609 /// \endcode
3610 /// Thanks to the promotion, we can match zext(load i32*) to i64.
3611 bool CodeGenPrepare::ExtLdPromotion(TypePromotionTransaction &TPT,
3612                                     LoadInst *&LI, Instruction *&Inst,
3613                                     const SmallVectorImpl<Instruction *> &Exts,
3614                                     unsigned CreatedInstsCost = 0) {
3615   // Iterate over all the extensions to see if one forms an ext(load).
3616   for (auto I : Exts) {
3617     // Check if we directly have ext(load).
3618     if ((LI = dyn_cast<LoadInst>(I->getOperand(0)))) {
3619       Inst = I;
3620       // No promotion happened here.
3621       return false;
3622     }
3623     // Check whether or not we want to do any promotion.
3624     if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
3625       continue;
3626     // Get the action to perform the promotion.
3627     TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
3628         I, InsertedTruncsSet, *TLI, PromotedInsts);
3629     // Check if we can promote.
3630     if (!TPH)
3631       continue;
3632     // Save the current state.
3633 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3634 TPT.getRestorationPoint(); 3635 SmallVector<Instruction *, 4> NewExts; 3636 unsigned NewCreatedInstsCost = 0; 3637 unsigned ExtCost = !TLI->isExtFree(I); 3638 // Promote. 3639 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, 3640 &NewExts, nullptr, *TLI); 3641 assert(PromotedVal && 3642 "TypePromotionHelper should have filtered out those cases"); 3643 3644 // We would be able to merge only one extension in a load. 3645 // Therefore, if we have more than 1 new extension we heuristically 3646 // cut this search path, because it means we degrade the code quality. 3647 // With exactly 2, the transformation is neutral, because we will merge 3648 // one extension but leave one. However, we optimistically keep going, 3649 // because the new extension may be removed too. 3650 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; 3651 TotalCreatedInstsCost -= ExtCost; 3652 if (!StressExtLdPromotion && 3653 (TotalCreatedInstsCost > 1 || 3654 !isPromotedInstructionLegal(*TLI, PromotedVal))) { 3655 // The promotion is not profitable, rollback to the previous state. 3656 TPT.rollback(LastKnownGood); 3657 continue; 3658 } 3659 // The promotion is profitable. 3660 // Check if it exposes an ext(load). 3661 (void)ExtLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInstsCost); 3662 if (LI && (StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || 3663 // If we have created a new extension, i.e., now we have two 3664 // extensions. We must make sure one of them is merged with 3665 // the load, otherwise we may degrade the code quality. 3666 (LI->hasOneUse() || hasSameExtUse(LI, *TLI)))) 3667 // Promotion happened. 3668 return true; 3669 // If this does not help to expose an ext(load) then, rollback. 3670 TPT.rollback(LastKnownGood); 3671 } 3672 // None of the extension can form an ext(load). 3673 LI = nullptr; 3674 Inst = nullptr; 3675 return false; 3676 } 3677 3678 /// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same 3679 /// basic block as the load, unless conditions are unfavorable. This allows 3680 /// SelectionDAG to fold the extend into the load. 3681 /// \p I[in/out] the extension may be modified during the process if some 3682 /// promotions apply. 3683 /// 3684 bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *&I) { 3685 // Try to promote a chain of computation if it allows to form 3686 // an extended load. 3687 TypePromotionTransaction TPT; 3688 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3689 TPT.getRestorationPoint(); 3690 SmallVector<Instruction *, 1> Exts; 3691 Exts.push_back(I); 3692 // Look for a load being extended. 3693 LoadInst *LI = nullptr; 3694 Instruction *OldExt = I; 3695 bool HasPromoted = ExtLdPromotion(TPT, LI, I, Exts); 3696 if (!LI || !I) { 3697 assert(!HasPromoted && !LI && "If we did not match any load instruction " 3698 "the code must remain the same"); 3699 I = OldExt; 3700 return false; 3701 } 3702 3703 // If they're already in the same block, there's nothing to do. 3704 // Make the cheap checks first if we did not promote. 3705 // If we promoted, we need to check if it is indeed profitable. 3706 if (!HasPromoted && LI->getParent() == I->getParent()) 3707 return false; 3708 3709 EVT VT = TLI->getValueType(I->getType()); 3710 EVT LoadVT = TLI->getValueType(LI->getType()); 3711 3712 // If the load has other users and the truncate is not free, this probably 3713 // isn't worthwhile. 
3714 if (!LI->hasOneUse() && TLI && 3715 (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) && 3716 !TLI->isTruncateFree(I->getType(), LI->getType())) { 3717 I = OldExt; 3718 TPT.rollback(LastKnownGood); 3719 return false; 3720 } 3721 3722 // Check whether the target supports casts folded into loads. 3723 unsigned LType; 3724 if (isa<ZExtInst>(I)) 3725 LType = ISD::ZEXTLOAD; 3726 else { 3727 assert(isa<SExtInst>(I) && "Unexpected ext type!"); 3728 LType = ISD::SEXTLOAD; 3729 } 3730 if (TLI && !TLI->isLoadExtLegal(LType, VT, LoadVT)) { 3731 I = OldExt; 3732 TPT.rollback(LastKnownGood); 3733 return false; 3734 } 3735 3736 // Move the extend into the same block as the load, so that SelectionDAG 3737 // can fold it. 3738 TPT.commit(); 3739 I->removeFromParent(); 3740 I->insertAfter(LI); 3741 ++NumExtsMoved; 3742 return true; 3743 } 3744 3745 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) { 3746 BasicBlock *DefBB = I->getParent(); 3747 3748 // If the result of a {s|z}ext and its source are both live out, rewrite all 3749 // other uses of the source with result of extension. 3750 Value *Src = I->getOperand(0); 3751 if (Src->hasOneUse()) 3752 return false; 3753 3754 // Only do this xform if truncating is free. 3755 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 3756 return false; 3757 3758 // Only safe to perform the optimization if the source is also defined in 3759 // this block. 3760 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 3761 return false; 3762 3763 bool DefIsLiveOut = false; 3764 for (User *U : I->users()) { 3765 Instruction *UI = cast<Instruction>(U); 3766 3767 // Figure out which BB this ext is used in. 3768 BasicBlock *UserBB = UI->getParent(); 3769 if (UserBB == DefBB) continue; 3770 DefIsLiveOut = true; 3771 break; 3772 } 3773 if (!DefIsLiveOut) 3774 return false; 3775 3776 // Make sure none of the uses are PHI nodes. 3777 for (User *U : Src->users()) { 3778 Instruction *UI = cast<Instruction>(U); 3779 BasicBlock *UserBB = UI->getParent(); 3780 if (UserBB == DefBB) continue; 3781 // Be conservative. We don't want this xform to end up introducing 3782 // reloads just before load / store instructions. 3783 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 3784 return false; 3785 } 3786 3787 // InsertedTruncs - Only insert one trunc in each block once. 3788 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 3789 3790 bool MadeChange = false; 3791 for (Use &U : Src->uses()) { 3792 Instruction *User = cast<Instruction>(U.getUser()); 3793 3794 // Figure out which BB this ext is used in. 3795 BasicBlock *UserBB = User->getParent(); 3796 if (UserBB == DefBB) continue; 3797 3798 // Both src and def are live in this block. Rewrite the use. 3799 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 3800 3801 if (!InsertedTrunc) { 3802 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 3803 InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt); 3804 InsertedTruncsSet.insert(InsertedTrunc); 3805 } 3806 3807 // Replace a use of the {s|z}ext source with a use of the result. 3808 U = InsertedTrunc; 3809 ++NumExtUses; 3810 MadeChange = true; 3811 } 3812 3813 return MadeChange; 3814 } 3815 3816 /// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be 3817 /// turned into an explicit branch. 
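///
/// For example (an illustrative sketch):
/// \code
/// %v = load i32* %p
/// %c = icmp eq i32 %v, 0
/// %r = select i1 %c, i32 %a, i32 %b
/// \endcode
/// Here the compare has a single use and one of its operands is a single-use
/// load, so turning the select into a branch is considered profitable to
/// avoid stalling on the load.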
3818 static bool isFormingBranchFromSelectProfitable(SelectInst *SI) { 3819 // FIXME: This should use the same heuristics as IfConversion to determine 3820 // whether a select is better represented as a branch. This requires that 3821 // branch probability metadata is preserved for the select, which is not the 3822 // case currently. 3823 3824 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 3825 3826 // If the branch is predicted right, an out of order CPU can avoid blocking on 3827 // the compare. Emit cmovs on compares with a memory operand as branches to 3828 // avoid stalls on the load from memory. If the compare has more than one use 3829 // there's probably another cmov or setcc around so it's not worth emitting a 3830 // branch. 3831 if (!Cmp) 3832 return false; 3833 3834 Value *CmpOp0 = Cmp->getOperand(0); 3835 Value *CmpOp1 = Cmp->getOperand(1); 3836 3837 // We check that the memory operand has one use to avoid uses of the loaded 3838 // value directly after the compare, making branches unprofitable. 3839 return Cmp->hasOneUse() && 3840 ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) || 3841 (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse())); 3842 } 3843 3844 3845 /// If we have a SelectInst that will likely profit from branch prediction, 3846 /// turn it into a branch. 3847 bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) { 3848 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 3849 3850 // Can we convert the 'select' to CF ? 3851 if (DisableSelectToBranch || OptSize || !TLI || VectorCond) 3852 return false; 3853 3854 TargetLowering::SelectSupportKind SelectKind; 3855 if (VectorCond) 3856 SelectKind = TargetLowering::VectorMaskSelect; 3857 else if (SI->getType()->isVectorTy()) 3858 SelectKind = TargetLowering::ScalarCondVectorVal; 3859 else 3860 SelectKind = TargetLowering::ScalarValSelect; 3861 3862 // Do we have efficient codegen support for this kind of 'selects' ? 3863 if (TLI->isSelectSupported(SelectKind)) { 3864 // We have efficient codegen support for the select instruction. 3865 // Check if it is profitable to keep this 'select'. 3866 if (!TLI->isPredictableSelectExpensive() || 3867 !isFormingBranchFromSelectProfitable(SI)) 3868 return false; 3869 } 3870 3871 ModifiedDT = true; 3872 3873 // First, we split the block containing the select into 2 blocks. 3874 BasicBlock *StartBlock = SI->getParent(); 3875 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI)); 3876 BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 3877 3878 // Create a new block serving as the landing pad for the branch. 3879 BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid", 3880 NextBlock->getParent(), NextBlock); 3881 3882 // Move the unconditional branch from the block with the select in it into our 3883 // landing pad block. 3884 StartBlock->getTerminator()->eraseFromParent(); 3885 BranchInst::Create(NextBlock, SmallBlock); 3886 3887 // Insert the real conditional branch based on the original condition. 3888 BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI); 3889 3890 // The select itself is replaced with a PHI Node. 3891 PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin()); 3892 PN->takeName(SI); 3893 PN->addIncoming(SI->getTrueValue(), StartBlock); 3894 PN->addIncoming(SI->getFalseValue(), SmallBlock); 3895 SI->replaceAllUsesWith(PN); 3896 SI->eraseFromParent(); 3897 3898 // Instruct OptimizeBlock to skip to the next block. 
3899 CurInstIterator = StartBlock->end(); 3900 ++NumSelectsExpanded; 3901 return true; 3902 } 3903 3904 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 3905 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 3906 int SplatElem = -1; 3907 for (unsigned i = 0; i < Mask.size(); ++i) { 3908 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 3909 return false; 3910 SplatElem = Mask[i]; 3911 } 3912 3913 return true; 3914 } 3915 3916 /// Some targets have expensive vector shifts if the lanes aren't all the same 3917 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 3918 /// it's often worth sinking a shufflevector splat down to its use so that 3919 /// codegen can spot all lanes are identical. 3920 bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 3921 BasicBlock *DefBB = SVI->getParent(); 3922 3923 // Only do this xform if variable vector shifts are particularly expensive. 3924 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 3925 return false; 3926 3927 // We only expect better codegen by sinking a shuffle if we can recognise a 3928 // constant splat. 3929 if (!isBroadcastShuffle(SVI)) 3930 return false; 3931 3932 // InsertedShuffles - Only insert a shuffle in each block once. 3933 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 3934 3935 bool MadeChange = false; 3936 for (User *U : SVI->users()) { 3937 Instruction *UI = cast<Instruction>(U); 3938 3939 // Figure out which BB this ext is used in. 3940 BasicBlock *UserBB = UI->getParent(); 3941 if (UserBB == DefBB) continue; 3942 3943 // For now only apply this when the splat is used by a shift instruction. 3944 if (!UI->isShift()) continue; 3945 3946 // Everything checks out, sink the shuffle if the user's block doesn't 3947 // already have a copy. 3948 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 3949 3950 if (!InsertedShuffle) { 3951 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 3952 InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0), 3953 SVI->getOperand(1), 3954 SVI->getOperand(2), "", InsertPt); 3955 } 3956 3957 UI->replaceUsesOfWith(SVI, InsertedShuffle); 3958 MadeChange = true; 3959 } 3960 3961 // If we removed all uses, nuke the shuffle. 3962 if (SVI->use_empty()) { 3963 SVI->eraseFromParent(); 3964 MadeChange = true; 3965 } 3966 3967 return MadeChange; 3968 } 3969 3970 namespace { 3971 /// \brief Helper class to promote a scalar operation to a vector one. 3972 /// This class is used to move downward extractelement transition. 3973 /// E.g., 3974 /// a = vector_op <2 x i32> 3975 /// b = extractelement <2 x i32> a, i32 0 3976 /// c = scalar_op b 3977 /// store c 3978 /// 3979 /// => 3980 /// a = vector_op <2 x i32> 3981 /// c = vector_op a (equivalent to scalar_op on the related lane) 3982 /// * d = extractelement <2 x i32> c, i32 0 3983 /// * store d 3984 /// Assuming both extractelement and store can be combine, we get rid of the 3985 /// transition. 3986 class VectorPromoteHelper { 3987 /// Used to perform some checks on the legality of vector operations. 3988 const TargetLowering &TLI; 3989 3990 /// Used to estimated the cost of the promoted chain. 3991 const TargetTransformInfo &TTI; 3992 3993 /// The transition being moved downwards. 3994 Instruction *Transition; 3995 /// The sequence of instructions to be promoted. 3996 SmallVector<Instruction *, 4> InstsToBePromoted; 3997 /// Cost of combining a store and an extract. 
3998 unsigned StoreExtractCombineCost; 3999 /// Instruction that will be combined with the transition. 4000 Instruction *CombineInst; 4001 4002 /// \brief The instruction that represents the current end of the transition. 4003 /// Since we are faking the promotion until we reach the end of the chain 4004 /// of computation, we need a way to get the current end of the transition. 4005 Instruction *getEndOfTransition() const { 4006 if (InstsToBePromoted.empty()) 4007 return Transition; 4008 return InstsToBePromoted.back(); 4009 } 4010 4011 /// \brief Return the index of the original value in the transition. 4012 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 4013 /// c, is at index 0. 4014 unsigned getTransitionOriginalValueIdx() const { 4015 assert(isa<ExtractElementInst>(Transition) && 4016 "Other kind of transitions are not supported yet"); 4017 return 0; 4018 } 4019 4020 /// \brief Return the index of the index in the transition. 4021 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 4022 /// is at index 1. 4023 unsigned getTransitionIdx() const { 4024 assert(isa<ExtractElementInst>(Transition) && 4025 "Other kind of transitions are not supported yet"); 4026 return 1; 4027 } 4028 4029 /// \brief Get the type of the transition. 4030 /// This is the type of the original value. 4031 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 4032 /// transition is <2 x i32>. 4033 Type *getTransitionType() const { 4034 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 4035 } 4036 4037 /// \brief Promote \p ToBePromoted by moving \p Def downward through. 4038 /// I.e., we have the following sequence: 4039 /// Def = Transition <ty1> a to <ty2> 4040 /// b = ToBePromoted <ty2> Def, ... 4041 /// => 4042 /// b = ToBePromoted <ty1> a, ... 4043 /// Def = Transition <ty1> ToBePromoted to <ty2> 4044 void promoteImpl(Instruction *ToBePromoted); 4045 4046 /// \brief Check whether or not it is profitable to promote all the 4047 /// instructions enqueued to be promoted. 4048 bool isProfitableToPromote() { 4049 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 4050 unsigned Index = isa<ConstantInt>(ValIdx) 4051 ? cast<ConstantInt>(ValIdx)->getZExtValue() 4052 : -1; 4053 Type *PromotedType = getTransitionType(); 4054 4055 StoreInst *ST = cast<StoreInst>(CombineInst); 4056 unsigned AS = ST->getPointerAddressSpace(); 4057 unsigned Align = ST->getAlignment(); 4058 // Check if this store is supported. 4059 if (!TLI.allowsMisalignedMemoryAccesses( 4060 TLI.getValueType(ST->getValueOperand()->getType()), AS, Align)) { 4061 // If this is not supported, there is no way we can combine 4062 // the extract with the store. 4063 return false; 4064 } 4065 4066 // The scalar chain of computation has to pay for the transition 4067 // scalar to vector. 4068 // The vector chain has to account for the combining cost. 4069 uint64_t ScalarCost = 4070 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 4071 uint64_t VectorCost = StoreExtractCombineCost; 4072 for (const auto &Inst : InstsToBePromoted) { 4073 // Compute the cost. 4074 // By construction, all instructions being promoted are arithmetic ones. 4075 // Moreover, one argument is a constant that can be viewed as a splat 4076 // constant. 4077 Value *Arg0 = Inst->getOperand(0); 4078 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 4079 isa<ConstantFP>(Arg0); 4080 TargetTransformInfo::OperandValueKind Arg0OVK = 4081 IsArg0Constant ? 
TargetTransformInfo::OK_UniformConstantValue 4082 : TargetTransformInfo::OK_AnyValue; 4083 TargetTransformInfo::OperandValueKind Arg1OVK = 4084 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 4085 : TargetTransformInfo::OK_AnyValue; 4086 ScalarCost += TTI.getArithmeticInstrCost( 4087 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); 4088 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 4089 Arg0OVK, Arg1OVK); 4090 } 4091 DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 4092 << ScalarCost << "\nVector: " << VectorCost << '\n'); 4093 return ScalarCost > VectorCost; 4094 } 4095 4096 /// \brief Generate a constant vector with \p Val with the same 4097 /// number of elements as the transition. 4098 /// \p UseSplat defines whether or not \p Val should be replicated 4099 /// accross the whole vector. 4100 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 4101 /// otherwise we generate a vector with as many undef as possible: 4102 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 4103 /// used at the index of the extract. 4104 Value *getConstantVector(Constant *Val, bool UseSplat) const { 4105 unsigned ExtractIdx = UINT_MAX; 4106 if (!UseSplat) { 4107 // If we cannot determine where the constant must be, we have to 4108 // use a splat constant. 4109 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 4110 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 4111 ExtractIdx = CstVal->getSExtValue(); 4112 else 4113 UseSplat = true; 4114 } 4115 4116 unsigned End = getTransitionType()->getVectorNumElements(); 4117 if (UseSplat) 4118 return ConstantVector::getSplat(End, Val); 4119 4120 SmallVector<Constant *, 4> ConstVec; 4121 UndefValue *UndefVal = UndefValue::get(Val->getType()); 4122 for (unsigned Idx = 0; Idx != End; ++Idx) { 4123 if (Idx == ExtractIdx) 4124 ConstVec.push_back(Val); 4125 else 4126 ConstVec.push_back(UndefVal); 4127 } 4128 return ConstantVector::get(ConstVec); 4129 } 4130 4131 /// \brief Check if promoting to a vector type an operand at \p OperandIdx 4132 /// in \p Use can trigger undefined behavior. 4133 static bool canCauseUndefinedBehavior(const Instruction *Use, 4134 unsigned OperandIdx) { 4135 // This is not safe to introduce undef when the operand is on 4136 // the right hand side of a division-like instruction. 4137 if (OperandIdx != 1) 4138 return false; 4139 switch (Use->getOpcode()) { 4140 default: 4141 return false; 4142 case Instruction::SDiv: 4143 case Instruction::UDiv: 4144 case Instruction::SRem: 4145 case Instruction::URem: 4146 return true; 4147 case Instruction::FDiv: 4148 case Instruction::FRem: 4149 return !Use->hasNoNaNs(); 4150 } 4151 llvm_unreachable(nullptr); 4152 } 4153 4154 public: 4155 VectorPromoteHelper(const TargetLowering &TLI, const TargetTransformInfo &TTI, 4156 Instruction *Transition, unsigned CombineCost) 4157 : TLI(TLI), TTI(TTI), Transition(Transition), 4158 StoreExtractCombineCost(CombineCost), CombineInst(nullptr) { 4159 assert(Transition && "Do not know how to promote null"); 4160 } 4161 4162 /// \brief Check if we can promote \p ToBePromoted to \p Type. 4163 bool canPromote(const Instruction *ToBePromoted) const { 4164 // We could support CastInst too. 4165 return isa<BinaryOperator>(ToBePromoted); 4166 } 4167 4168 /// \brief Check if it is profitable to promote \p ToBePromoted 4169 /// by moving downward the transition through. 
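  ///
  /// For instance (an illustrative sketch), if %b is the current end of the
  /// transition, "%c = mul i32 %b, 8" may be promoted (assuming the vector
  /// multiply is legal for the target) because the other operand is a
  /// constant that can be turned into a splat, whereas "%c = udiv i32 8, %b"
  /// is rejected: promoting it could place undef lanes on the divisor.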
4170 bool shouldPromote(const Instruction *ToBePromoted) const { 4171 // Promote only if all the operands can be statically expanded. 4172 // Indeed, we do not want to introduce any new kind of transitions. 4173 for (const Use &U : ToBePromoted->operands()) { 4174 const Value *Val = U.get(); 4175 if (Val == getEndOfTransition()) { 4176 // If the use is a division and the transition is on the rhs, 4177 // we cannot promote the operation, otherwise we may create a 4178 // division by zero. 4179 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 4180 return false; 4181 continue; 4182 } 4183 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 4184 !isa<ConstantFP>(Val)) 4185 return false; 4186 } 4187 // Check that the resulting operation is legal. 4188 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 4189 if (!ISDOpcode) 4190 return false; 4191 return StressStoreExtract || 4192 TLI.isOperationLegalOrCustom( 4193 ISDOpcode, TLI.getValueType(getTransitionType(), true)); 4194 } 4195 4196 /// \brief Check whether or not \p Use can be combined 4197 /// with the transition. 4198 /// I.e., is it possible to do Use(Transition) => AnotherUse? 4199 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } 4200 4201 /// \brief Record \p ToBePromoted as part of the chain to be promoted. 4202 void enqueueForPromotion(Instruction *ToBePromoted) { 4203 InstsToBePromoted.push_back(ToBePromoted); 4204 } 4205 4206 /// \brief Set the instruction that will be combined with the transition. 4207 void recordCombineInstruction(Instruction *ToBeCombined) { 4208 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); 4209 CombineInst = ToBeCombined; 4210 } 4211 4212 /// \brief Promote all the instructions enqueued for promotion if it is 4213 /// is profitable. 4214 /// \return True if the promotion happened, false otherwise. 4215 bool promote() { 4216 // Check if there is something to promote. 4217 // Right now, if we do not have anything to combine with, 4218 // we assume the promotion is not profitable. 4219 if (InstsToBePromoted.empty() || !CombineInst) 4220 return false; 4221 4222 // Check cost. 4223 if (!StressStoreExtract && !isProfitableToPromote()) 4224 return false; 4225 4226 // Promote. 4227 for (auto &ToBePromoted : InstsToBePromoted) 4228 promoteImpl(ToBePromoted); 4229 InstsToBePromoted.clear(); 4230 return true; 4231 } 4232 }; 4233 } // End of anonymous namespace. 4234 4235 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { 4236 // At this point, we know that all the operands of ToBePromoted but Def 4237 // can be statically promoted. 4238 // For Def, we need to use its parameter in ToBePromoted: 4239 // b = ToBePromoted ty1 a 4240 // Def = Transition ty1 b to ty2 4241 // Move the transition down. 4242 // 1. Replace all uses of the promoted operation by the transition. 4243 // = ... b => = ... Def. 4244 assert(ToBePromoted->getType() == Transition->getType() && 4245 "The type of the result of the transition does not match " 4246 "the final type"); 4247 ToBePromoted->replaceAllUsesWith(Transition); 4248 // 2. Update the type of the uses. 4249 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. 4250 Type *TransitionTy = getTransitionType(); 4251 ToBePromoted->mutateType(TransitionTy); 4252 // 3. Update all the operands of the promoted operation with promoted 4253 // operands. 4254 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. 
4255 for (Use &U : ToBePromoted->operands()) { 4256 Value *Val = U.get(); 4257 Value *NewVal = nullptr; 4258 if (Val == Transition) 4259 NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); 4260 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || 4261 isa<ConstantFP>(Val)) { 4262 // Use a splat constant if it is not safe to use undef. 4263 NewVal = getConstantVector( 4264 cast<Constant>(Val), 4265 isa<UndefValue>(Val) || 4266 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); 4267 } else 4268 llvm_unreachable("Did you modified shouldPromote and forgot to update " 4269 "this?"); 4270 ToBePromoted->setOperand(U.getOperandNo(), NewVal); 4271 } 4272 Transition->removeFromParent(); 4273 Transition->insertAfter(ToBePromoted); 4274 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); 4275 } 4276 4277 /// Some targets can do store(extractelement) with one instruction. 4278 /// Try to push the extractelement towards the stores when the target 4279 /// has this feature and this is profitable. 4280 bool CodeGenPrepare::OptimizeExtractElementInst(Instruction *Inst) { 4281 unsigned CombineCost = UINT_MAX; 4282 if (DisableStoreExtract || !TLI || 4283 (!StressStoreExtract && 4284 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 4285 Inst->getOperand(1), CombineCost))) 4286 return false; 4287 4288 // At this point we know that Inst is a vector to scalar transition. 4289 // Try to move it down the def-use chain, until: 4290 // - We can combine the transition with its single use 4291 // => we got rid of the transition. 4292 // - We escape the current basic block 4293 // => we would need to check that we are moving it at a cheaper place and 4294 // we do not do that for now. 4295 BasicBlock *Parent = Inst->getParent(); 4296 DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 4297 VectorPromoteHelper VPH(*TLI, *TTI, Inst, CombineCost); 4298 // If the transition has more than one use, assume this is not going to be 4299 // beneficial. 4300 while (Inst->hasOneUse()) { 4301 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 4302 DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 4303 4304 if (ToBePromoted->getParent() != Parent) { 4305 DEBUG(dbgs() << "Instruction to promote is in a different block (" 4306 << ToBePromoted->getParent()->getName() 4307 << ") than the transition (" << Parent->getName() << ").\n"); 4308 return false; 4309 } 4310 4311 if (VPH.canCombine(ToBePromoted)) { 4312 DEBUG(dbgs() << "Assume " << *Inst << '\n' 4313 << "will be combined with: " << *ToBePromoted << '\n'); 4314 VPH.recordCombineInstruction(ToBePromoted); 4315 bool Changed = VPH.promote(); 4316 NumStoreExtractExposed += Changed; 4317 return Changed; 4318 } 4319 4320 DEBUG(dbgs() << "Try promoting.\n"); 4321 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 4322 return false; 4323 4324 DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 4325 4326 VPH.enqueueForPromotion(ToBePromoted); 4327 Inst = ToBePromoted; 4328 } 4329 return false; 4330 } 4331 4332 bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) { 4333 if (PHINode *P = dyn_cast<PHINode>(I)) { 4334 // It is possible for very late stage optimizations (such as SimplifyCFG) 4335 // to introduce PHI nodes too late to be cleaned up. If we detect such a 4336 // trivial PHI, go ahead and zap it here. 
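    // For example (an illustrative sketch), a PHI whose incoming values are
    // all identical, such as
    //   %p = phi i32 [ %x, %bb1 ], [ %x, %bb2 ]
    // simplifies to %x and is removed here.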
4337 const DataLayout &DL = I->getModule()->getDataLayout(); 4338 if (Value *V = SimplifyInstruction(P, DL, TLInfo, nullptr)) { 4339 P->replaceAllUsesWith(V); 4340 P->eraseFromParent(); 4341 ++NumPHIsElim; 4342 return true; 4343 } 4344 return false; 4345 } 4346 4347 if (CastInst *CI = dyn_cast<CastInst>(I)) { 4348 // If the source of the cast is a constant, then this should have 4349 // already been constant folded. The only reason NOT to constant fold 4350 // it is if something (e.g. LSR) was careful to place the constant 4351 // evaluation in a block other than then one that uses it (e.g. to hoist 4352 // the address of globals out of a loop). If this is the case, we don't 4353 // want to forward-subst the cast. 4354 if (isa<Constant>(CI->getOperand(0))) 4355 return false; 4356 4357 if (TLI && OptimizeNoopCopyExpression(CI, *TLI)) 4358 return true; 4359 4360 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { 4361 /// Sink a zext or sext into its user blocks if the target type doesn't 4362 /// fit in one register 4363 if (TLI && TLI->getTypeAction(CI->getContext(), 4364 TLI->getValueType(CI->getType())) == 4365 TargetLowering::TypeExpandInteger) { 4366 return SinkCast(CI); 4367 } else { 4368 bool MadeChange = MoveExtToFormExtLoad(I); 4369 return MadeChange | OptimizeExtUses(I); 4370 } 4371 } 4372 return false; 4373 } 4374 4375 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 4376 if (!TLI || !TLI->hasMultipleConditionRegisters()) 4377 return OptimizeCmpExpression(CI); 4378 4379 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 4380 if (TLI) 4381 return OptimizeMemoryInst(I, I->getOperand(0), LI->getType()); 4382 return false; 4383 } 4384 4385 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 4386 if (TLI) 4387 return OptimizeMemoryInst(I, SI->getOperand(1), 4388 SI->getOperand(0)->getType()); 4389 return false; 4390 } 4391 4392 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); 4393 4394 if (BinOp && (BinOp->getOpcode() == Instruction::AShr || 4395 BinOp->getOpcode() == Instruction::LShr)) { 4396 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); 4397 if (TLI && CI && TLI->hasExtractBitsInsn()) 4398 return OptimizeExtractBits(BinOp, CI, *TLI); 4399 4400 return false; 4401 } 4402 4403 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 4404 if (GEPI->hasAllZeroIndices()) { 4405 /// The GEP operand must be a pointer, so must its result -> BitCast 4406 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), 4407 GEPI->getName(), GEPI); 4408 GEPI->replaceAllUsesWith(NC); 4409 GEPI->eraseFromParent(); 4410 ++NumGEPsElim; 4411 OptimizeInst(NC, ModifiedDT); 4412 return true; 4413 } 4414 return false; 4415 } 4416 4417 if (CallInst *CI = dyn_cast<CallInst>(I)) 4418 return OptimizeCallInst(CI, ModifiedDT); 4419 4420 if (SelectInst *SI = dyn_cast<SelectInst>(I)) 4421 return OptimizeSelectInst(SI); 4422 4423 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) 4424 return OptimizeShuffleVectorInst(SVI); 4425 4426 if (isa<ExtractElementInst>(I)) 4427 return OptimizeExtractElementInst(I); 4428 4429 return false; 4430 } 4431 4432 // In this pass we look for GEP and cast instructions that are used 4433 // across basic blocks and rewrite them to improve basic-block-at-a-time 4434 // selection. 
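// For example (a rough sketch of the effect), an address GEP or a sext/zext
// defined in a predecessor block gets a copy sunk next to the load or store
// that uses it, so instruction selection can fold it into the memory
// operation's addressing mode.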
4435 bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
4436   SunkAddrs.clear();
4437   bool MadeChange = false;
4438
4439   CurInstIterator = BB.begin();
4440   while (CurInstIterator != BB.end()) {
4441     MadeChange |= OptimizeInst(CurInstIterator++, ModifiedDT);
4442     if (ModifiedDT)
4443       return true;
4444   }
4445   MadeChange |= DupRetToEnableTailCallOpts(&BB);
4446
4447   return MadeChange;
4448 }
4449
4450 // If llvm.dbg.value is far away from the value, then isel may not be able
4451 // to handle it properly. isel will drop llvm.dbg.value if it cannot
4452 // find a node corresponding to the value.
4453 bool CodeGenPrepare::PlaceDbgValues(Function &F) {
4454   bool MadeChange = false;
4455   for (BasicBlock &BB : F) {
4456     Instruction *PrevNonDbgInst = nullptr;
4457     for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
4458       Instruction *Insn = BI++;
4459       DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
4460       // Leave dbg.values that refer to an alloca alone. These
4461       // intrinsics describe the address of a variable (= the alloca)
4462       // being taken. They should not be moved next to the alloca
4463       // (and to the beginning of the scope), but rather stay close to
4464       // where said address is used.
4465       if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
4466         PrevNonDbgInst = Insn;
4467         continue;
4468       }
4469
4470       Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
4471       if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
4472         DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
4473         DVI->removeFromParent();
4474         if (isa<PHINode>(VI))
4475           DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
4476         else
4477           DVI->insertAfter(VI);
4478         MadeChange = true;
4479         ++NumDbgValueMoved;
4480       }
4481     }
4482   }
4483   return MadeChange;
4484 }
4485
4486 // If there is a sequence that branches based on comparing a single bit
4487 // against zero that can be combined into a single instruction, and the
4488 // target supports folding these into a single instruction, sink the
4489 // mask and compare into the branch uses. Do this before OptimizeBlock ->
4490 // OptimizeInst -> OptimizeCmpExpression, which perturbs the pattern being
4491 // searched for.
4492 bool CodeGenPrepare::sinkAndCmp(Function &F) {
4493   if (!EnableAndCmpSinking)
4494     return false;
4495   if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
4496     return false;
4497   bool MadeChange = false;
4498   for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
4499     BasicBlock *BB = I++;
4500
4501     // Does this BB end with the following?
4502     //   %andVal = and %val, #single-bit-set
4503     //   %icmpVal = icmp %andVal, 0
4504     //   br i1 %icmpVal, label %dest1, label %dest2
4505     BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
4506     if (!Brcc || !Brcc->isConditional())
4507       continue;
4508     ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
4509     if (!Cmp || Cmp->getParent() != BB)
4510       continue;
4511     ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
4512     if (!Zero || !Zero->isZero())
4513       continue;
4514     Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
4515     if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
4516       continue;
4517     ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
4518     if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
4519       continue;
4520     DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());
4521
4522     // Push the "and; icmp" for any users that are conditional branches.
    // Since there can only be one branch use per BB, we don't need to keep
    // track of which BBs we insert into.
    for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
         UI != E;) {
      Use &TheUse = *UI;
      // Find the brcc use.
      BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
      ++UI;
      if (!BrccUser || !BrccUser->isConditional())
        continue;
      BasicBlock *UserBB = BrccUser->getParent();
      if (UserBB == BB) continue;
      DEBUG(dbgs() << "found Brcc use\n");

      // Sink the "and; icmp" to the use.
      MadeChange = true;
      BinaryOperator *NewAnd =
          BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
                                    BrccUser);
      CmpInst *NewCmp =
          CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
                          "", BrccUser);
      TheUse = NewCmp;
      ++NumAndCmpsMoved;
      DEBUG(BrccUser->getParent()->dump());
    }
  }
  return MadeChange;
}

/// \brief Retrieve the probabilities of a conditional branch. Returns true on
/// success, or false if no or invalid metadata was found.
static bool extractBranchMetadata(BranchInst *BI,
                                  uint64_t &ProbTrue, uint64_t &ProbFalse) {
  assert(BI->isConditional() &&
         "Looking for probabilities on unconditional branch?");
  auto *ProfileData = BI->getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3)
    return false;

  const auto *CITrue =
      mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1));
  const auto *CIFalse =
      mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2));
  if (!CITrue || !CIFalse)
    return false;

  ProbTrue = CITrue->getValue().getZExtValue();
  ProbFalse = CIFalse->getValue().getZExtValue();

  return true;
}

/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *InsertBefore = std::next(Function::iterator(BB))
        .getNodePtrUnchecked();
    auto TmpBB = BasicBlock::Create(BB.getContext(),
                                    BB.getName() + ".cond.split",
                                    BB.getParent(), InsertBefore);

    // Update the original basic block to use the first condition directly in
    // the branch instruction, and remove the no longer needed and/or
    // instruction.
    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes
    // from the newly generated BB (TmpBB). In the other successor we need to
    // add one incoming edge to the PHI nodes, because both branch
    // instructions now target the same successor. Depending on the original
    // branch condition (and/or) we have to swap the successors (TrueDest,
    // FalseDest), so that we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      //   BB1:
      //     jmp_if_X TBB
      //     jmp TmpBB
      //   TmpBB:
      //     jmp_if_Y TBB
      //     jmp FBB
      //
      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for the original BB.
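      // As an illustrative check (numbers invented for this comment): if the
      // original weights are A = 6 (true) and B = 2 (false), the choice
      // described below assigns BB1 the weights 6 and 10 and TmpBB the
      // weights 6 and 4, giving
      //   TrueProb = 6/16 + (10/16) * (6/10) = 12/16 = 3/4,
      // which matches the original block's TrueProb of 6/8.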
      // Assuming the original weights are A and B, one choice is to set
      // BB1's weights to A and A+2B, and set TmpBB's weights to A and 2B.
      // This choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      //   BB1:
      //     jmp_if_X TmpBB
      //     jmp FBB
      //   TmpBB:
      //     jmp_if_Y TBB
      //     jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for the original BB.
      // Assuming the original weights are A and B, one choice is to set
      // BB1's weights to 2A+B and B, and set TmpBB's weights to 2A and B.
      // This choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}