//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");

static cl::opt<bool> DisableBranchOpts(
  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
  cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
  "disable-cgp-select2branch", cl::Hidden, cl::init(false),
  cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
  "addr-sink-using-gep", cl::Hidden, cl::init(false),
  cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
   "enable-andcmp-sinking", cl::Hidden, cl::init(true),
   cl::desc("Enable sinking and/cmp into
branches.")); 82 83 namespace { 84 typedef SmallPtrSet<Instruction *, 16> SetOfInstrs; 85 typedef DenseMap<Instruction *, Type *> InstrToOrigTy; 86 87 class CodeGenPrepare : public FunctionPass { 88 /// TLI - Keep a pointer of a TargetLowering to consult for determining 89 /// transformation profitability. 90 const TargetMachine *TM; 91 const TargetLowering *TLI; 92 const TargetLibraryInfo *TLInfo; 93 DominatorTree *DT; 94 95 /// CurInstIterator - As we scan instructions optimizing them, this is the 96 /// next instruction to optimize. Xforms that can invalidate this should 97 /// update it. 98 BasicBlock::iterator CurInstIterator; 99 100 /// Keeps track of non-local addresses that have been sunk into a block. 101 /// This allows us to avoid inserting duplicate code for blocks with 102 /// multiple load/stores of the same address. 103 ValueMap<Value*, Value*> SunkAddrs; 104 105 /// Keeps track of all truncates inserted for the current function. 106 SetOfInstrs InsertedTruncsSet; 107 /// Keeps track of the type of the related instruction before their 108 /// promotion for the current function. 109 InstrToOrigTy PromotedInsts; 110 111 /// ModifiedDT - If CFG is modified in anyway, dominator tree may need to 112 /// be updated. 113 bool ModifiedDT; 114 115 /// OptSize - True if optimizing for size. 116 bool OptSize; 117 118 public: 119 static char ID; // Pass identification, replacement for typeid 120 explicit CodeGenPrepare(const TargetMachine *TM = nullptr) 121 : FunctionPass(ID), TM(TM), TLI(nullptr) { 122 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); 123 } 124 bool runOnFunction(Function &F) override; 125 126 const char *getPassName() const override { return "CodeGen Prepare"; } 127 128 void getAnalysisUsage(AnalysisUsage &AU) const override { 129 AU.addPreserved<DominatorTreeWrapperPass>(); 130 AU.addRequired<TargetLibraryInfo>(); 131 } 132 133 private: 134 bool EliminateFallThrough(Function &F); 135 bool EliminateMostlyEmptyBlocks(Function &F); 136 bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; 137 void EliminateMostlyEmptyBlock(BasicBlock *BB); 138 bool OptimizeBlock(BasicBlock &BB); 139 bool OptimizeInst(Instruction *I); 140 bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy); 141 bool OptimizeInlineAsmInst(CallInst *CS); 142 bool OptimizeCallInst(CallInst *CI); 143 bool MoveExtToFormExtLoad(Instruction *I); 144 bool OptimizeExtUses(Instruction *I); 145 bool OptimizeSelectInst(SelectInst *SI); 146 bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI); 147 bool DupRetToEnableTailCallOpts(BasicBlock *BB); 148 bool PlaceDbgValues(Function &F); 149 bool sinkAndCmp(Function &F); 150 }; 151 } 152 153 char CodeGenPrepare::ID = 0; 154 static void *initializeCodeGenPreparePassOnce(PassRegistry &Registry) { 155 initializeTargetLibraryInfoPass(Registry); 156 PassInfo *PI = new PassInfo( 157 "Optimize for code generation", "codegenprepare", &CodeGenPrepare::ID, 158 PassInfo::NormalCtor_t(callDefaultCtor<CodeGenPrepare>), false, false, 159 PassInfo::TargetMachineCtor_t(callTargetMachineCtor<CodeGenPrepare>)); 160 Registry.registerPass(*PI, true); 161 return PI; 162 } 163 164 void llvm::initializeCodeGenPreparePass(PassRegistry &Registry) { 165 CALL_ONCE_INITIALIZATION(initializeCodeGenPreparePassOnce) 166 } 167 168 FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) { 169 return new CodeGenPrepare(TM); 170 } 171 172 bool CodeGenPrepare::runOnFunction(Function &F) { 173 if (skipOptnoneFunction(F)) 174 return false; 175 
176 bool EverMadeChange = false; 177 // Clear per function information. 178 InsertedTruncsSet.clear(); 179 PromotedInsts.clear(); 180 181 ModifiedDT = false; 182 if (TM) TLI = TM->getTargetLowering(); 183 TLInfo = &getAnalysis<TargetLibraryInfo>(); 184 DominatorTreeWrapperPass *DTWP = 185 getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 186 DT = DTWP ? &DTWP->getDomTree() : nullptr; 187 OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, 188 Attribute::OptimizeForSize); 189 190 /// This optimization identifies DIV instructions that can be 191 /// profitably bypassed and carried out with a shorter, faster divide. 192 if (!OptSize && TLI && TLI->isSlowDivBypassed()) { 193 const DenseMap<unsigned int, unsigned int> &BypassWidths = 194 TLI->getBypassSlowDivWidths(); 195 for (Function::iterator I = F.begin(); I != F.end(); I++) 196 EverMadeChange |= bypassSlowDivision(F, I, BypassWidths); 197 } 198 199 // Eliminate blocks that contain only PHI nodes and an 200 // unconditional branch. 201 EverMadeChange |= EliminateMostlyEmptyBlocks(F); 202 203 // llvm.dbg.value is far away from the value then iSel may not be able 204 // handle it properly. iSel will drop llvm.dbg.value if it can not 205 // find a node corresponding to the value. 206 EverMadeChange |= PlaceDbgValues(F); 207 208 // If there is a mask, compare against zero, and branch that can be combined 209 // into a single target instruction, push the mask and compare into branch 210 // users. Do this before OptimizeBlock -> OptimizeInst -> 211 // OptimizeCmpExpression, which perturbs the pattern being searched for. 212 if (!DisableBranchOpts) 213 EverMadeChange |= sinkAndCmp(F); 214 215 bool MadeChange = true; 216 while (MadeChange) { 217 MadeChange = false; 218 for (Function::iterator I = F.begin(); I != F.end(); ) { 219 BasicBlock *BB = I++; 220 MadeChange |= OptimizeBlock(*BB); 221 } 222 EverMadeChange |= MadeChange; 223 } 224 225 SunkAddrs.clear(); 226 227 if (!DisableBranchOpts) { 228 MadeChange = false; 229 SmallPtrSet<BasicBlock*, 8> WorkList; 230 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { 231 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); 232 MadeChange |= ConstantFoldTerminator(BB, true); 233 if (!MadeChange) continue; 234 235 for (SmallVectorImpl<BasicBlock*>::iterator 236 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 237 if (pred_begin(*II) == pred_end(*II)) 238 WorkList.insert(*II); 239 } 240 241 // Delete the dead blocks and any of their dead successors. 242 MadeChange |= !WorkList.empty(); 243 while (!WorkList.empty()) { 244 BasicBlock *BB = *WorkList.begin(); 245 WorkList.erase(BB); 246 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); 247 248 DeleteDeadBlock(BB); 249 250 for (SmallVectorImpl<BasicBlock*>::iterator 251 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 252 if (pred_begin(*II) == pred_end(*II)) 253 WorkList.insert(*II); 254 } 255 256 // Merge pairs of basic blocks with unconditional branches, connected by 257 // a single edge. 
258 if (EverMadeChange || MadeChange) 259 MadeChange |= EliminateFallThrough(F); 260 261 if (MadeChange) 262 ModifiedDT = true; 263 EverMadeChange |= MadeChange; 264 } 265 266 if (ModifiedDT && DT) 267 DT->recalculate(F); 268 269 return EverMadeChange; 270 } 271 272 /// EliminateFallThrough - Merge basic blocks which are connected 273 /// by a single edge, where one of the basic blocks has a single successor 274 /// pointing to the other basic block, which has a single predecessor. 275 bool CodeGenPrepare::EliminateFallThrough(Function &F) { 276 bool Changed = false; 277 // Scan all of the blocks in the function, except for the entry block. 278 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { 279 BasicBlock *BB = I++; 280 // If the destination block has a single pred, then this is a trivial 281 // edge, just collapse it. 282 BasicBlock *SinglePred = BB->getSinglePredecessor(); 283 284 // Don't merge if BB's address is taken. 285 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; 286 287 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); 288 if (Term && !Term->isConditional()) { 289 Changed = true; 290 DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n"); 291 // Remember if SinglePred was the entry block of the function. 292 // If so, we will need to move BB back to the entry position. 293 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 294 MergeBasicBlockIntoOnlyPred(BB, this); 295 296 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 297 BB->moveBefore(&BB->getParent()->getEntryBlock()); 298 299 // We have erased a block. Update the iterator. 300 I = BB; 301 } 302 } 303 return Changed; 304 } 305 306 /// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes, 307 /// debug info directives, and an unconditional branch. Passes before isel 308 /// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for 309 /// isel. Start by eliminating these blocks so we can split them the way we 310 /// want them. 311 bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) { 312 bool MadeChange = false; 313 // Note that this intentionally skips the entry block. 314 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { 315 BasicBlock *BB = I++; 316 317 // If this block doesn't end with an uncond branch, ignore it. 318 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); 319 if (!BI || !BI->isUnconditional()) 320 continue; 321 322 // If the instruction before the branch (skipping debug info) isn't a phi 323 // node, then other stuff is happening here. 324 BasicBlock::iterator BBI = BI; 325 if (BBI != BB->begin()) { 326 --BBI; 327 while (isa<DbgInfoIntrinsic>(BBI)) { 328 if (BBI == BB->begin()) 329 break; 330 --BBI; 331 } 332 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) 333 continue; 334 } 335 336 // Do not break infinite loops. 337 BasicBlock *DestBB = BI->getSuccessor(0); 338 if (DestBB == BB) 339 continue; 340 341 if (!CanMergeBlocks(BB, DestBB)) 342 continue; 343 344 EliminateMostlyEmptyBlock(BB); 345 MadeChange = true; 346 } 347 return MadeChange; 348 } 349 350 /// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a 351 /// single uncond branch between them, and BB contains no other non-phi 352 /// instructions. 
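/// For illustration only (the block and value names below are invented for
/// this comment, not taken from any test case), a pattern this predicate
/// accepts looks like:
///   bb:                                      ; preds = %a, %b
///     %p = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
///   dest:                                    ; preds = %bb, %c
///     %q = phi i32 [ %p, %bb ], [ 2, %c ]
/// Here %p is only used by the phi in %dest, and %bb and %dest share no
/// common predecessor with conflicting incoming values, so %bb can be folded
/// into %dest.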
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi nodes
/// and an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
433 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 434 MergeBasicBlockIntoOnlyPred(DestBB, this); 435 436 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 437 BB->moveBefore(&BB->getParent()->getEntryBlock()); 438 439 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 440 return; 441 } 442 } 443 444 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB 445 // to handle the new incoming edges it is about to have. 446 PHINode *PN; 447 for (BasicBlock::iterator BBI = DestBB->begin(); 448 (PN = dyn_cast<PHINode>(BBI)); ++BBI) { 449 // Remove the incoming value for BB, and remember it. 450 Value *InVal = PN->removeIncomingValue(BB, false); 451 452 // Two options: either the InVal is a phi node defined in BB or it is some 453 // value that dominates BB. 454 PHINode *InValPhi = dyn_cast<PHINode>(InVal); 455 if (InValPhi && InValPhi->getParent() == BB) { 456 // Add all of the input values of the input PHI as inputs of this phi. 457 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) 458 PN->addIncoming(InValPhi->getIncomingValue(i), 459 InValPhi->getIncomingBlock(i)); 460 } else { 461 // Otherwise, add one instance of the dominating value for each edge that 462 // we will be adding. 463 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 464 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 465 PN->addIncoming(InVal, BBPN->getIncomingBlock(i)); 466 } else { 467 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) 468 PN->addIncoming(InVal, *PI); 469 } 470 } 471 } 472 473 // The PHIs are now updated, change everything that refers to BB to use 474 // DestBB and remove BB. 475 BB->replaceAllUsesWith(DestBB); 476 if (DT && !ModifiedDT) { 477 BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock(); 478 BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock(); 479 BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom); 480 DT->changeImmediateDominator(DestBB, NewIDom); 481 DT->eraseNode(BB); 482 } 483 BB->eraseFromParent(); 484 ++NumBlocksElim; 485 486 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 487 } 488 489 /// SinkCast - Sink the specified cast instruction into its user blocks 490 static bool SinkCast(CastInst *CI) { 491 BasicBlock *DefBB = CI->getParent(); 492 493 /// InsertedCasts - Only insert a cast in each block once. 494 DenseMap<BasicBlock*, CastInst*> InsertedCasts; 495 496 bool MadeChange = false; 497 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 498 UI != E; ) { 499 Use &TheUse = UI.getUse(); 500 Instruction *User = cast<Instruction>(*UI); 501 502 // Figure out which BB this cast is used in. For PHI's this is the 503 // appropriate predecessor block. 504 BasicBlock *UserBB = User->getParent(); 505 if (PHINode *PN = dyn_cast<PHINode>(User)) { 506 UserBB = PN->getIncomingBlock(TheUse); 507 } 508 509 // Preincrement use iterator so we don't invalidate it. 510 ++UI; 511 512 // If this user is in the same block as the cast, don't change the cast. 513 if (UserBB == DefBB) continue; 514 515 // If we have already inserted a cast into this block, use it. 516 CastInst *&InsertedCast = InsertedCasts[UserBB]; 517 518 if (!InsertedCast) { 519 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 520 InsertedCast = 521 CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "", 522 InsertPt); 523 MadeChange = true; 524 } 525 526 // Replace a use of the cast with a use of the new cast. 
527 TheUse = InsertedCast; 528 ++NumCastUses; 529 } 530 531 // If we removed all uses, nuke the cast. 532 if (CI->use_empty()) { 533 CI->eraseFromParent(); 534 MadeChange = true; 535 } 536 537 return MadeChange; 538 } 539 540 /// OptimizeNoopCopyExpression - If the specified cast instruction is a noop 541 /// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC), 542 /// sink it into user blocks to reduce the number of virtual 543 /// registers that must be created and coalesced. 544 /// 545 /// Return true if any changes are made. 546 /// 547 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){ 548 // If this is a noop copy, 549 EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType()); 550 EVT DstVT = TLI.getValueType(CI->getType()); 551 552 // This is an fp<->int conversion? 553 if (SrcVT.isInteger() != DstVT.isInteger()) 554 return false; 555 556 // If this is an extension, it will be a zero or sign extension, which 557 // isn't a noop. 558 if (SrcVT.bitsLT(DstVT)) return false; 559 560 // If these values will be promoted, find out what they will be promoted 561 // to. This helps us consider truncates on PPC as noop copies when they 562 // are. 563 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 564 TargetLowering::TypePromoteInteger) 565 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 566 if (TLI.getTypeAction(CI->getContext(), DstVT) == 567 TargetLowering::TypePromoteInteger) 568 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 569 570 // If, after promotion, these are the same types, this is a noop copy. 571 if (SrcVT != DstVT) 572 return false; 573 574 return SinkCast(CI); 575 } 576 577 /// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce 578 /// the number of virtual registers that must be created and coalesced. This is 579 /// a clear win except on targets with multiple condition code registers 580 /// (PowerPC), where it might lose; some adjustment may be wanted there. 581 /// 582 /// Return true if any changes are made. 583 static bool OptimizeCmpExpression(CmpInst *CI) { 584 BasicBlock *DefBB = CI->getParent(); 585 586 /// InsertedCmp - Only insert a cmp in each block once. 587 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 588 589 bool MadeChange = false; 590 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 591 UI != E; ) { 592 Use &TheUse = UI.getUse(); 593 Instruction *User = cast<Instruction>(*UI); 594 595 // Preincrement use iterator so we don't invalidate it. 596 ++UI; 597 598 // Don't bother for PHI nodes. 599 if (isa<PHINode>(User)) 600 continue; 601 602 // Figure out which BB this cmp is used in. 603 BasicBlock *UserBB = User->getParent(); 604 605 // If this user is in the same block as the cmp, don't change the cmp. 606 if (UserBB == DefBB) continue; 607 608 // If we have already inserted a cmp into this block, use it. 609 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 610 611 if (!InsertedCmp) { 612 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 613 InsertedCmp = 614 CmpInst::Create(CI->getOpcode(), 615 CI->getPredicate(), CI->getOperand(0), 616 CI->getOperand(1), "", InsertPt); 617 MadeChange = true; 618 } 619 620 // Replace a use of the cmp with a use of the new cmp. 621 TheUse = InsertedCmp; 622 ++NumCmpUses; 623 } 624 625 // If we removed all uses, nuke the cmp. 
626 if (CI->use_empty()) 627 CI->eraseFromParent(); 628 629 return MadeChange; 630 } 631 632 /// isExtractBitsCandidateUse - Check if the candidates could 633 /// be combined with shift instruction, which includes: 634 /// 1. Truncate instruction 635 /// 2. And instruction and the imm is a mask of the low bits: 636 /// imm & (imm+1) == 0 637 static bool isExtractBitsCandidateUse(Instruction *User) { 638 if (!isa<TruncInst>(User)) { 639 if (User->getOpcode() != Instruction::And || 640 !isa<ConstantInt>(User->getOperand(1))) 641 return false; 642 643 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 644 645 if ((Cimm & (Cimm + 1)).getBoolValue()) 646 return false; 647 } 648 return true; 649 } 650 651 /// SinkShiftAndTruncate - sink both shift and truncate instruction 652 /// to the use of truncate's BB. 653 static bool 654 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 655 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 656 const TargetLowering &TLI) { 657 BasicBlock *UserBB = User->getParent(); 658 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 659 TruncInst *TruncI = dyn_cast<TruncInst>(User); 660 bool MadeChange = false; 661 662 for (Value::user_iterator TruncUI = TruncI->user_begin(), 663 TruncE = TruncI->user_end(); 664 TruncUI != TruncE;) { 665 666 Use &TruncTheUse = TruncUI.getUse(); 667 Instruction *TruncUser = cast<Instruction>(*TruncUI); 668 // Preincrement use iterator so we don't invalidate it. 669 670 ++TruncUI; 671 672 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 673 if (!ISDOpcode) 674 continue; 675 676 // If the use is actually a legal node, there will not be an implicit 677 // truncate. 678 if (TLI.isOperationLegalOrCustom(ISDOpcode, 679 EVT::getEVT(TruncUser->getType()))) 680 continue; 681 682 // Don't bother for PHI nodes. 683 if (isa<PHINode>(TruncUser)) 684 continue; 685 686 BasicBlock *TruncUserBB = TruncUser->getParent(); 687 688 if (UserBB == TruncUserBB) 689 continue; 690 691 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; 692 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; 693 694 if (!InsertedShift && !InsertedTrunc) { 695 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); 696 // Sink the shift 697 if (ShiftI->getOpcode() == Instruction::AShr) 698 InsertedShift = 699 BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt); 700 else 701 InsertedShift = 702 BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt); 703 704 // Sink the trunc 705 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); 706 TruncInsertPt++; 707 708 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, 709 TruncI->getType(), "", TruncInsertPt); 710 711 MadeChange = true; 712 713 TruncTheUse = InsertedTrunc; 714 } 715 } 716 return MadeChange; 717 } 718 719 /// OptimizeExtractBits - sink the shift *right* instruction into user blocks if 720 /// the uses could potentially be combined with this shift instruction and 721 /// generate BitExtract instruction. It will only be applied if the architecture 722 /// supports BitExtract instruction. 
/// Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and the truncate are in the same BB, the use of the
      // truncate (TruncUse) may still introduce another truncate if it is not
      // legal. In this case, we would like to sink both the shift and the
      // truncate into the BB of TruncUse.
      // For example:
      // BB1:
      // i64 shift.result = lshr i64 opnd, imm
      // trunc.result = trunc shift.result to i16
      //
      // BB2:
      // ----> We will have an implicit truncate here if the architecture does
      // not have i16 compare.
      // cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
807 if (ShiftI->use_empty()) 808 ShiftI->eraseFromParent(); 809 810 return MadeChange; 811 } 812 813 namespace { 814 class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls { 815 protected: 816 void replaceCall(Value *With) override { 817 CI->replaceAllUsesWith(With); 818 CI->eraseFromParent(); 819 } 820 bool isFoldable(unsigned SizeCIOp, unsigned, bool) const override { 821 if (ConstantInt *SizeCI = 822 dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) 823 return SizeCI->isAllOnesValue(); 824 return false; 825 } 826 }; 827 } // end anonymous namespace 828 829 bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) { 830 BasicBlock *BB = CI->getParent(); 831 832 // Lower inline assembly if we can. 833 // If we found an inline asm expession, and if the target knows how to 834 // lower it to normal LLVM code, do so now. 835 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 836 if (TLI->ExpandInlineAsm(CI)) { 837 // Avoid invalidating the iterator. 838 CurInstIterator = BB->begin(); 839 // Avoid processing instructions out of order, which could cause 840 // reuse before a value is defined. 841 SunkAddrs.clear(); 842 return true; 843 } 844 // Sink address computing for memory operands into the block. 845 if (OptimizeInlineAsmInst(CI)) 846 return true; 847 } 848 849 // Lower all uses of llvm.objectsize.* 850 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 851 if (II && II->getIntrinsicID() == Intrinsic::objectsize) { 852 bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1); 853 Type *ReturnTy = CI->getType(); 854 Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL); 855 856 // Substituting this can cause recursive simplifications, which can 857 // invalidate our iterator. Use a WeakVH to hold onto it in case this 858 // happens. 859 WeakVH IterHandle(CurInstIterator); 860 861 replaceAndRecursivelySimplify(CI, RetVal, 862 TLI ? TLI->getDataLayout() : nullptr, 863 TLInfo, ModifiedDT ? nullptr : DT); 864 865 // If the iterator instruction was recursively deleted, start over at the 866 // start of the block. 867 if (IterHandle != CurInstIterator) { 868 CurInstIterator = BB->begin(); 869 SunkAddrs.clear(); 870 } 871 return true; 872 } 873 874 if (II && TLI) { 875 SmallVector<Value*, 2> PtrOps; 876 Type *AccessTy; 877 if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy)) 878 while (!PtrOps.empty()) 879 if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy)) 880 return true; 881 } 882 883 // From here on out we're working with named functions. 884 if (!CI->getCalledFunction()) return false; 885 886 // We'll need DataLayout from here on out. 887 const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr; 888 if (!TD) return false; 889 890 // Lower all default uses of _chk calls. This is very similar 891 // to what InstCombineCalls does, but here we are only lowering calls 892 // that have the default "don't know" as the objectsize. Anything else 893 // should be left alone. 894 CodeGenPrepareFortifiedLibCalls Simplifier; 895 return Simplifier.fold(CI, TD, TLInfo); 896 } 897 898 /// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return 899 /// instructions to the predecessor to enable tail call optimizations. 
The 900 /// case it is currently looking for is: 901 /// @code 902 /// bb0: 903 /// %tmp0 = tail call i32 @f0() 904 /// br label %return 905 /// bb1: 906 /// %tmp1 = tail call i32 @f1() 907 /// br label %return 908 /// bb2: 909 /// %tmp2 = tail call i32 @f2() 910 /// br label %return 911 /// return: 912 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 913 /// ret i32 %retval 914 /// @endcode 915 /// 916 /// => 917 /// 918 /// @code 919 /// bb0: 920 /// %tmp0 = tail call i32 @f0() 921 /// ret i32 %tmp0 922 /// bb1: 923 /// %tmp1 = tail call i32 @f1() 924 /// ret i32 %tmp1 925 /// bb2: 926 /// %tmp2 = tail call i32 @f2() 927 /// ret i32 %tmp2 928 /// @endcode 929 bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) { 930 if (!TLI) 931 return false; 932 933 ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()); 934 if (!RI) 935 return false; 936 937 PHINode *PN = nullptr; 938 BitCastInst *BCI = nullptr; 939 Value *V = RI->getReturnValue(); 940 if (V) { 941 BCI = dyn_cast<BitCastInst>(V); 942 if (BCI) 943 V = BCI->getOperand(0); 944 945 PN = dyn_cast<PHINode>(V); 946 if (!PN) 947 return false; 948 } 949 950 if (PN && PN->getParent() != BB) 951 return false; 952 953 // It's not safe to eliminate the sign / zero extension of the return value. 954 // See llvm::isInTailCallPosition(). 955 const Function *F = BB->getParent(); 956 AttributeSet CallerAttrs = F->getAttributes(); 957 if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) || 958 CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt)) 959 return false; 960 961 // Make sure there are no instructions between the PHI and return, or that the 962 // return is the first instruction in the block. 963 if (PN) { 964 BasicBlock::iterator BI = BB->begin(); 965 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); 966 if (&*BI == BCI) 967 // Also skip over the bitcast. 968 ++BI; 969 if (&*BI != RI) 970 return false; 971 } else { 972 BasicBlock::iterator BI = BB->begin(); 973 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 974 if (&*BI != RI) 975 return false; 976 } 977 978 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 979 /// call. 980 SmallVector<CallInst*, 4> TailCalls; 981 if (PN) { 982 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 983 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 984 // Make sure the phi value is indeed produced by the tail call. 985 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && 986 TLI->mayBeEmittedAsTailCall(CI)) 987 TailCalls.push_back(CI); 988 } 989 } else { 990 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 991 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { 992 if (!VisitedBBs.insert(*PI)) 993 continue; 994 995 BasicBlock::InstListType &InstList = (*PI)->getInstList(); 996 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); 997 BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); 998 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); 999 if (RI == RE) 1000 continue; 1001 1002 CallInst *CI = dyn_cast<CallInst>(&*RI); 1003 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI)) 1004 TailCalls.push_back(CI); 1005 } 1006 } 1007 1008 bool Changed = false; 1009 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { 1010 CallInst *CI = TailCalls[i]; 1011 CallSite CS(CI); 1012 1013 // Conservatively require the attributes of the call to match those of the 1014 // return. 
Ignore noalias because it doesn't affect the call sequence. 1015 AttributeSet CalleeAttrs = CS.getAttributes(); 1016 if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex). 1017 removeAttribute(Attribute::NoAlias) != 1018 AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex). 1019 removeAttribute(Attribute::NoAlias)) 1020 continue; 1021 1022 // Make sure the call instruction is followed by an unconditional branch to 1023 // the return block. 1024 BasicBlock *CallBB = CI->getParent(); 1025 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); 1026 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) 1027 continue; 1028 1029 // Duplicate the return into CallBB. 1030 (void)FoldReturnIntoUncondBranch(RI, BB, CallBB); 1031 ModifiedDT = Changed = true; 1032 ++NumRetsDup; 1033 } 1034 1035 // If we eliminated all predecessors of the block, delete the block now. 1036 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) 1037 BB->eraseFromParent(); 1038 1039 return Changed; 1040 } 1041 1042 //===----------------------------------------------------------------------===// 1043 // Memory Optimization 1044 //===----------------------------------------------------------------------===// 1045 1046 namespace { 1047 1048 /// ExtAddrMode - This is an extended version of TargetLowering::AddrMode 1049 /// which holds actual Value*'s for register values. 1050 struct ExtAddrMode : public TargetLowering::AddrMode { 1051 Value *BaseReg; 1052 Value *ScaledReg; 1053 ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {} 1054 void print(raw_ostream &OS) const; 1055 void dump() const; 1056 1057 bool operator==(const ExtAddrMode& O) const { 1058 return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) && 1059 (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) && 1060 (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale); 1061 } 1062 }; 1063 1064 #ifndef NDEBUG 1065 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { 1066 AM.print(OS); 1067 return OS; 1068 } 1069 #endif 1070 1071 void ExtAddrMode::print(raw_ostream &OS) const { 1072 bool NeedPlus = false; 1073 OS << "["; 1074 if (BaseGV) { 1075 OS << (NeedPlus ? " + " : "") 1076 << "GV:"; 1077 BaseGV->printAsOperand(OS, /*PrintType=*/false); 1078 NeedPlus = true; 1079 } 1080 1081 if (BaseOffs) { 1082 OS << (NeedPlus ? " + " : "") 1083 << BaseOffs; 1084 NeedPlus = true; 1085 } 1086 1087 if (BaseReg) { 1088 OS << (NeedPlus ? " + " : "") 1089 << "Base:"; 1090 BaseReg->printAsOperand(OS, /*PrintType=*/false); 1091 NeedPlus = true; 1092 } 1093 if (Scale) { 1094 OS << (NeedPlus ? " + " : "") 1095 << Scale << "*"; 1096 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 1097 } 1098 1099 OS << ']'; 1100 } 1101 1102 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1103 void ExtAddrMode::dump() const { 1104 print(dbgs()); 1105 dbgs() << '\n'; 1106 } 1107 #endif 1108 1109 /// \brief This class provides transaction based operation on the IR. 1110 /// Every change made through this class is recorded in the internal state and 1111 /// can be undone (rollback) until commit is called. 1112 class TypePromotionTransaction { 1113 1114 /// \brief This represents the common interface of the individual transaction. 1115 /// Each class implements the logic for doing one specific modification on 1116 /// the IR via the TypePromotionTransaction. 1117 class TypePromotionAction { 1118 protected: 1119 /// The Instruction modified. 1120 Instruction *Inst; 1121 1122 public: 1123 /// \brief Constructor of the action. 
1124 /// The constructor performs the related action on the IR. 1125 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 1126 1127 virtual ~TypePromotionAction() {} 1128 1129 /// \brief Undo the modification done by this action. 1130 /// When this method is called, the IR must be in the same state as it was 1131 /// before this action was applied. 1132 /// \pre Undoing the action works if and only if the IR is in the exact same 1133 /// state as it was directly after this action was applied. 1134 virtual void undo() = 0; 1135 1136 /// \brief Advocate every change made by this action. 1137 /// When the results on the IR of the action are to be kept, it is important 1138 /// to call this function, otherwise hidden information may be kept forever. 1139 virtual void commit() { 1140 // Nothing to be done, this action is not doing anything. 1141 } 1142 }; 1143 1144 /// \brief Utility to remember the position of an instruction. 1145 class InsertionHandler { 1146 /// Position of an instruction. 1147 /// Either an instruction: 1148 /// - Is the first in a basic block: BB is used. 1149 /// - Has a previous instructon: PrevInst is used. 1150 union { 1151 Instruction *PrevInst; 1152 BasicBlock *BB; 1153 } Point; 1154 /// Remember whether or not the instruction had a previous instruction. 1155 bool HasPrevInstruction; 1156 1157 public: 1158 /// \brief Record the position of \p Inst. 1159 InsertionHandler(Instruction *Inst) { 1160 BasicBlock::iterator It = Inst; 1161 HasPrevInstruction = (It != (Inst->getParent()->begin())); 1162 if (HasPrevInstruction) 1163 Point.PrevInst = --It; 1164 else 1165 Point.BB = Inst->getParent(); 1166 } 1167 1168 /// \brief Insert \p Inst at the recorded position. 1169 void insert(Instruction *Inst) { 1170 if (HasPrevInstruction) { 1171 if (Inst->getParent()) 1172 Inst->removeFromParent(); 1173 Inst->insertAfter(Point.PrevInst); 1174 } else { 1175 Instruction *Position = Point.BB->getFirstInsertionPt(); 1176 if (Inst->getParent()) 1177 Inst->moveBefore(Position); 1178 else 1179 Inst->insertBefore(Position); 1180 } 1181 } 1182 }; 1183 1184 /// \brief Move an instruction before another. 1185 class InstructionMoveBefore : public TypePromotionAction { 1186 /// Original position of the instruction. 1187 InsertionHandler Position; 1188 1189 public: 1190 /// \brief Move \p Inst before \p Before. 1191 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 1192 : TypePromotionAction(Inst), Position(Inst) { 1193 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); 1194 Inst->moveBefore(Before); 1195 } 1196 1197 /// \brief Move the instruction back to its original position. 1198 void undo() override { 1199 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 1200 Position.insert(Inst); 1201 } 1202 }; 1203 1204 /// \brief Set the operand of an instruction with a new value. 1205 class OperandSetter : public TypePromotionAction { 1206 /// Original operand of the instruction. 1207 Value *Origin; 1208 /// Index of the modified instruction. 1209 unsigned Idx; 1210 1211 public: 1212 /// \brief Set \p Idx operand of \p Inst with \p NewVal. 1213 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 1214 : TypePromotionAction(Inst), Idx(Idx) { 1215 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 1216 << "for:" << *Inst << "\n" 1217 << "with:" << *NewVal << "\n"); 1218 Origin = Inst->getOperand(Idx); 1219 Inst->setOperand(Idx, NewVal); 1220 } 1221 1222 /// \brief Restore the original value of the instruction. 
1223 void undo() override { 1224 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 1225 << "for: " << *Inst << "\n" 1226 << "with: " << *Origin << "\n"); 1227 Inst->setOperand(Idx, Origin); 1228 } 1229 }; 1230 1231 /// \brief Hide the operands of an instruction. 1232 /// Do as if this instruction was not using any of its operands. 1233 class OperandsHider : public TypePromotionAction { 1234 /// The list of original operands. 1235 SmallVector<Value *, 4> OriginalValues; 1236 1237 public: 1238 /// \brief Remove \p Inst from the uses of the operands of \p Inst. 1239 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 1240 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 1241 unsigned NumOpnds = Inst->getNumOperands(); 1242 OriginalValues.reserve(NumOpnds); 1243 for (unsigned It = 0; It < NumOpnds; ++It) { 1244 // Save the current operand. 1245 Value *Val = Inst->getOperand(It); 1246 OriginalValues.push_back(Val); 1247 // Set a dummy one. 1248 // We could use OperandSetter here, but that would implied an overhead 1249 // that we are not willing to pay. 1250 Inst->setOperand(It, UndefValue::get(Val->getType())); 1251 } 1252 } 1253 1254 /// \brief Restore the original list of uses. 1255 void undo() override { 1256 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 1257 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 1258 Inst->setOperand(It, OriginalValues[It]); 1259 } 1260 }; 1261 1262 /// \brief Build a truncate instruction. 1263 class TruncBuilder : public TypePromotionAction { 1264 public: 1265 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty 1266 /// result. 1267 /// trunc Opnd to Ty. 1268 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 1269 IRBuilder<> Builder(Opnd); 1270 Inst = cast<Instruction>(Builder.CreateTrunc(Opnd, Ty, "promoted")); 1271 DEBUG(dbgs() << "Do: TruncBuilder: " << *Inst << "\n"); 1272 } 1273 1274 /// \brief Get the built instruction. 1275 Instruction *getBuiltInstruction() { return Inst; } 1276 1277 /// \brief Remove the built instruction. 1278 void undo() override { 1279 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Inst << "\n"); 1280 Inst->eraseFromParent(); 1281 } 1282 }; 1283 1284 /// \brief Build a sign extension instruction. 1285 class SExtBuilder : public TypePromotionAction { 1286 public: 1287 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty 1288 /// result. 1289 /// sext Opnd to Ty. 1290 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 1291 : TypePromotionAction(Inst) { 1292 IRBuilder<> Builder(InsertPt); 1293 Inst = cast<Instruction>(Builder.CreateSExt(Opnd, Ty, "promoted")); 1294 DEBUG(dbgs() << "Do: SExtBuilder: " << *Inst << "\n"); 1295 } 1296 1297 /// \brief Get the built instruction. 1298 Instruction *getBuiltInstruction() { return Inst; } 1299 1300 /// \brief Remove the built instruction. 1301 void undo() override { 1302 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Inst << "\n"); 1303 Inst->eraseFromParent(); 1304 } 1305 }; 1306 1307 /// \brief Mutate an instruction to another type. 1308 class TypeMutator : public TypePromotionAction { 1309 /// Record the original type. 1310 Type *OrigTy; 1311 1312 public: 1313 /// \brief Mutate the type of \p Inst into \p NewTy. 
1314 TypeMutator(Instruction *Inst, Type *NewTy) 1315 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 1316 DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 1317 << "\n"); 1318 Inst->mutateType(NewTy); 1319 } 1320 1321 /// \brief Mutate the instruction back to its original type. 1322 void undo() override { 1323 DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 1324 << "\n"); 1325 Inst->mutateType(OrigTy); 1326 } 1327 }; 1328 1329 /// \brief Replace the uses of an instruction by another instruction. 1330 class UsesReplacer : public TypePromotionAction { 1331 /// Helper structure to keep track of the replaced uses. 1332 struct InstructionAndIdx { 1333 /// The instruction using the instruction. 1334 Instruction *Inst; 1335 /// The index where this instruction is used for Inst. 1336 unsigned Idx; 1337 InstructionAndIdx(Instruction *Inst, unsigned Idx) 1338 : Inst(Inst), Idx(Idx) {} 1339 }; 1340 1341 /// Keep track of the original uses (pair Instruction, Index). 1342 SmallVector<InstructionAndIdx, 4> OriginalUses; 1343 typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator; 1344 1345 public: 1346 /// \brief Replace all the use of \p Inst by \p New. 1347 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 1348 DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 1349 << "\n"); 1350 // Record the original uses. 1351 for (Use &U : Inst->uses()) { 1352 Instruction *UserI = cast<Instruction>(U.getUser()); 1353 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 1354 } 1355 // Now, we can replace the uses. 1356 Inst->replaceAllUsesWith(New); 1357 } 1358 1359 /// \brief Reassign the original uses of Inst to Inst. 1360 void undo() override { 1361 DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 1362 for (use_iterator UseIt = OriginalUses.begin(), 1363 EndIt = OriginalUses.end(); 1364 UseIt != EndIt; ++UseIt) { 1365 UseIt->Inst->setOperand(UseIt->Idx, Inst); 1366 } 1367 } 1368 }; 1369 1370 /// \brief Remove an instruction from the IR. 1371 class InstructionRemover : public TypePromotionAction { 1372 /// Original position of the instruction. 1373 InsertionHandler Inserter; 1374 /// Helper structure to hide all the link to the instruction. In other 1375 /// words, this helps to do as if the instruction was removed. 1376 OperandsHider Hider; 1377 /// Keep track of the uses replaced, if any. 1378 UsesReplacer *Replacer; 1379 1380 public: 1381 /// \brief Remove all reference of \p Inst and optinally replace all its 1382 /// uses with New. 1383 /// \pre If !Inst->use_empty(), then New != nullptr 1384 InstructionRemover(Instruction *Inst, Value *New = nullptr) 1385 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 1386 Replacer(nullptr) { 1387 if (New) 1388 Replacer = new UsesReplacer(Inst, New); 1389 DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 1390 Inst->removeFromParent(); 1391 } 1392 1393 ~InstructionRemover() { delete Replacer; } 1394 1395 /// \brief Really remove the instruction. 1396 void commit() override { delete Inst; } 1397 1398 /// \brief Resurrect the instruction and reassign it to the proper uses if 1399 /// new value was provided when build this action. 1400 void undo() override { 1401 DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 1402 Inserter.insert(Inst); 1403 if (Replacer) 1404 Replacer->undo(); 1405 Hider.undo(); 1406 } 1407 }; 1408 1409 public: 1410 /// Restoration point. 
1411 /// The restoration point is a pointer to an action instead of an iterator 1412 /// because the iterator may be invalidated but not the pointer. 1413 typedef const TypePromotionAction *ConstRestorationPt; 1414 /// Advocate every changes made in that transaction. 1415 void commit(); 1416 /// Undo all the changes made after the given point. 1417 void rollback(ConstRestorationPt Point); 1418 /// Get the current restoration point. 1419 ConstRestorationPt getRestorationPoint() const; 1420 1421 /// \name API for IR modification with state keeping to support rollback. 1422 /// @{ 1423 /// Same as Instruction::setOperand. 1424 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 1425 /// Same as Instruction::eraseFromParent. 1426 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 1427 /// Same as Value::replaceAllUsesWith. 1428 void replaceAllUsesWith(Instruction *Inst, Value *New); 1429 /// Same as Value::mutateType. 1430 void mutateType(Instruction *Inst, Type *NewTy); 1431 /// Same as IRBuilder::createTrunc. 1432 Instruction *createTrunc(Instruction *Opnd, Type *Ty); 1433 /// Same as IRBuilder::createSExt. 1434 Instruction *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 1435 /// Same as Instruction::moveBefore. 1436 void moveBefore(Instruction *Inst, Instruction *Before); 1437 /// @} 1438 1439 private: 1440 /// The ordered list of actions made so far. 1441 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 1442 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt; 1443 }; 1444 1445 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 1446 Value *NewVal) { 1447 Actions.push_back( 1448 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal)); 1449 } 1450 1451 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 1452 Value *NewVal) { 1453 Actions.push_back( 1454 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal)); 1455 } 1456 1457 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 1458 Value *New) { 1459 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 1460 } 1461 1462 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 1463 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 1464 } 1465 1466 Instruction *TypePromotionTransaction::createTrunc(Instruction *Opnd, 1467 Type *Ty) { 1468 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 1469 Instruction *I = Ptr->getBuiltInstruction(); 1470 Actions.push_back(std::move(Ptr)); 1471 return I; 1472 } 1473 1474 Instruction *TypePromotionTransaction::createSExt(Instruction *Inst, 1475 Value *Opnd, Type *Ty) { 1476 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 1477 Instruction *I = Ptr->getBuiltInstruction(); 1478 Actions.push_back(std::move(Ptr)); 1479 return I; 1480 } 1481 1482 void TypePromotionTransaction::moveBefore(Instruction *Inst, 1483 Instruction *Before) { 1484 Actions.push_back( 1485 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before)); 1486 } 1487 1488 TypePromotionTransaction::ConstRestorationPt 1489 TypePromotionTransaction::getRestorationPoint() const { 1490 return !Actions.empty() ? 
Actions.back().get() : nullptr; 1491 } 1492 1493 void TypePromotionTransaction::commit() { 1494 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 1495 ++It) 1496 (*It)->commit(); 1497 Actions.clear(); 1498 } 1499 1500 void TypePromotionTransaction::rollback( 1501 TypePromotionTransaction::ConstRestorationPt Point) { 1502 while (!Actions.empty() && Point != Actions.back().get()) { 1503 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 1504 Curr->undo(); 1505 } 1506 } 1507 1508 /// \brief A helper class for matching addressing modes. 1509 /// 1510 /// This encapsulates the logic for matching the target-legal addressing modes. 1511 class AddressingModeMatcher { 1512 SmallVectorImpl<Instruction*> &AddrModeInsts; 1513 const TargetLowering &TLI; 1514 1515 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 1516 /// the memory instruction that we're computing this address for. 1517 Type *AccessTy; 1518 Instruction *MemoryInst; 1519 1520 /// AddrMode - This is the addressing mode that we're building up. This is 1521 /// part of the return value of this addressing mode matching stuff. 1522 ExtAddrMode &AddrMode; 1523 1524 /// The truncate instruction inserted by other CodeGenPrepare optimizations. 1525 const SetOfInstrs &InsertedTruncs; 1526 /// A map from the instructions to their type before promotion. 1527 InstrToOrigTy &PromotedInsts; 1528 /// The ongoing transaction where every action should be registered. 1529 TypePromotionTransaction &TPT; 1530 1531 /// IgnoreProfitability - This is set to true when we should not do 1532 /// profitability checks. When true, IsProfitableToFoldIntoAddressingMode 1533 /// always returns true. 1534 bool IgnoreProfitability; 1535 1536 AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI, 1537 const TargetLowering &T, Type *AT, 1538 Instruction *MI, ExtAddrMode &AM, 1539 const SetOfInstrs &InsertedTruncs, 1540 InstrToOrigTy &PromotedInsts, 1541 TypePromotionTransaction &TPT) 1542 : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM), 1543 InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) { 1544 IgnoreProfitability = false; 1545 } 1546 public: 1547 1548 /// Match - Find the maximal addressing mode that a load/store of V can fold, 1549 /// give an access type of AccessTy. This returns a list of involved 1550 /// instructions in AddrModeInsts. 1551 /// \p InsertedTruncs The truncate instruction inserted by other 1552 /// CodeGenPrepare 1553 /// optimizations. 1554 /// \p PromotedInsts maps the instructions to their type before promotion. 1555 /// \p The ongoing transaction where every action should be registered. 
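  /// A typical invocation looks like the sketch below (the variable names are
  /// illustrative only, not part of this interface):
  ///   ExtAddrMode AddrMode =
  ///       AddressingModeMatcher::Match(Addr, AccessTy, MemoryInst,
  ///                                    AddrModeInsts, *TLI, InsertedTruncsSet,
  ///                                    PromotedInsts, TPT);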
1556 static ExtAddrMode Match(Value *V, Type *AccessTy, 1557 Instruction *MemoryInst, 1558 SmallVectorImpl<Instruction*> &AddrModeInsts, 1559 const TargetLowering &TLI, 1560 const SetOfInstrs &InsertedTruncs, 1561 InstrToOrigTy &PromotedInsts, 1562 TypePromotionTransaction &TPT) { 1563 ExtAddrMode Result; 1564 1565 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, AccessTy, 1566 MemoryInst, Result, InsertedTruncs, 1567 PromotedInsts, TPT).MatchAddr(V, 0); 1568 (void)Success; assert(Success && "Couldn't select *anything*?"); 1569 return Result; 1570 } 1571 private: 1572 bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 1573 bool MatchAddr(Value *V, unsigned Depth); 1574 bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, 1575 bool *MovedAway = nullptr); 1576 bool IsProfitableToFoldIntoAddressingMode(Instruction *I, 1577 ExtAddrMode &AMBefore, 1578 ExtAddrMode &AMAfter); 1579 bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 1580 bool IsPromotionProfitable(unsigned MatchedSize, unsigned SizeWithPromotion, 1581 Value *PromotedOperand) const; 1582 }; 1583 1584 /// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode. 1585 /// Return true and update AddrMode if this addr mode is legal for the target, 1586 /// false if not. 1587 bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale, 1588 unsigned Depth) { 1589 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 1590 // mode. Just process that directly. 1591 if (Scale == 1) 1592 return MatchAddr(ScaleReg, Depth); 1593 1594 // If the scale is 0, it takes nothing to add this. 1595 if (Scale == 0) 1596 return true; 1597 1598 // If we already have a scale of this value, we can add to it, otherwise, we 1599 // need an available scale field. 1600 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 1601 return false; 1602 1603 ExtAddrMode TestAddrMode = AddrMode; 1604 1605 // Add scale to turn X*4+X*3 -> X*7. This could also do things like 1606 // [A+B + A*7] -> [B+A*8]. 1607 TestAddrMode.Scale += Scale; 1608 TestAddrMode.ScaledReg = ScaleReg; 1609 1610 // If the new address isn't legal, bail out. 1611 if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) 1612 return false; 1613 1614 // It was legal, so commit it. 1615 AddrMode = TestAddrMode; 1616 1617 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now 1618 // to see if ScaleReg is actually X+C. If so, we can turn this into adding 1619 // X*Scale + C*Scale to addr mode. 1620 ConstantInt *CI = nullptr; Value *AddLHS = nullptr; 1621 if (isa<Instruction>(ScaleReg) && // not a constant expr. 1622 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { 1623 TestAddrMode.ScaledReg = AddLHS; 1624 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; 1625 1626 // If this addressing mode is legal, commit it and remember that we folded 1627 // this instruction. 1628 if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) { 1629 AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); 1630 AddrMode = TestAddrMode; 1631 return true; 1632 } 1633 } 1634 1635 // Otherwise, not (x+c)*scale, just return what we have. 1636 return true; 1637 } 1638 1639 /// MightBeFoldableInst - This is a little filter, which returns true if an 1640 /// addressing computation involving I might be folded into a load/store 1641 /// accessing it. 
This doesn't need to be perfect, but needs to accept at least 1642 /// the set of instructions that MatchOperationAddr can. 1643 static bool MightBeFoldableInst(Instruction *I) { 1644 switch (I->getOpcode()) { 1645 case Instruction::BitCast: 1646 case Instruction::AddrSpaceCast: 1647 // Don't touch identity bitcasts. 1648 if (I->getType() == I->getOperand(0)->getType()) 1649 return false; 1650 return I->getType()->isPointerTy() || I->getType()->isIntegerTy(); 1651 case Instruction::PtrToInt: 1652 // PtrToInt is always a noop, as we know that the int type is pointer sized. 1653 return true; 1654 case Instruction::IntToPtr: 1655 // We know the input is intptr_t, so this is foldable. 1656 return true; 1657 case Instruction::Add: 1658 return true; 1659 case Instruction::Mul: 1660 case Instruction::Shl: 1661 // Can only handle X*C and X << C. 1662 return isa<ConstantInt>(I->getOperand(1)); 1663 case Instruction::GetElementPtr: 1664 return true; 1665 default: 1666 return false; 1667 } 1668 } 1669 1670 /// \brief Helper class to perform type promotion. 1671 class TypePromotionHelper { 1672 /// \brief Utility function to check whether or not a sign extension of 1673 /// \p Inst with \p ConsideredSExtType can be moved through \p Inst by either 1674 /// using the operands of \p Inst or promoting \p Inst. 1675 /// In other words, check if: 1676 /// sext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredSExtType. 1677 /// #1 Promotion applies: 1678 /// ConsideredSExtType Inst (sext opnd1 to ConsideredSExtType, ...). 1679 /// #2 Operand reuses: 1680 /// sext opnd1 to ConsideredSExtType. 1681 /// \p PromotedInsts maps the instructions to their type before promotion. 1682 static bool canGetThrough(const Instruction *Inst, Type *ConsideredSExtType, 1683 const InstrToOrigTy &PromotedInsts); 1684 1685 /// \brief Utility function to determine if \p OpIdx should be promoted when 1686 /// promoting \p Inst. 1687 static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) { 1688 if (isa<SelectInst>(Inst) && OpIdx == 0) 1689 return false; 1690 return true; 1691 } 1692 1693 /// \brief Utility function to promote the operand of \p SExt when this 1694 /// operand is a promotable trunc or sext. 1695 /// \p PromotedInsts maps the instructions to their type before promotion. 1696 /// \p CreatedInsts[out] contains how many non-free instructions have been 1697 /// created to promote the operand of SExt. 1698 /// Should never be called directly. 1699 /// \return The promoted value which is used instead of SExt. 1700 static Value *promoteOperandForTruncAndSExt(Instruction *SExt, 1701 TypePromotionTransaction &TPT, 1702 InstrToOrigTy &PromotedInsts, 1703 unsigned &CreatedInsts); 1704 1705 /// \brief Utility function to promote the operand of \p SExt when this 1706 /// operand is promotable and is not a supported trunc or sext. 1707 /// \p PromotedInsts maps the instructions to their type before promotion. 1708 /// \p CreatedInsts[out] contains how many non-free instructions have been 1709 /// created to promote the operand of SExt. 1710 /// Should never be called directly. 1711 /// \return The promoted value which is used instead of SExt. 1712 static Value *promoteOperandForOther(Instruction *SExt, 1713 TypePromotionTransaction &TPT, 1714 InstrToOrigTy &PromotedInsts, 1715 unsigned &CreatedInsts); 1716 1717 public: 1718 /// Type for the utility function that promotes the operand of SExt.
1719 typedef Value *(*Action)(Instruction *SExt, TypePromotionTransaction &TPT, 1720 InstrToOrigTy &PromotedInsts, 1721 unsigned &CreatedInsts); 1722 /// \brief Given a sign extend instruction \p SExt, return the appropriate 1723 /// action to promote the operand of \p SExt instead of using SExt. 1724 /// \return NULL if no promotable action is possible with the current 1725 /// sign extension. 1726 /// \p InsertedTruncs keeps track of all the truncate instructions inserted by 1727 /// the other CodeGenPrepare optimizations. This information is important 1728 /// because we do not want to promote these instructions as CodeGenPrepare 1729 /// will reinsert them later, thus creating an infinite loop: create/remove. 1730 /// \p PromotedInsts maps the instructions to their type before promotion. 1731 static Action getAction(Instruction *SExt, const SetOfInstrs &InsertedTruncs, 1732 const TargetLowering &TLI, 1733 const InstrToOrigTy &PromotedInsts); 1734 }; 1735 1736 bool TypePromotionHelper::canGetThrough(const Instruction *Inst, 1737 Type *ConsideredSExtType, 1738 const InstrToOrigTy &PromotedInsts) { 1739 // We can always get through sext. 1740 if (isa<SExtInst>(Inst)) 1741 return true; 1742 1743 // We can get through a binary operator if it is legal. In other words, the 1744 // binary operator must have a nuw or nsw flag. 1745 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); 1746 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && 1747 (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap())) 1748 return true; 1749 1750 // Check if we can do the following simplification. 1751 // sext(trunc(sext)) --> sext 1752 if (!isa<TruncInst>(Inst)) 1753 return false; 1754 1755 Value *OpndVal = Inst->getOperand(0); 1756 // Check if we can use this operand in the sext. 1757 // If the type is larger than the result type of the sign extension, 1758 // we cannot. 1759 if (OpndVal->getType()->getIntegerBitWidth() > 1760 ConsideredSExtType->getIntegerBitWidth()) 1761 return false; 1762 1763 // If the operand of the truncate is not an instruction, we will not have 1764 // any information on the dropped bits. 1765 // (Actually we could for constants but it is not worth the extra logic). 1766 Instruction *Opnd = dyn_cast<Instruction>(OpndVal); 1767 if (!Opnd) 1768 return false; 1769 1770 // Check if the source of the truncate is narrow enough. 1771 // I.e., check that trunc just drops sign extended bits. 1772 // #1 get the type of the operand. 1773 const Type *OpndType; 1774 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 1775 if (It != PromotedInsts.end()) 1776 OpndType = It->second; 1777 else if (isa<SExtInst>(Opnd)) 1778 OpndType = cast<Instruction>(Opnd)->getOperand(0)->getType(); 1779 else 1780 return false; 1781 1782 // #2 check that the truncate just drops sign extended bits. 1783 if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth()) 1784 return true; 1785 1786 return false; 1787 } 1788 1789 TypePromotionHelper::Action TypePromotionHelper::getAction( 1790 Instruction *SExt, const SetOfInstrs &InsertedTruncs, 1791 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 1792 Instruction *SExtOpnd = dyn_cast<Instruction>(SExt->getOperand(0)); 1793 Type *SExtTy = SExt->getType(); 1794 // If the operand of the sign extension is not an instruction, we cannot 1795 // get through. 1796 // If it is, check that we can get through.
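// For illustration only (the value names below are invented for this
// comment): given
//   %t = trunc i64 %x to i32
//   %s = sext i32 %t to i64
// the sext can look through the trunc when %x is known to be sign extended
// (via PromotedInsts or because it is itself a sext), and it can get through
// nuw/nsw binary operators by promoting their operands instead.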
1797 if (!SExtOpnd || !canGetThrough(SExtOpnd, SExtTy, PromotedInsts)) 1798 return nullptr; 1799 1800 // Do not promote if the operand has been added by codegenprepare. 1801 // Otherwise, it means we are undoing an optimization that is likely to be 1802 // redone, thus causing a potential infinite loop. 1803 if (isa<TruncInst>(SExtOpnd) && InsertedTruncs.count(SExtOpnd)) 1804 return nullptr; 1805 1806 // SExt or Trunc instructions. 1807 // Return the related handler. 1808 if (isa<SExtInst>(SExtOpnd) || isa<TruncInst>(SExtOpnd)) 1809 return promoteOperandForTruncAndSExt; 1810 1811 // Regular instruction. 1812 // Abort early if we will have to insert non-free instructions. 1813 if (!SExtOpnd->hasOneUse() && 1814 !TLI.isTruncateFree(SExtTy, SExtOpnd->getType())) 1815 return nullptr; 1816 return promoteOperandForOther; 1817 } 1818 1819 Value *TypePromotionHelper::promoteOperandForTruncAndSExt( 1820 llvm::Instruction *SExt, TypePromotionTransaction &TPT, 1821 InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts) { 1822 // By construction, the operand of SExt is an instruction. Otherwise we cannot 1823 // get through it and this method should not be called. 1824 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 1825 // Replace sext(trunc(opnd)) or sext(sext(opnd)) 1826 // => sext(opnd). 1827 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 1828 CreatedInsts = 0; 1829 1830 // Remove dead code. 1831 if (SExtOpnd->use_empty()) 1832 TPT.eraseInstruction(SExtOpnd); 1833 1834 // Check if the sext is still needed. 1835 if (SExt->getType() != SExt->getOperand(0)->getType()) 1836 return SExt; 1837 1838 // At this point we have: sext ty opnd to ty. 1839 // Reassign the uses of SExt to the opnd and remove SExt. 1840 Value *NextVal = SExt->getOperand(0); 1841 TPT.eraseInstruction(SExt, NextVal); 1842 return NextVal; 1843 } 1844 1845 Value * 1846 TypePromotionHelper::promoteOperandForOther(Instruction *SExt, 1847 TypePromotionTransaction &TPT, 1848 InstrToOrigTy &PromotedInsts, 1849 unsigned &CreatedInsts) { 1850 // By construction, the operand of SExt is an instruction. Otherwise we cannot 1851 // get through it and this method should not be called. 1852 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 1853 CreatedInsts = 0; 1854 if (!SExtOpnd->hasOneUse()) { 1855 // SExtOpnd will be promoted. 1856 // All its uses, but SExt, will need to use a truncated value of the 1857 // promoted version. 1858 // Create the truncate now. 1859 Instruction *Trunc = TPT.createTrunc(SExt, SExtOpnd->getType()); 1860 Trunc->removeFromParent(); 1861 // Insert it just after the definition. 1862 Trunc->insertAfter(SExtOpnd); 1863 1864 TPT.replaceAllUsesWith(SExtOpnd, Trunc); 1865 // Restore the operand of SExt (which has been replaced by the previous call 1866 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 1867 TPT.setOperand(SExt, 0, SExtOpnd); 1868 } 1869 1870 // Get through the Instruction: 1871 // 1. Update its type. 1872 // 2. Replace the uses of SExt by Inst. 1873 // 3. Sign extend each operand that needs to be sign extended. 1874 1875 // Remember the original type of the instruction before promotion. 1876 // This is useful to know that the high bits are sign extended bits. 1877 PromotedInsts.insert( 1878 std::pair<Instruction *, Type *>(SExtOpnd, SExtOpnd->getType())); 1879 // Step #1. 1880 TPT.mutateType(SExtOpnd, SExt->getType()); 1881 // Step #2. 1882 TPT.replaceAllUsesWith(SExt, SExtOpnd); 1883 // Step #3.
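// Illustrative sketch of steps #1-#3 (the value names are invented for this
// comment only): starting from
//   %op = add nsw i32 %a, 1
//   %s  = sext i32 %op to i64
// the add is mutated to i64 (#1), uses of %s are rewired to %op (#2), the
// constant operand is sign extended statically, and %a gets an explicit sext
// (#3), giving roughly:
//   %pa = sext i32 %a to i64
//   %op = add nsw i64 %pa, 1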
1884 Instruction *SExtForOpnd = SExt; 1885 1886 DEBUG(dbgs() << "Propagate SExt to operands\n"); 1887 for (int OpIdx = 0, EndOpIdx = SExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 1888 ++OpIdx) { 1889 DEBUG(dbgs() << "Operand:\n" << *(SExtOpnd->getOperand(OpIdx)) << '\n'); 1890 if (SExtOpnd->getOperand(OpIdx)->getType() == SExt->getType() || 1891 !shouldSExtOperand(SExtOpnd, OpIdx)) { 1892 DEBUG(dbgs() << "No need to propagate\n"); 1893 continue; 1894 } 1895 // Check if we can statically sign extend the operand. 1896 Value *Opnd = SExtOpnd->getOperand(OpIdx); 1897 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 1898 DEBUG(dbgs() << "Statically sign extend\n"); 1899 TPT.setOperand( 1900 SExtOpnd, OpIdx, 1901 ConstantInt::getSigned(SExt->getType(), Cst->getSExtValue())); 1902 continue; 1903 } 1904 // UndefValues are typed, so we have to statically sign extend them. 1905 if (isa<UndefValue>(Opnd)) { 1906 DEBUG(dbgs() << "Statically sign extend\n"); 1907 TPT.setOperand(SExtOpnd, OpIdx, UndefValue::get(SExt->getType())); 1908 continue; 1909 } 1910 1911 // Otherwise we have to explicitly sign extend the operand. 1912 // Check if SExt was reused to sign extend an operand. 1913 if (!SExtForOpnd) { 1914 // If yes, create a new one. 1915 DEBUG(dbgs() << "More operands to sext\n"); 1916 SExtForOpnd = TPT.createSExt(SExt, Opnd, SExt->getType()); 1917 ++CreatedInsts; 1918 } 1919 1920 TPT.setOperand(SExtForOpnd, 0, Opnd); 1921 1922 // Move the sign extension before the insertion point. 1923 TPT.moveBefore(SExtForOpnd, SExtOpnd); 1924 TPT.setOperand(SExtOpnd, OpIdx, SExtForOpnd); 1925 // If more sexts are required, new instructions will have to be created. 1926 SExtForOpnd = nullptr; 1927 } 1928 if (SExtForOpnd == SExt) { 1929 DEBUG(dbgs() << "Sign extension is useless now\n"); 1930 TPT.eraseInstruction(SExt); 1931 } 1932 return SExtOpnd; 1933 } 1934 1935 /// IsPromotionProfitable - Check whether or not promoting an instruction 1936 /// to a wider type was profitable. 1937 /// \p MatchedSize gives the number of instructions that have been matched 1938 /// in the addressing mode after the promotion was applied. 1939 /// \p SizeWithPromotion gives the number of created instructions for 1940 /// the promotion plus the number of instructions that have been 1941 /// matched in the addressing mode before the promotion. 1942 /// \p PromotedOperand is the value that has been promoted. 1943 /// \return True if the promotion is profitable, false otherwise. 1944 bool 1945 AddressingModeMatcher::IsPromotionProfitable(unsigned MatchedSize, 1946 unsigned SizeWithPromotion, 1947 Value *PromotedOperand) const { 1948 // We folded fewer instructions than we created to promote the operand. 1949 // This is not profitable. 1950 if (MatchedSize < SizeWithPromotion) 1951 return false; 1952 if (MatchedSize > SizeWithPromotion) 1953 return true; 1954 // The promotion is neutral but it may help folding the sign extension in 1955 // loads for instance. 1956 // Check that we did not create an illegal instruction. 1957 Instruction *PromotedInst = dyn_cast<Instruction>(PromotedOperand); 1958 if (!PromotedInst) 1959 return false; 1960 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); 1961 // If the ISDOpcode is undefined, it was undefined before the promotion. 1962 if (!ISDOpcode) 1963 return true; 1964 // Otherwise, check if the promoted instruction is legal or not.
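// (For example, and purely as an illustration: an i32 operation widened to
// i64 may map to an ISD node that the target only supports at the narrower
// width; the isOperationLegalOrCustom check below rejects such promotions.)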
1965 return TLI.isOperationLegalOrCustom(ISDOpcode, 1966 EVT::getEVT(PromotedInst->getType())); 1967 } 1968 1969 /// MatchOperationAddr - Given an instruction or constant expr, see if we can 1970 /// fold the operation into the addressing mode. If so, update the addressing 1971 /// mode and return true, otherwise return false without modifying AddrMode. 1972 /// If \p MovedAway is not NULL, it contains the information of whether or 1973 /// not AddrInst has to be folded into the addressing mode on success. 1974 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode 1975 /// because it has been moved away. 1976 /// Thus AddrInst must not be added in the matched instructions. 1977 /// This state can happen when AddrInst is a sext, since it may be moved away. 1978 /// Therefore, AddrInst may not be valid when MovedAway is true and it must 1979 /// not be referenced anymore. 1980 bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode, 1981 unsigned Depth, 1982 bool *MovedAway) { 1983 // Avoid exponential behavior on extremely deep expression trees. 1984 if (Depth >= 5) return false; 1985 1986 // By default, all matched instructions stay in place. 1987 if (MovedAway) 1988 *MovedAway = false; 1989 1990 switch (Opcode) { 1991 case Instruction::PtrToInt: 1992 // PtrToInt is always a noop, as we know that the int type is pointer sized. 1993 return MatchAddr(AddrInst->getOperand(0), Depth); 1994 case Instruction::IntToPtr: 1995 // This inttoptr is a no-op if the integer type is pointer sized. 1996 if (TLI.getValueType(AddrInst->getOperand(0)->getType()) == 1997 TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace())) 1998 return MatchAddr(AddrInst->getOperand(0), Depth); 1999 return false; 2000 case Instruction::BitCast: 2001 case Instruction::AddrSpaceCast: 2002 // BitCast is always a noop, and we can handle it as long as it is 2003 // int->int or pointer->pointer (we don't want int<->fp or something). 2004 if ((AddrInst->getOperand(0)->getType()->isPointerTy() || 2005 AddrInst->getOperand(0)->getType()->isIntegerTy()) && 2006 // Don't touch identity bitcasts. These were probably put here by LSR, 2007 // and we don't want to mess around with them. Assume it knows what it 2008 // is doing. 2009 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 2010 return MatchAddr(AddrInst->getOperand(0), Depth); 2011 return false; 2012 case Instruction::Add: { 2013 // Check to see if we can merge in the RHS then the LHS. If so, we win. 2014 ExtAddrMode BackupAddrMode = AddrMode; 2015 unsigned OldSize = AddrModeInsts.size(); 2016 // Start a transaction at this point. 2017 // The LHS may match but not the RHS. 2018 // Therefore, we need a higher level restoration point to undo a partially 2019 // matched operation. 2020 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2021 TPT.getRestorationPoint(); 2022 2023 if (MatchAddr(AddrInst->getOperand(1), Depth+1) && 2024 MatchAddr(AddrInst->getOperand(0), Depth+1)) 2025 return true; 2026 2027 // Restore the old addr mode info. 2028 AddrMode = BackupAddrMode; 2029 AddrModeInsts.resize(OldSize); 2030 TPT.rollback(LastKnownGood); 2031 2032 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 2033 if (MatchAddr(AddrInst->getOperand(0), Depth+1) && 2034 MatchAddr(AddrInst->getOperand(1), Depth+1)) 2035 return true; 2036 2037 // Otherwise we definitely can't merge the ADD in.
2038 AddrMode = BackupAddrMode; 2039 AddrModeInsts.resize(OldSize); 2040 TPT.rollback(LastKnownGood); 2041 break; 2042 } 2043 //case Instruction::Or: 2044 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 2045 //break; 2046 case Instruction::Mul: 2047 case Instruction::Shl: { 2048 // Can only handle X*C and X << C. 2049 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 2050 if (!RHS) return false; 2051 int64_t Scale = RHS->getSExtValue(); 2052 if (Opcode == Instruction::Shl) 2053 Scale = 1LL << Scale; 2054 2055 return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth); 2056 } 2057 case Instruction::GetElementPtr: { 2058 // Scan the GEP. We check it if it contains constant offsets and at most 2059 // one variable offset. 2060 int VariableOperand = -1; 2061 unsigned VariableScale = 0; 2062 2063 int64_t ConstantOffset = 0; 2064 const DataLayout *TD = TLI.getDataLayout(); 2065 gep_type_iterator GTI = gep_type_begin(AddrInst); 2066 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 2067 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 2068 const StructLayout *SL = TD->getStructLayout(STy); 2069 unsigned Idx = 2070 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 2071 ConstantOffset += SL->getElementOffset(Idx); 2072 } else { 2073 uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType()); 2074 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 2075 ConstantOffset += CI->getSExtValue()*TypeSize; 2076 } else if (TypeSize) { // Scales of zero don't do anything. 2077 // We only allow one variable index at the moment. 2078 if (VariableOperand != -1) 2079 return false; 2080 2081 // Remember the variable index. 2082 VariableOperand = i; 2083 VariableScale = TypeSize; 2084 } 2085 } 2086 } 2087 2088 // A common case is for the GEP to only do a constant offset. In this case, 2089 // just add it to the disp field and check validity. 2090 if (VariableOperand == -1) { 2091 AddrMode.BaseOffs += ConstantOffset; 2092 if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){ 2093 // Check to see if we can fold the base pointer in too. 2094 if (MatchAddr(AddrInst->getOperand(0), Depth+1)) 2095 return true; 2096 } 2097 AddrMode.BaseOffs -= ConstantOffset; 2098 return false; 2099 } 2100 2101 // Save the valid addressing mode in case we can't match. 2102 ExtAddrMode BackupAddrMode = AddrMode; 2103 unsigned OldSize = AddrModeInsts.size(); 2104 2105 // See if the scale and offset amount is valid for this target. 2106 AddrMode.BaseOffs += ConstantOffset; 2107 2108 // Match the base operand of the GEP. 2109 if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) { 2110 // If it couldn't be matched, just stuff the value in a register. 2111 if (AddrMode.HasBaseReg) { 2112 AddrMode = BackupAddrMode; 2113 AddrModeInsts.resize(OldSize); 2114 return false; 2115 } 2116 AddrMode.HasBaseReg = true; 2117 AddrMode.BaseReg = AddrInst->getOperand(0); 2118 } 2119 2120 // Match the remaining variable portion of the GEP. 2121 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 2122 Depth)) { 2123 // If it couldn't be matched, try stuffing the base into a register 2124 // instead of matching it, and retrying the match of the scale. 
2125 AddrMode = BackupAddrMode; 2126 AddrModeInsts.resize(OldSize); 2127 if (AddrMode.HasBaseReg) 2128 return false; 2129 AddrMode.HasBaseReg = true; 2130 AddrMode.BaseReg = AddrInst->getOperand(0); 2131 AddrMode.BaseOffs += ConstantOffset; 2132 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), 2133 VariableScale, Depth)) { 2134 // If even that didn't work, bail. 2135 AddrMode = BackupAddrMode; 2136 AddrModeInsts.resize(OldSize); 2137 return false; 2138 } 2139 } 2140 2141 return true; 2142 } 2143 case Instruction::SExt: { 2144 // Try to move this sext out of the way of the addressing mode. 2145 Instruction *SExt = cast<Instruction>(AddrInst); 2146 // Ask for a method for doing so. 2147 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction( 2148 SExt, InsertedTruncs, TLI, PromotedInsts); 2149 if (!TPH) 2150 return false; 2151 2152 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2153 TPT.getRestorationPoint(); 2154 unsigned CreatedInsts = 0; 2155 Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts); 2156 // SExt has been moved away. 2157 // Thus either it will be rematched later in the recursive calls or it is 2158 // gone. Anyway, we must not fold it into the addressing mode at this point. 2159 // E.g., 2160 // op = add opnd, 1 2161 // idx = sext op 2162 // addr = gep base, idx 2163 // is now: 2164 // promotedOpnd = sext opnd <- no match here 2165 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 2166 // addr = gep base, op <- match 2167 if (MovedAway) 2168 *MovedAway = true; 2169 2170 assert(PromotedOperand && 2171 "TypePromotionHelper should have filtered out those cases"); 2172 2173 ExtAddrMode BackupAddrMode = AddrMode; 2174 unsigned OldSize = AddrModeInsts.size(); 2175 2176 if (!MatchAddr(PromotedOperand, Depth) || 2177 !IsPromotionProfitable(AddrModeInsts.size(), OldSize + CreatedInsts, 2178 PromotedOperand)) { 2179 AddrMode = BackupAddrMode; 2180 AddrModeInsts.resize(OldSize); 2181 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 2182 TPT.rollback(LastKnownGood); 2183 return false; 2184 } 2185 return true; 2186 } 2187 } 2188 return false; 2189 } 2190 2191 /// MatchAddr - If we can, try to add the value of 'Addr' into the current 2192 /// addressing mode. If Addr can't be added to AddrMode this returns false and 2193 /// leaves AddrMode unmodified. This assumes that Addr is either a pointer type 2194 /// or intptr_t for the target. 2195 /// 2196 bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) { 2197 // Start a transaction at this point that we will rollback if the matching 2198 // fails. 2199 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2200 TPT.getRestorationPoint(); 2201 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 2202 // Fold in immediates if legal for the target. 2203 AddrMode.BaseOffs += CI->getSExtValue(); 2204 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2205 return true; 2206 AddrMode.BaseOffs -= CI->getSExtValue(); 2207 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 2208 // If this is a global variable, try to fold it into the addressing mode. 2209 if (!AddrMode.BaseGV) { 2210 AddrMode.BaseGV = GV; 2211 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2212 return true; 2213 AddrMode.BaseGV = nullptr; 2214 } 2215 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 2216 ExtAddrMode BackupAddrMode = AddrMode; 2217 unsigned OldSize = AddrModeInsts.size(); 2218 2219 // Check to see if it is possible to fold this operation. 
2220 bool MovedAway = false; 2221 if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 2222 // This instruction may have been moved away. If so, there is nothing 2223 // to check here. 2224 if (MovedAway) 2225 return true; 2226 // Okay, it's possible to fold this. Check to see if it is actually 2227 // *profitable* to do so. We use a simple cost model to avoid increasing 2228 // register pressure too much. 2229 if (I->hasOneUse() || 2230 IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 2231 AddrModeInsts.push_back(I); 2232 return true; 2233 } 2234 2235 // It isn't profitable to do this, roll back. 2236 //cerr << "NOT FOLDING: " << *I; 2237 AddrMode = BackupAddrMode; 2238 AddrModeInsts.resize(OldSize); 2239 TPT.rollback(LastKnownGood); 2240 } 2241 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 2242 if (MatchOperationAddr(CE, CE->getOpcode(), Depth)) 2243 return true; 2244 TPT.rollback(LastKnownGood); 2245 } else if (isa<ConstantPointerNull>(Addr)) { 2246 // Null pointer gets folded without affecting the addressing mode. 2247 return true; 2248 } 2249 2250 // Worst case, the target should support [reg] addressing modes. :) 2251 if (!AddrMode.HasBaseReg) { 2252 AddrMode.HasBaseReg = true; 2253 AddrMode.BaseReg = Addr; 2254 // Still check for legality in case the target supports [imm] but not [i+r]. 2255 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2256 return true; 2257 AddrMode.HasBaseReg = false; 2258 AddrMode.BaseReg = nullptr; 2259 } 2260 2261 // If the base register is already taken, see if we can do [r+r]. 2262 if (AddrMode.Scale == 0) { 2263 AddrMode.Scale = 1; 2264 AddrMode.ScaledReg = Addr; 2265 if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) 2266 return true; 2267 AddrMode.Scale = 0; 2268 AddrMode.ScaledReg = nullptr; 2269 } 2270 // Couldn't match. 2271 TPT.rollback(LastKnownGood); 2272 return false; 2273 } 2274 2275 /// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified 2276 /// inline asm call are due to memory operands. If so, return true, otherwise 2277 /// return false. 2278 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 2279 const TargetLowering &TLI) { 2280 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(ImmutableCallSite(CI)); 2281 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 2282 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 2283 2284 // Compute the constraint code and ConstraintType to use. 2285 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 2286 2287 // If this asm operand is our Value*, and if it isn't an indirect memory 2288 // operand, we can't fold it! 2289 if (OpInfo.CallOperandVal == OpVal && 2290 (OpInfo.ConstraintType != TargetLowering::C_Memory || 2291 !OpInfo.isIndirect)) 2292 return false; 2293 } 2294 2295 return true; 2296 } 2297 2298 /// FindAllMemoryUses - Recursively walk all the uses of I until we find a 2299 /// memory use. If we find an obviously non-foldable instruction, return true. 2300 /// Add the ultimately found memory instructions to MemoryUses. 2301 static bool FindAllMemoryUses(Instruction *I, 2302 SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses, 2303 SmallPtrSet<Instruction*, 16> &ConsideredInsts, 2304 const TargetLowering &TLI) { 2305 // If we already considered this instruction, we're done. 2306 if (!ConsideredInsts.insert(I)) 2307 return false; 2308 2309 // If this is an obviously unfoldable instruction, bail out.
2310 if (!MightBeFoldableInst(I)) 2311 return true; 2312 2313 // Loop over all the uses, recursively processing them. 2314 for (Use &U : I->uses()) { 2315 Instruction *UserI = cast<Instruction>(U.getUser()); 2316 2317 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 2318 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 2319 continue; 2320 } 2321 2322 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 2323 unsigned opNo = U.getOperandNo(); 2324 if (opNo == 0) return true; // Storing addr, not into addr. 2325 MemoryUses.push_back(std::make_pair(SI, opNo)); 2326 continue; 2327 } 2328 2329 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 2330 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 2331 if (!IA) return true; 2332 2333 // If this is a memory operand, we're cool, otherwise bail out. 2334 if (!IsOperandAMemoryOperand(CI, IA, I, TLI)) 2335 return true; 2336 continue; 2337 } 2338 2339 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI)) 2340 return true; 2341 } 2342 2343 return false; 2344 } 2345 2346 /// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at 2347 /// the use site that we're folding it into. If so, there is no cost to 2348 /// include it in the addressing mode. KnownLive1 and KnownLive2 are two values 2349 /// that we know are live at the instruction already. 2350 bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 2351 Value *KnownLive2) { 2352 // If Val is either of the known-live values, we know it is live! 2353 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 2354 return true; 2355 2356 // All values other than instructions and arguments (e.g. constants) are live. 2357 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 2358 2359 // If Val is a constant sized alloca in the entry block, it is live. This is 2360 // true because it is just a reference to the stack/frame pointer, which is 2361 // live for the whole function. 2362 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 2363 if (AI->isStaticAlloca()) 2364 return true; 2365 2366 // Check to see if this value is already used in the memory instruction's 2367 // block. If so, it's already live into the block at the very least, so we 2368 // can reasonably fold it. 2369 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 2370 } 2371 2372 /// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing 2373 /// mode of the machine to fold the specified instruction into a load or store 2374 /// that ultimately uses it. However, the specified instruction has multiple 2375 /// uses. Given this, it may actually increase register pressure to fold it 2376 /// into the load. For example, consider this code: 2377 /// 2378 /// X = ... 2379 /// Y = X+1 2380 /// use(Y) -> nonload/store 2381 /// Z = Y+1 2382 /// load Z 2383 /// 2384 /// In this case, Y has multiple uses, and can be folded into the load of Z 2385 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 2386 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 2387 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 2388 /// number of computations either. 2389 /// 2390 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 2391 /// X was live across 'load Z' for other reasons, we actually *would* want to 2392 /// fold the addressing mode in the Z case. This would make Y die earlier.
2393 bool AddressingModeMatcher:: 2394 IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 2395 ExtAddrMode &AMAfter) { 2396 if (IgnoreProfitability) return true; 2397 2398 // AMBefore is the addressing mode before this instruction was folded into it, 2399 // and AMAfter is the addressing mode after the instruction was folded. Get 2400 // the set of registers referenced by AMAfter and subtract out those 2401 // referenced by AMBefore: this is the set of values which folding in this 2402 // address extends the lifetime of. 2403 // 2404 // Note that there are only two potential values being referenced here, 2405 // BaseReg and ScaleReg (global addresses are always available, as are any 2406 // folded immediates). 2407 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 2408 2409 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 2410 // lifetime wasn't extended by adding this instruction. 2411 if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 2412 BaseReg = nullptr; 2413 if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 2414 ScaledReg = nullptr; 2415 2416 // If folding this instruction (and its subexprs) didn't extend any live 2417 // ranges, we're ok with it. 2418 if (!BaseReg && !ScaledReg) 2419 return true; 2420 2421 // If all uses of this instruction are ultimately load/store/inlineasm's, 2422 // check to see if their addressing modes will include this instruction. If 2423 // so, we can fold it into all uses, so it doesn't matter if it has multiple 2424 // uses. 2425 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 2426 SmallPtrSet<Instruction*, 16> ConsideredInsts; 2427 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI)) 2428 return false; // Has a non-memory, non-foldable use! 2429 2430 // Now that we know that all uses of this instruction are part of a chain of 2431 // computation involving only operations that could theoretically be folded 2432 // into a memory use, loop over each of these uses and see if they could 2433 // *actually* fold the instruction. 2434 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 2435 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 2436 Instruction *User = MemoryUses[i].first; 2437 unsigned OpNo = MemoryUses[i].second; 2438 2439 // Get the access type of this use. If the use isn't a pointer, we don't 2440 // know what it accesses. 2441 Value *Address = User->getOperand(OpNo); 2442 if (!Address->getType()->isPointerTy()) 2443 return false; 2444 Type *AddressAccessTy = Address->getType()->getPointerElementType(); 2445 2446 // Do a match against the root of this address, ignoring profitability. This 2447 // will tell us if the addressing mode for the memory operation will 2448 // *actually* cover the shared instruction. 2449 ExtAddrMode Result; 2450 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2451 TPT.getRestorationPoint(); 2452 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy, 2453 MemoryInst, Result, InsertedTruncs, 2454 PromotedInsts, TPT); 2455 Matcher.IgnoreProfitability = true; 2456 bool Success = Matcher.MatchAddr(Address, 0); 2457 (void)Success; assert(Success && "Couldn't select *anything*?"); 2458 2459 // The match was only to check profitability, so the changes made are not 2460 // part of the original matcher. Therefore, they should be dropped; 2461 // otherwise the original matcher will not reflect the right state.
2462 TPT.rollback(LastKnownGood); 2463 2464 // If the match didn't cover I, then it won't be shared by it. 2465 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(), 2466 I) == MatchedAddrModeInsts.end()) 2467 return false; 2468 2469 MatchedAddrModeInsts.clear(); 2470 } 2471 2472 return true; 2473 } 2474 2475 } // end anonymous namespace 2476 2477 /// IsNonLocalValue - Return true if the specified values are defined in a 2478 /// different basic block than BB. 2479 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 2480 if (Instruction *I = dyn_cast<Instruction>(V)) 2481 return I->getParent() != BB; 2482 return false; 2483 } 2484 2485 /// OptimizeMemoryInst - Load and Store Instructions often have 2486 /// addressing modes that can do significant amounts of computation. As such, 2487 /// instruction selection will try to get the load or store to do as much 2488 /// computation as possible for the program. The problem is that isel can only 2489 /// see within a single block. As such, we sink as much legal addressing mode 2490 /// stuff into the block as possible. 2491 /// 2492 /// This method is used to optimize both load/store and inline asms with memory 2493 /// operands. 2494 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 2495 Type *AccessTy) { 2496 Value *Repl = Addr; 2497 2498 // Try to collapse single-value PHI nodes. This is necessary to undo 2499 // unprofitable PRE transformations. 2500 SmallVector<Value*, 8> worklist; 2501 SmallPtrSet<Value*, 16> Visited; 2502 worklist.push_back(Addr); 2503 2504 // Use a worklist to iteratively look through PHI nodes, and ensure that 2505 // the addressing mode obtained from the non-PHI roots of the graph 2506 // are equivalent. 2507 Value *Consensus = nullptr; 2508 unsigned NumUsesConsensus = 0; 2509 bool IsNumUsesConsensusValid = false; 2510 SmallVector<Instruction*, 16> AddrModeInsts; 2511 ExtAddrMode AddrMode; 2512 TypePromotionTransaction TPT; 2513 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2514 TPT.getRestorationPoint(); 2515 while (!worklist.empty()) { 2516 Value *V = worklist.back(); 2517 worklist.pop_back(); 2518 2519 // Break use-def graph loops. 2520 if (!Visited.insert(V)) { 2521 Consensus = nullptr; 2522 break; 2523 } 2524 2525 // For a PHI node, push all of its incoming values. 2526 if (PHINode *P = dyn_cast<PHINode>(V)) { 2527 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) 2528 worklist.push_back(P->getIncomingValue(i)); 2529 continue; 2530 } 2531 2532 // For non-PHIs, determine the addressing mode being computed. 2533 SmallVector<Instruction*, 16> NewAddrModeInsts; 2534 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 2535 V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet, 2536 PromotedInsts, TPT); 2537 2538 // This check is broken into two cases with very similar code to avoid using 2539 // getNumUses() as much as possible. Some values have a lot of uses, so 2540 // calling getNumUses() unconditionally caused a significant compile-time 2541 // regression. 2542 if (!Consensus) { 2543 Consensus = V; 2544 AddrMode = NewAddrMode; 2545 AddrModeInsts = NewAddrModeInsts; 2546 continue; 2547 } else if (NewAddrMode == AddrMode) { 2548 if (!IsNumUsesConsensusValid) { 2549 NumUsesConsensus = Consensus->getNumUses(); 2550 IsNumUsesConsensusValid = true; 2551 } 2552 2553 // Ensure that the obtained addressing mode is equivalent to that obtained 2554 // for all other roots of the PHI traversal. 
Also, when choosing one 2555 // such root as representative, select the one with the most uses in order 2556 // to keep the cost modeling heuristics in AddressingModeMatcher 2557 // applicable. 2558 unsigned NumUses = V->getNumUses(); 2559 if (NumUses > NumUsesConsensus) { 2560 Consensus = V; 2561 NumUsesConsensus = NumUses; 2562 AddrModeInsts = NewAddrModeInsts; 2563 } 2564 continue; 2565 } 2566 2567 Consensus = nullptr; 2568 break; 2569 } 2570 2571 // If the addressing mode couldn't be determined, or if multiple different 2572 // ones were determined, bail out now. 2573 if (!Consensus) { 2574 TPT.rollback(LastKnownGood); 2575 return false; 2576 } 2577 TPT.commit(); 2578 2579 // Check to see if any of the instructions subsumed by this addr mode are 2580 // non-local to I's BB. 2581 bool AnyNonLocal = false; 2582 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) { 2583 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) { 2584 AnyNonLocal = true; 2585 break; 2586 } 2587 } 2588 2589 // If all the instructions matched are already in this BB, don't do anything. 2590 if (!AnyNonLocal) { 2591 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); 2592 return false; 2593 } 2594 2595 // Insert this computation right after this user. Since our caller is 2596 // scanning from the top of the BB to the bottom, reuses of the expr are 2597 // guaranteed to happen later. 2598 IRBuilder<> Builder(MemoryInst); 2599 2600 // Now that we've determined the addressing expression we want to use and know 2601 // that we have to sink it into this block, check to see if we have already 2602 // done this for some other load/store instr in this block. If so, reuse the 2603 // computation. 2604 Value *&SunkAddr = SunkAddrs[Addr]; 2605 if (SunkAddr) { 2606 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " 2607 << *MemoryInst << "\n"); 2608 if (SunkAddr->getType() != Addr->getType()) 2609 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 2610 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() && 2611 TM && TM->getSubtarget<TargetSubtargetInfo>().useAA())) { 2612 // By default, we use the GEP-based method when AA is used later. This 2613 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 2614 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 2615 << *MemoryInst << "\n"); 2616 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); 2617 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 2618 2619 // First, find the pointer. 2620 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 2621 ResultPtr = AddrMode.BaseReg; 2622 AddrMode.BaseReg = nullptr; 2623 } 2624 2625 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 2626 // We can't add more than one pointer together, nor can we scale a 2627 // pointer (both of which seem meaningless). 2628 if (ResultPtr || AddrMode.Scale != 1) 2629 return false; 2630 2631 ResultPtr = AddrMode.ScaledReg; 2632 AddrMode.Scale = 0; 2633 } 2634 2635 if (AddrMode.BaseGV) { 2636 if (ResultPtr) 2637 return false; 2638 2639 ResultPtr = AddrMode.BaseGV; 2640 } 2641 2642 // If the real base value actually came from an inttoptr, then the matcher 2643 // will look through it and provide only the integer value. In that case, 2644 // use it here.
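// (Illustrative sketch: if the original address came from something like
// "%p = inttoptr i64 %i to i32*", the matcher records %i as the base
// register, and the code below rebuilds a pointer from it with
// CreateIntToPtr.)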
2645 if (!ResultPtr && AddrMode.BaseReg) { 2646 ResultPtr = 2647 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); 2648 AddrMode.BaseReg = nullptr; 2649 } else if (!ResultPtr && AddrMode.Scale == 1) { 2650 ResultPtr = 2651 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); 2652 AddrMode.Scale = 0; 2653 } 2654 2655 if (!ResultPtr && 2656 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 2657 SunkAddr = Constant::getNullValue(Addr->getType()); 2658 } else if (!ResultPtr) { 2659 return false; 2660 } else { 2661 Type *I8PtrTy = 2662 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 2663 2664 // Start with the base register. Do this first so that subsequent address 2665 // matching finds it last, which will prevent it from trying to match it 2666 // as the scaled value in case it happens to be a mul. That would be 2667 // problematic if we've sunk a different mul for the scale, because then 2668 // we'd end up sinking both muls. 2669 if (AddrMode.BaseReg) { 2670 Value *V = AddrMode.BaseReg; 2671 if (V->getType() != IntPtrTy) 2672 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 2673 2674 ResultIndex = V; 2675 } 2676 2677 // Add the scale value. 2678 if (AddrMode.Scale) { 2679 Value *V = AddrMode.ScaledReg; 2680 if (V->getType() == IntPtrTy) { 2681 // done. 2682 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 2683 cast<IntegerType>(V->getType())->getBitWidth()) { 2684 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 2685 } else { 2686 // It is only safe to sign extend the BaseReg if we know that the math 2687 // required to create it did not overflow before we extend it. Since 2688 // the original IR value was tossed in favor of a constant back when 2689 // the AddrMode was created we need to bail out gracefully if widths 2690 // do not match instead of extending it. 2691 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); 2692 if (I && (ResultIndex != AddrMode.BaseReg)) 2693 I->eraseFromParent(); 2694 return false; 2695 } 2696 2697 if (AddrMode.Scale != 1) 2698 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 2699 "sunkaddr"); 2700 if (ResultIndex) 2701 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 2702 else 2703 ResultIndex = V; 2704 } 2705 2706 // Add in the Base Offset if present. 2707 if (AddrMode.BaseOffs) { 2708 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 2709 if (ResultIndex) { 2710 // We need to add this separately from the scale above to help with 2711 // SDAG consecutive load/store merging. 2712 if (ResultPtr->getType() != I8PtrTy) 2713 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 2714 ResultPtr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); 2715 } 2716 2717 ResultIndex = V; 2718 } 2719 2720 if (!ResultIndex) { 2721 SunkAddr = ResultPtr; 2722 } else { 2723 if (ResultPtr->getType() != I8PtrTy) 2724 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 2725 SunkAddr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); 2726 } 2727 2728 if (SunkAddr->getType() != Addr->getType()) 2729 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 2730 } 2731 } else { 2732 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 2733 << *MemoryInst << "\n"); 2734 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); 2735 Value *Result = nullptr; 2736 2737 // Start with the base register. 
Do this first so that subsequent address 2738 // matching finds it last, which will prevent it from trying to match it 2739 // as the scaled value in case it happens to be a mul. That would be 2740 // problematic if we've sunk a different mul for the scale, because then 2741 // we'd end up sinking both muls. 2742 if (AddrMode.BaseReg) { 2743 Value *V = AddrMode.BaseReg; 2744 if (V->getType()->isPointerTy()) 2745 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 2746 if (V->getType() != IntPtrTy) 2747 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 2748 Result = V; 2749 } 2750 2751 // Add the scale value. 2752 if (AddrMode.Scale) { 2753 Value *V = AddrMode.ScaledReg; 2754 if (V->getType() == IntPtrTy) { 2755 // done. 2756 } else if (V->getType()->isPointerTy()) { 2757 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 2758 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 2759 cast<IntegerType>(V->getType())->getBitWidth()) { 2760 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 2761 } else { 2762 // It is only safe to sign extend the BaseReg if we know that the math 2763 // required to create it did not overflow before we extend it. Since 2764 // the original IR value was tossed in favor of a constant back when 2765 // the AddrMode was created we need to bail out gracefully if widths 2766 // do not match instead of extending it. 2767 Instruction *I = dyn_cast_or_null<Instruction>(Result); 2768 if (I && (Result != AddrMode.BaseReg)) 2769 I->eraseFromParent(); 2770 return false; 2771 } 2772 if (AddrMode.Scale != 1) 2773 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 2774 "sunkaddr"); 2775 if (Result) 2776 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 2777 else 2778 Result = V; 2779 } 2780 2781 // Add in the BaseGV if present. 2782 if (AddrMode.BaseGV) { 2783 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 2784 if (Result) 2785 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 2786 else 2787 Result = V; 2788 } 2789 2790 // Add in the Base Offset if present. 2791 if (AddrMode.BaseOffs) { 2792 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 2793 if (Result) 2794 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 2795 else 2796 Result = V; 2797 } 2798 2799 if (!Result) 2800 SunkAddr = Constant::getNullValue(Addr->getType()); 2801 else 2802 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 2803 } 2804 2805 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 2806 2807 // If we have no uses, recursively delete the value and all dead instructions 2808 // using it. 2809 if (Repl->use_empty()) { 2810 // This can cause recursive deletion, which can invalidate our iterator. 2811 // Use a WeakVH to hold onto it in case this happens. 2812 WeakVH IterHandle(CurInstIterator); 2813 BasicBlock *BB = CurInstIterator->getParent(); 2814 2815 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 2816 2817 if (IterHandle != CurInstIterator) { 2818 // If the iterator instruction was recursively deleted, start over at the 2819 // start of the block. 2820 CurInstIterator = BB->begin(); 2821 SunkAddrs.clear(); 2822 } 2823 } 2824 ++NumMemoryInsts; 2825 return true; 2826 } 2827 2828 /// OptimizeInlineAsmInst - If there are any memory operands, use 2829 /// OptimizeMemoryInst to sink their address computing into the block when 2830 /// possible / profitable. 
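/// For example (an illustrative sketch only), in
///   call void asm sideeffect "incl $0", "=*m"(i32* %slot)
/// the "=*m" constraint is an indirect memory operand, so the computation of
/// %slot's address is a candidate for being sunk into this block.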
2831 bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) { 2832 bool MadeChange = false; 2833 2834 TargetLowering::AsmOperandInfoVector 2835 TargetConstraints = TLI->ParseConstraints(CS); 2836 unsigned ArgNo = 0; 2837 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 2838 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 2839 2840 // Compute the constraint code and ConstraintType to use. 2841 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 2842 2843 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 2844 OpInfo.isIndirect) { 2845 Value *OpVal = CS->getArgOperand(ArgNo++); 2846 MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType()); 2847 } else if (OpInfo.Type == InlineAsm::isInput) 2848 ArgNo++; 2849 } 2850 2851 return MadeChange; 2852 } 2853 2854 /// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same 2855 /// basic block as the load, unless conditions are unfavorable. This allows 2856 /// SelectionDAG to fold the extend into the load. 2857 /// 2858 bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) { 2859 // Look for a load being extended. 2860 LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0)); 2861 if (!LI) return false; 2862 2863 // If they're already in the same block, there's nothing to do. 2864 if (LI->getParent() == I->getParent()) 2865 return false; 2866 2867 // If the load has other users and the truncate is not free, this probably 2868 // isn't worthwhile. 2869 if (!LI->hasOneUse() && 2870 TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) || 2871 !TLI->isTypeLegal(TLI->getValueType(I->getType()))) && 2872 !TLI->isTruncateFree(I->getType(), LI->getType())) 2873 return false; 2874 2875 // Check whether the target supports casts folded into loads. 2876 unsigned LType; 2877 if (isa<ZExtInst>(I)) 2878 LType = ISD::ZEXTLOAD; 2879 else { 2880 assert(isa<SExtInst>(I) && "Unexpected ext type!"); 2881 LType = ISD::SEXTLOAD; 2882 } 2883 if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType()))) 2884 return false; 2885 2886 // Move the extend into the same block as the load, so that SelectionDAG 2887 // can fold it. 2888 I->removeFromParent(); 2889 I->insertAfter(LI); 2890 ++NumExtsMoved; 2891 return true; 2892 } 2893 2894 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) { 2895 BasicBlock *DefBB = I->getParent(); 2896 2897 // If the result of a {s|z}ext and its source are both live out, rewrite all 2898 // other uses of the source with result of extension. 2899 Value *Src = I->getOperand(0); 2900 if (Src->hasOneUse()) 2901 return false; 2902 2903 // Only do this xform if truncating is free. 2904 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 2905 return false; 2906 2907 // Only safe to perform the optimization if the source is also defined in 2908 // this block. 2909 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 2910 return false; 2911 2912 bool DefIsLiveOut = false; 2913 for (User *U : I->users()) { 2914 Instruction *UI = cast<Instruction>(U); 2915 2916 // Figure out which BB this ext is used in. 2917 BasicBlock *UserBB = UI->getParent(); 2918 if (UserBB == DefBB) continue; 2919 DefIsLiveOut = true; 2920 break; 2921 } 2922 if (!DefIsLiveOut) 2923 return false; 2924 2925 // Make sure none of the uses are PHI nodes. 2926 for (User *U : Src->users()) { 2927 Instruction *UI = cast<Instruction>(U); 2928 BasicBlock *UserBB = UI->getParent(); 2929 if (UserBB == DefBB) continue; 2930 // Be conservative. 
We don't want this xform to end up introducing 2931 // reloads just before load / store instructions. 2932 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 2933 return false; 2934 } 2935 2936 // InsertedTruncs - Only insert one trunc in each block once. 2937 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 2938 2939 bool MadeChange = false; 2940 for (Use &U : Src->uses()) { 2941 Instruction *User = cast<Instruction>(U.getUser()); 2942 2943 // Figure out which BB this ext is used in. 2944 BasicBlock *UserBB = User->getParent(); 2945 if (UserBB == DefBB) continue; 2946 2947 // Both src and def are live in this block. Rewrite the use. 2948 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 2949 2950 if (!InsertedTrunc) { 2951 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 2952 InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt); 2953 InsertedTruncsSet.insert(InsertedTrunc); 2954 } 2955 2956 // Replace a use of the {s|z}ext source with a use of the result. 2957 U = InsertedTrunc; 2958 ++NumExtUses; 2959 MadeChange = true; 2960 } 2961 2962 return MadeChange; 2963 } 2964 2965 /// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be 2966 /// turned into an explicit branch. 2967 static bool isFormingBranchFromSelectProfitable(SelectInst *SI) { 2968 // FIXME: This should use the same heuristics as IfConversion to determine 2969 // whether a select is better represented as a branch. This requires that 2970 // branch probability metadata is preserved for the select, which is not the 2971 // case currently. 2972 2973 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 2974 2975 // If the branch is predicted right, an out of order CPU can avoid blocking on 2976 // the compare. Emit cmovs on compares with a memory operand as branches to 2977 // avoid stalls on the load from memory. If the compare has more than one use 2978 // there's probably another cmov or setcc around so it's not worth emitting a 2979 // branch. 2980 if (!Cmp) 2981 return false; 2982 2983 Value *CmpOp0 = Cmp->getOperand(0); 2984 Value *CmpOp1 = Cmp->getOperand(1); 2985 2986 // We check that the memory operand has one use to avoid uses of the loaded 2987 // value directly after the compare, making branches unprofitable. 2988 return Cmp->hasOneUse() && 2989 ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) || 2990 (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse())); 2991 } 2992 2993 2994 /// If we have a SelectInst that will likely profit from branch prediction, 2995 /// turn it into a branch. 2996 bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) { 2997 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 2998 2999 // Can we convert the 'select' to CF ? 3000 if (DisableSelectToBranch || OptSize || !TLI || VectorCond) 3001 return false; 3002 3003 TargetLowering::SelectSupportKind SelectKind; 3004 if (VectorCond) 3005 SelectKind = TargetLowering::VectorMaskSelect; 3006 else if (SI->getType()->isVectorTy()) 3007 SelectKind = TargetLowering::ScalarCondVectorVal; 3008 else 3009 SelectKind = TargetLowering::ScalarValSelect; 3010 3011 // Do we have efficient codegen support for this kind of 'selects' ? 3012 if (TLI->isSelectSupported(SelectKind)) { 3013 // We have efficient codegen support for the select instruction. 3014 // Check if it is profitable to keep this 'select'. 
3015 if (!TLI->isPredictableSelectExpensive() || 3016 !isFormingBranchFromSelectProfitable(SI)) 3017 return false; 3018 } 3019 3020 ModifiedDT = true; 3021 3022 // First, we split the block containing the select into 2 blocks. 3023 BasicBlock *StartBlock = SI->getParent(); 3024 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI)); 3025 BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 3026 3027 // Create a new block serving as the landing pad for the branch. 3028 BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid", 3029 NextBlock->getParent(), NextBlock); 3030 3031 // Move the unconditional branch from the block with the select in it into our 3032 // landing pad block. 3033 StartBlock->getTerminator()->eraseFromParent(); 3034 BranchInst::Create(NextBlock, SmallBlock); 3035 3036 // Insert the real conditional branch based on the original condition. 3037 BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI); 3038 3039 // The select itself is replaced with a PHI Node. 3040 PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin()); 3041 PN->takeName(SI); 3042 PN->addIncoming(SI->getTrueValue(), StartBlock); 3043 PN->addIncoming(SI->getFalseValue(), SmallBlock); 3044 SI->replaceAllUsesWith(PN); 3045 SI->eraseFromParent(); 3046 3047 // Instruct OptimizeBlock to skip to the next block. 3048 CurInstIterator = StartBlock->end(); 3049 ++NumSelectsExpanded; 3050 return true; 3051 } 3052 3053 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 3054 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 3055 int SplatElem = -1; 3056 for (unsigned i = 0; i < Mask.size(); ++i) { 3057 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 3058 return false; 3059 SplatElem = Mask[i]; 3060 } 3061 3062 return true; 3063 } 3064 3065 /// Some targets have expensive vector shifts if the lanes aren't all the same 3066 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 3067 /// it's often worth sinking a shufflevector splat down to its use so that 3068 /// codegen can spot all lanes are identical. 3069 bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 3070 BasicBlock *DefBB = SVI->getParent(); 3071 3072 // Only do this xform if variable vector shifts are particularly expensive. 3073 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 3074 return false; 3075 3076 // We only expect better codegen by sinking a shuffle if we can recognise a 3077 // constant splat. 3078 if (!isBroadcastShuffle(SVI)) 3079 return false; 3080 3081 // InsertedShuffles - Only insert a shuffle in each block once. 3082 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 3083 3084 bool MadeChange = false; 3085 for (User *U : SVI->users()) { 3086 Instruction *UI = cast<Instruction>(U); 3087 3088 // Figure out which BB this ext is used in. 3089 BasicBlock *UserBB = UI->getParent(); 3090 if (UserBB == DefBB) continue; 3091 3092 // For now only apply this when the splat is used by a shift instruction. 3093 if (!UI->isShift()) continue; 3094 3095 // Everything checks out, sink the shuffle if the user's block doesn't 3096 // already have a copy. 
bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  BasicBlock *DefBB = SVI->getParent();

  // Only do this xform if variable vector shifts are particularly expensive.
  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
    return false;

  // We only expect better codegen by sinking a shuffle if we can recognise a
  // constant splat.
  if (!isBroadcastShuffle(SVI))
    return false;

  // InsertedShuffles - Only insert a shuffle in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

  bool MadeChange = false;
  for (User *U : SVI->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this shuffle is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;

    // For now only apply this when the splat is used by a shift instruction.
    if (!UI->isShift()) continue;

    // Everything checks out, sink the shuffle if the user's block doesn't
    // already have a copy.
    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];

    if (!InsertedShuffle) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0),
                                              SVI->getOperand(1),
                                              SVI->getOperand(2), "", InsertPt);
    }

    UI->replaceUsesOfWith(SVI, InsertedShuffle);
    MadeChange = true;
  }

  // If we removed all uses, nuke the shuffle.
  if (SVI->use_empty()) {
    SVI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : nullptr,
                                       TLInfo, DT)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI && TLI->getTypeAction(CI->getContext(),
                                    TLI->getValueType(CI->getType())) ==
                     TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = MoveExtToFormExtLoad(I);
        return MadeChange | OptimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType());
    return false;
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI);

    return false;
  }

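  // Illustrative note (the example IR is invented for clarity, not taken from
  // this file): an all-zero-index GEP such as
  //   %f = getelementptr { i32 }* %s, i32 0, i32 0
  // computes no byte offset, so the case below replaces it with the equivalent
  //   %f = bitcast { i32 }* %s to i32*
  // leaving a simple pointer copy instead of an address computation.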
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      OptimizeInst(NC);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return OptimizeCallInst(CI);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return OptimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return OptimizeShuffleVectorInst(SVI);

  return false;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end())
    MadeChange |= OptimizeInst(CurInstIterator++);

  MadeChange |= DupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value it describes, then ISel may not
// be able to handle it properly. ISel will drop llvm.dbg.value if it cannot
// find a node corresponding to the value.
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
      Instruction *Insn = BI; ++BI;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

// If a block ends by branching on the result of comparing a single masked-off
// bit against zero, and the target can fold the mask and compare into a single
// instruction, sink the mask and compare down to the conditional branches that
// use them. Do this before OptimizeBlock -> OptimizeInst ->
// OptimizeCmpExpression, which perturbs the pattern being searched for.
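// Note: the and/icmp are duplicated next to each out-of-block conditional
// branch user rather than moved; the originals keep feeding the branch in
// their own block. The isMaskAndBranchFoldingLegal() hook consulted below is
// typically set by targets with a test-bit-and-branch instruction (for
// example AArch64's TBZ/TBNZ; mentioned here only as an illustration).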
bool CodeGenPrepare::sinkAndCmp(Function &F) {
  if (!EnableAndCmpSinking)
    return false;
  if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
    return false;
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // Does this BB end with the following?
    //   %andVal = and %val, #single-bit-set
    //   %icmpVal = icmp %andVal, 0
    //   br i1 %icmpVal, label %dest1, label %dest2
    BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
    if (!Brcc || !Brcc->isConditional())
      continue;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
    if (!Cmp || Cmp->getParent() != BB)
      continue;
    ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
    if (!Zero || !Zero->isZero())
      continue;
    Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
    if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
      continue;
    ConstantInt *Mask = dyn_cast<ConstantInt>(And->getOperand(1));
    if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
      continue;
    DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());

    // Push the "and; icmp" for any users that are conditional branches.
    // Since there can only be one branch use per BB, we don't need to keep
    // track of which BBs we insert into.
    for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
         UI != E; ) {
      Use &TheUse = *UI;
      // Find brcc use.
      BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
      ++UI;
      if (!BrccUser || !BrccUser->isConditional())
        continue;
      BasicBlock *UserBB = BrccUser->getParent();
      if (UserBB == BB) continue;
      DEBUG(dbgs() << "found Brcc use\n");

      // Sink the "and; icmp" to the use.
      MadeChange = true;
      BinaryOperator *NewAnd =
        BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
                                  BrccUser);
      CmpInst *NewCmp =
        CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
                        "", BrccUser);
      TheUse = NewCmp;
      ++NumAndCmpsMoved;
      DEBUG(BrccUser->getParent()->dump());
    }
  }
  return MadeChange;
}