//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(false),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
typedef DenseMap<Instruction *, Type *> InstrToOrigTy;

class CodeGenPrepare : public FunctionPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetMachine *TM;
  const TargetLowering *TLI;
  const TargetLibraryInfo *TLInfo;
  DominatorTree *DT;

  /// CurInstIterator - As we scan instructions optimizing them, this is the
  /// next instruction to optimize. Xforms that can invalidate this should
  /// update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address.
  ValueMap<Value*, Value*> SunkAddrs;

  /// Keeps track of all truncates inserted for the current function.
  SetOfInstrs InsertedTruncsSet;
  /// Keeps track of the original type of each instruction before its
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// ModifiedDT - If the CFG is modified in any way, the dominator tree may
  /// need to be updated.
  bool ModifiedDT;

  /// OptSize - True if optimizing for size.
  bool OptSize;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  const char *getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfo>();
  }

private:
  bool EliminateFallThrough(Function &F);
  bool EliminateMostlyEmptyBlocks(Function &F);
  bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void EliminateMostlyEmptyBlock(BasicBlock *BB);
  bool OptimizeBlock(BasicBlock &BB);
  bool OptimizeInst(Instruction *I);
  bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
  bool OptimizeInlineAsmInst(CallInst *CS);
  bool OptimizeCallInst(CallInst *CI);
  bool MoveExtToFormExtLoad(Instruction *I);
  bool OptimizeExtUses(Instruction *I);
  bool OptimizeSelectInst(SelectInst *SI);
  bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool DupRetToEnableTailCallOpts(BasicBlock *BB);
  bool PlaceDbgValues(Function &F);
  bool sinkAndCmp(Function &F);
};
}

char CodeGenPrepare::ID = 0;
static void *initializeCodeGenPreparePassOnce(PassRegistry &Registry) {
  initializeTargetLibraryInfoPass(Registry);
  PassInfo *PI = new PassInfo(
      "Optimize for code generation", "codegenprepare", &CodeGenPrepare::ID,
      PassInfo::NormalCtor_t(callDefaultCtor<CodeGenPrepare>), false, false,
      PassInfo::TargetMachineCtor_t(callTargetMachineCtor<CodeGenPrepare>));
  Registry.registerPass(*PI, true);
  return PI;
}

void llvm::initializeCodeGenPreparePass(PassRegistry &Registry) {
  CALL_ONCE_INITIALIZATION(initializeCodeGenPreparePassOnce)
}

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

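  // The high-level flow below: reset the per-function state, run the one-off
  // CFG cleanups (slow-division bypass, mostly-empty-block elimination,
  // dbg.value placement, and/cmp sinking), iterate OptimizeBlock to a fixed
  // point, then constant-fold terminators and delete any blocks that became
  // dead.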
  bool EverMadeChange = false;
  // Clear per-function information.
  InsertedTruncsSet.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (TM) TLI = TM->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::OptimizeForSize);

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); I++)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, ISel may not be able to
  // handle it properly. ISel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  // If there is a mask, compare against zero, and branch that can be combined
  // into a single target instruction, push the mask and compare into branch
  // users. Do this before OptimizeBlock -> OptimizeInst ->
  // OptimizeCmpExpression, which perturbs the pattern being searched for.
  if (!DisableBranchOpts)
    EverMadeChange |= sinkAndCmp(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
      MadeChange |= ConstantFoldTerminator(BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= EliminateFallThrough(F);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->recalculate(F);

  return EverMadeChange;
}

/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB;
    }
  }
  return Changed;
}

/// EliminateMostlyEmptyBlocks - Eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}
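
// As an illustration, EliminateMostlyEmptyBlocks removes blocks of the form
//   bb:                                        ; preds = %entry
//     %p = phi i32 [ %x, %entry ]
//     br label %dest
// by merging %p into the phis of %dest and deleting bb.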

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there is a more complex condition (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}
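
// An example of a conflict that CanMergeBlocks rejects: BB and DestBB share a
// predecessor %common, and a phi in DestBB would receive two different values
// for it after the merge, e.g.
//   destbb: %p = phi i32 [ %a, %common ], [ %b, %bb ]
// where %b's incoming value for %common differs from %a.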

/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi nodes
/// and an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// SinkCast - Sink the specified cast instruction into its user blocks.
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy, compute the value types first.
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // If this is an fp<->int conversion, it is not a noop.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// OptimizeCmpExpression - Sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This
/// is a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmps - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}
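
// For instance, a cmp defined in one block but used only in another:
//   bb1:
//     %c = icmp eq i32 %a, %b
//     br label %bb2
//   bb2:
//     br i1 %c, label %t, label %f
// is rewritten so that a copy of the icmp sits in bb2 next to the branch,
// keeping the condition and its use together for instruction selection.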

/// isExtractBitsCandidateUse - Check if the candidate use could be combined
/// with a shift instruction. Such a use is either:
/// 1. A truncate instruction
/// 2. An 'and' instruction whose immediate is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// SinkShiftAndTruncate - Sink both the shift and the truncate instruction
/// to the block of the truncate's user.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an implicit
    // truncate.
    if (TLI.isOperationLegalOrCustom(ISDOpcode,
                                     EVT::getEVT(TruncUser->getType())))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// OptimizeExtractBits - Sink the shift *right* instruction into user blocks
/// if its uses could potentially be combined with the shift to generate a
/// BitExtract instruction. This is only applied if the architecture
/// supports a BitExtract instruction. Here is an example:
///   BB1:
///     %x.extract.shift = lshr i64 %arg1, 32
///   BB2:
///     %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
///   BB2:
///     %x.extract.shift.1 = lshr i64 %arg1, 32
///     %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instruction are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if it is
      // not legal. In this case, we would like to sink both the shift and the
      // truncate instruction to the BB of TruncUse.
      // For example:
      //   BB1:
      //     i64 shift.result = lshr i64 opnd, imm
      //     trunc.result = trunc shift.result to i16
      //
      //   BB2:
      //     ----> We will have an implicit truncate here if the architecture
      //     does not have an i16 compare.
      //     cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
  if (ShiftI->use_empty())
    ShiftI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) override {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const override {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator. Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    replaceAndRecursivelySimplify(CI, RetVal,
                                  TLI ? TLI->getDataLayout() : nullptr,
                                  TLInfo, ModifiedDT ? nullptr : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }
  // Lower all uses of llvm.safe.[us]{div|rem}...
  if (II &&
      (II->getIntrinsicID() == Intrinsic::safe_sdiv ||
       II->getIntrinsicID() == Intrinsic::safe_udiv ||
       II->getIntrinsicID() == Intrinsic::safe_srem ||
       II->getIntrinsicID() == Intrinsic::safe_urem)) {
    // Given
    //   result_struct = type {iN, i1}
    //   %R = call result_struct llvm.safe.sdiv.iN(iN %x, iN %y)
    // Expand it to actual IR, which produces the result in the same
    // variable %R. The first element of the result (%R.1) is the result of
    // the division, the second (%R.2) indicates whether the division was
    // erroneous:
    //   If %y is 0, %R.1 is 0, %R.2 is 1.                            (1)
    //   If %x is minSignedValue and %y is -1, %R.1 is %x, %R.2 is 1. (2)
    //   In other cases %R.1 is (sdiv %x, %y), %R.2 is 0.             (3)
    //
    // The same applies to the srem, udiv, and urem intrinsics, except that
    // the unsigned variants don't check condition (2).
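    //
    // The expansion below builds (for the signed case) roughly this CFG:
    //   StartBB:        %cmp.rhs.zero = icmp eq iN %y, 0
    //                   br i1 %cmp.rhs.zero, %div.divz, %div.chkdivmin
    //   div.chkdivmin:  %ovf = and (%y == -1), (%x == INT_MIN)
    //                   br i1 %ovf, %div.divmin, %div.div
    //   div.div:        the actual sdiv/srem
    //   div.end:        phis merge the value and the flag from all paths.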

    bool IsSigned;
    BinaryOperator::BinaryOps Op;
    switch (II->getIntrinsicID()) {
    case Intrinsic::safe_sdiv:
      IsSigned = true;
      Op = Instruction::SDiv;
      break;
    case Intrinsic::safe_udiv:
      IsSigned = false;
      Op = Instruction::UDiv;
      break;
    case Intrinsic::safe_srem:
      IsSigned = true;
      Op = Instruction::SRem;
      break;
    case Intrinsic::safe_urem:
      IsSigned = false;
      Op = Instruction::URem;
      break;
    default:
      llvm_unreachable("Only Div/Rem intrinsics are handled here.");
    }

    Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
    bool DivWellDefined = TLI && TLI->isDivWellDefined();

    bool ResultNeeded[2] = {false, false};
    SmallVector<User*, 1> ResultsUsers[2];
    bool BadCase = false;
    for (User *U: II->users()) {
      ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
      if (!EVI || EVI->getNumIndices() > 1 || EVI->getIndices()[0] > 1) {
        BadCase = true;
        break;
      }
      ResultNeeded[EVI->getIndices()[0]] = true;
      ResultsUsers[EVI->getIndices()[0]].push_back(U);
    }
    // Behave conservatively if there is an unusual user of the results.
    if (BadCase)
      ResultNeeded[0] = ResultNeeded[1] = true;

    // Early exit if none of the results is ever used.
    if (!ResultNeeded[0] && !ResultNeeded[1]) {
      II->eraseFromParent();
      return true;
    }

    // Early exit if the second result (flag) isn't used and the target
    // div-instruction computes exactly what we want to get as the first
    // result and never traps.
    if (ResultNeeded[0] && !ResultNeeded[1] && DivWellDefined) {
      BinaryOperator *Div = BinaryOperator::Create(Op, LHS, RHS);
      Div->insertAfter(II);
      for (User *U: ResultsUsers[0]) {
        Instruction *UserInst = dyn_cast<Instruction>(U);
        assert(UserInst && "Unexpected null-instruction");
        UserInst->replaceAllUsesWith(Div);
        UserInst->eraseFromParent();
      }
      II->eraseFromParent();
      CurInstIterator = Div;
      ModifiedDT = true;
      return true;
    }

    Value *MinusOne = Constant::getAllOnesValue(LHS->getType());
    Value *Zero = Constant::getNullValue(LHS->getType());

    // Split the original BB and create other basic blocks that will be used
    // for checks.
    BasicBlock *StartBB = II->getParent();
    BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(II));
    BasicBlock *NextBB = StartBB->splitBasicBlock(SplitPt, "div.end");

    BasicBlock *DivByZeroBB;
    DivByZeroBB = BasicBlock::Create(II->getContext(), "div.divz",
                                     NextBB->getParent(), NextBB);
    BranchInst::Create(NextBB, DivByZeroBB);
    BasicBlock *DivBB = BasicBlock::Create(II->getContext(), "div.div",
                                           NextBB->getParent(), NextBB);
    BranchInst::Create(NextBB, DivBB);

    // For signed variants, check the condition (2):
    // LHS == SignedMinValue, RHS == -1.
    Value *CmpMinusOne;
    Value *CmpMinValue;
    BasicBlock *ChkDivMinBB;
    BasicBlock *DivMinBB;
    Value *MinValue;
    if (IsSigned) {
      APInt SignedMinValue =
          APInt::getSignedMinValue(LHS->getType()->getPrimitiveSizeInBits());
      MinValue = Constant::getIntegerValue(LHS->getType(), SignedMinValue);
      ChkDivMinBB = BasicBlock::Create(II->getContext(), "div.chkdivmin",
                                       NextBB->getParent(), NextBB);
      BranchInst::Create(NextBB, ChkDivMinBB);
      DivMinBB = BasicBlock::Create(II->getContext(), "div.divmin",
                                    NextBB->getParent(), NextBB);
      BranchInst::Create(NextBB, DivMinBB);
      CmpMinusOne = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                    RHS, MinusOne, "cmp.rhs.minus.one",
                                    ChkDivMinBB->getTerminator());
      CmpMinValue = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                    LHS, MinValue, "cmp.lhs.signed.min",
                                    ChkDivMinBB->getTerminator());
      BinaryOperator *CmpSignedOvf = BinaryOperator::Create(Instruction::And,
                                                            CmpMinusOne,
                                                            CmpMinValue);
      // Here we're interested in the case when %x is TMin and %y is -1 at the
      // same time; in this case the result will overflow.
      // If that's not the case, we can perform the usual division. These
      // blocks will be inserted after DivByZero, so the division will be safe.
      CmpSignedOvf->insertBefore(ChkDivMinBB->getTerminator());
      BranchInst::Create(DivMinBB, DivBB, CmpSignedOvf,
                         ChkDivMinBB->getTerminator());
      ChkDivMinBB->getTerminator()->eraseFromParent();
    }

    // Check the condition (1):
    // RHS == 0.
    Value *CmpDivZero = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                        RHS, Zero, "cmp.rhs.zero",
                                        StartBB->getTerminator());

    // If RHS != 0, we want to check condition (2) in the signed case, or
    // proceed to the usual division in the unsigned case.
    BranchInst::Create(DivByZeroBB, IsSigned ? ChkDivMinBB : DivBB, CmpDivZero,
                       StartBB->getTerminator());
    StartBB->getTerminator()->eraseFromParent();

    // At the moment we have all the control flow created. We just need to
    // insert DIV and PHI (if needed) to get the result value.
    Instruction *DivRes, *FlagRes;
    Instruction *InsPoint = nullptr;
    if (ResultNeeded[0]) {
      BinaryOperator *Div = BinaryOperator::Create(Op, LHS, RHS);
      if (DivWellDefined) {
        // The result value is the result of the DIV operation placed right
        // at the original place of the intrinsic.
        Div->insertAfter(II);
        DivRes = Div;
      } else {
        // The result is a PHI-node.
        Div->insertBefore(DivBB->getTerminator());
        PHINode *DivResPN =
          PHINode::Create(LHS->getType(), IsSigned ? 3 : 2, "div.res.phi",
                          NextBB->begin());
        DivResPN->addIncoming(Div, DivBB);
        DivResPN->addIncoming(Zero, DivByZeroBB);
        if (IsSigned)
          DivResPN->addIncoming(MinValue, DivMinBB);
        DivRes = DivResPN;
        InsPoint = DivResPN;
      }
    }

    // Prepare a value for the second result (flag) if it is needed.
    if (ResultNeeded[1]) {
      Type *FlagTy = II->getType()->getStructElementType(1);
      PHINode *FlagResPN =
        PHINode::Create(FlagTy, IsSigned ? 3 : 2, "div.flag.phi",
                        NextBB->begin());
      FlagResPN->addIncoming(Constant::getNullValue(FlagTy), DivBB);
      FlagResPN->addIncoming(Constant::getAllOnesValue(FlagTy), DivByZeroBB);
      if (IsSigned)
        FlagResPN->addIncoming(Constant::getAllOnesValue(FlagTy), DivMinBB);
      FlagRes = FlagResPN;
      if (!InsPoint)
        InsPoint = FlagRes;
    }

    // If possible, propagate the results to the users. Otherwise, create an
    // alloca and build a struct with the results on the stack.
    if (!BadCase) {
      if (ResultNeeded[0]) {
        for (User *U: ResultsUsers[0]) {
          Instruction *UserInst = dyn_cast<Instruction>(U);
          assert(UserInst && "Unexpected null-instruction");
          UserInst->replaceAllUsesWith(DivRes);
          UserInst->eraseFromParent();
        }
      }
      if (ResultNeeded[1]) {
        for (User *FlagU: ResultsUsers[1]) {
          Instruction *FlagUInst = dyn_cast<Instruction>(FlagU);
          assert(FlagUInst && "Unexpected null-instruction");
          FlagUInst->replaceAllUsesWith(FlagRes);
          FlagUInst->eraseFromParent();
        }
      }
    } else {
      // Create an alloca, store our new values to it, and then load the final
      // result from it.
      Constant *Idx0 = ConstantInt::get(Type::getInt32Ty(II->getContext()), 0);
      Constant *Idx1 = ConstantInt::get(Type::getInt32Ty(II->getContext()), 1);
      Value *Idxs_DivRes[2] = {Idx0, Idx0};
      Value *Idxs_FlagRes[2] = {Idx0, Idx1};
      Value *NewRes = new llvm::AllocaInst(II->getType(), 0, "div.res.ptr", II);
      Instruction *ResDivAddr = GetElementPtrInst::Create(NewRes, Idxs_DivRes);
      Instruction *ResFlagAddr =
        GetElementPtrInst::Create(NewRes, Idxs_FlagRes);
      ResDivAddr->insertAfter(InsPoint);
      ResFlagAddr->insertAfter(ResDivAddr);
      StoreInst *StoreResDiv = new StoreInst(DivRes, ResDivAddr);
      StoreInst *StoreResFlag = new StoreInst(FlagRes, ResFlagAddr);
      StoreResDiv->insertAfter(ResFlagAddr);
      StoreResFlag->insertAfter(StoreResDiv);
      LoadInst *LoadRes = new LoadInst(NewRes, "div.res");
      LoadRes->insertAfter(StoreResFlag);
      II->replaceAllUsesWith(LoadRes);
    }

    II->eraseFromParent();
    CurInstIterator = StartBB->end();
    ModifiedDT = true;
    return true;
  }

  if (II && TLI) {
    SmallVector<Value*, 2> PtrOps;
    Type *AccessTy;
    if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty())
        if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
          return true;
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction()) return false;

  // We'll need DataLayout from here on out.
  const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
  if (!TD) return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize. Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD, TLInfo);
}
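
// As an example of the fortified-libcall folding above, a call like
//   __memcpy_chk(dst, src, n, (size_t)-1)
// carries the "unknown object size" marker (-1 in the size argument), so it
// can be safely lowered to the plain memcpy(dst, src, n).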

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a
  /// tail call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch
    // to the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs)
    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

/// \brief This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
class TypePromotionTransaction {

  /// \brief This represents the common interface of the individual transaction.
  /// Each class implements the logic for doing one specific modification on
  /// the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// \brief Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() {}

    /// \brief Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact
    /// same state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// \brief Commit the modification done by this action.
    /// When the results of the action on the IR are to be kept, it is
    /// important to call this function, otherwise hidden information may be
    /// kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// \brief Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;
    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// \brief Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst;
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = --It;
      else
        Point.BB = Inst->getParent();
    }

    /// \brief Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };

  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// \brief Set the operand of an instruction to a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;
    /// Index of the modified operand.
    unsigned Idx;

  public:
    /// \brief Set the operand \p Idx of \p Inst to \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the operand.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// \brief Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Inst = cast<Instruction>(Builder.CreateTrunc(Opnd, Ty, "promoted"));
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Inst << "\n");
    }

    /// \brief Get the built instruction.
    Instruction *getBuiltInstruction() { return Inst; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Inst << "\n");
      Inst->eraseFromParent();
    }
  };

  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      // Seed the base class with InsertPt rather than the (still
      // uninitialized) Inst member; Inst is reset below to the sext that is
      // actually built.
      IRBuilder<> Builder(InsertPt);
      Inst = cast<Instruction>(Builder.CreateSExt(Opnd, Ty, "promoted"));
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Inst << "\n");
    }

    /// \brief Get the built instruction.
    Instruction *getBuiltInstruction() { return Inst; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Inst << "\n");
      Inst->eraseFromParent();
    }
  };

  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// \brief Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction that uses the replaced instruction.
      Instruction *Inst;
      /// The operand index at which the replaced instruction is used.
      unsigned Idx;
      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;

  public:
    /// \brief Replace all the uses of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };

  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;
    /// Keep track of the uses replaced, if any.
1611 UsesReplacer *Replacer;
1612
1613 public:
1614 /// \brief Remove all references to \p Inst and optionally replace all its
1615 /// uses with \p New.
1616 /// \pre If !Inst->use_empty(), then New != nullptr
1617 InstructionRemover(Instruction *Inst, Value *New = nullptr)
1618 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
1619 Replacer(nullptr) {
1620 if (New)
1621 Replacer = new UsesReplacer(Inst, New);
1622 DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
1623 Inst->removeFromParent();
1624 }
1625
1626 ~InstructionRemover() { delete Replacer; }
1627
1628 /// \brief Really remove the instruction.
1629 void commit() override { delete Inst; }
1630
1631 /// \brief Resurrect the instruction and reassign it to its original uses if
1632 /// a new value was provided when this action was built.
1633 void undo() override {
1634 DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
1635 Inserter.insert(Inst);
1636 if (Replacer)
1637 Replacer->undo();
1638 Hider.undo();
1639 }
1640 };
1641
1642 public:
1643 /// Restoration point.
1644 /// The restoration point is a pointer to an action instead of an iterator
1645 /// because the iterator may be invalidated but not the pointer.
1646 typedef const TypePromotionAction *ConstRestorationPt;
1647 /// Commit all the changes made in this transaction.
1648 void commit();
1649 /// Undo all the changes made after the given point.
1650 void rollback(ConstRestorationPt Point);
1651 /// Get the current restoration point.
1652 ConstRestorationPt getRestorationPoint() const;
1653
1654 /// \name API for IR modification with state keeping to support rollback.
1655 /// @{
1656 /// Same as Instruction::setOperand.
1657 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
1658 /// Same as Instruction::eraseFromParent.
1659 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
1660 /// Same as Value::replaceAllUsesWith.
1661 void replaceAllUsesWith(Instruction *Inst, Value *New);
1662 /// Same as Value::mutateType.
1663 void mutateType(Instruction *Inst, Type *NewTy);
1664 /// Same as IRBuilder::CreateTrunc.
1665 Instruction *createTrunc(Instruction *Opnd, Type *Ty);
1666 /// Same as IRBuilder::CreateSExt.
1667 Instruction *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
1668 /// Same as Instruction::moveBefore.
1669 void moveBefore(Instruction *Inst, Instruction *Before);
1670 /// @}
1671
1672 private:
1673 /// The ordered list of actions made so far.
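/// rollback() pops actions off the back of this list and undoes them, so
/// the most recent change is always reverted first.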
1674 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 1675 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt; 1676 }; 1677 1678 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 1679 Value *NewVal) { 1680 Actions.push_back( 1681 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal)); 1682 } 1683 1684 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 1685 Value *NewVal) { 1686 Actions.push_back( 1687 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal)); 1688 } 1689 1690 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 1691 Value *New) { 1692 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 1693 } 1694 1695 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 1696 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 1697 } 1698 1699 Instruction *TypePromotionTransaction::createTrunc(Instruction *Opnd, 1700 Type *Ty) { 1701 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 1702 Instruction *I = Ptr->getBuiltInstruction(); 1703 Actions.push_back(std::move(Ptr)); 1704 return I; 1705 } 1706 1707 Instruction *TypePromotionTransaction::createSExt(Instruction *Inst, 1708 Value *Opnd, Type *Ty) { 1709 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 1710 Instruction *I = Ptr->getBuiltInstruction(); 1711 Actions.push_back(std::move(Ptr)); 1712 return I; 1713 } 1714 1715 void TypePromotionTransaction::moveBefore(Instruction *Inst, 1716 Instruction *Before) { 1717 Actions.push_back( 1718 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before)); 1719 } 1720 1721 TypePromotionTransaction::ConstRestorationPt 1722 TypePromotionTransaction::getRestorationPoint() const { 1723 return !Actions.empty() ? Actions.back().get() : nullptr; 1724 } 1725 1726 void TypePromotionTransaction::commit() { 1727 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 1728 ++It) 1729 (*It)->commit(); 1730 Actions.clear(); 1731 } 1732 1733 void TypePromotionTransaction::rollback( 1734 TypePromotionTransaction::ConstRestorationPt Point) { 1735 while (!Actions.empty() && Point != Actions.back().get()) { 1736 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 1737 Curr->undo(); 1738 } 1739 } 1740 1741 /// \brief A helper class for matching addressing modes. 1742 /// 1743 /// This encapsulates the logic for matching the target-legal addressing modes. 1744 class AddressingModeMatcher { 1745 SmallVectorImpl<Instruction*> &AddrModeInsts; 1746 const TargetLowering &TLI; 1747 1748 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 1749 /// the memory instruction that we're computing this address for. 1750 Type *AccessTy; 1751 Instruction *MemoryInst; 1752 1753 /// AddrMode - This is the addressing mode that we're building up. This is 1754 /// part of the return value of this addressing mode matching stuff. 1755 ExtAddrMode &AddrMode; 1756 1757 /// The truncate instruction inserted by other CodeGenPrepare optimizations. 1758 const SetOfInstrs &InsertedTruncs; 1759 /// A map from the instructions to their type before promotion. 1760 InstrToOrigTy &PromotedInsts; 1761 /// The ongoing transaction where every action should be registered. 1762 TypePromotionTransaction &TPT; 1763 1764 /// IgnoreProfitability - This is set to true when we should not do 1765 /// profitability checks. 
When true, IsProfitableToFoldIntoAddressingMode
1766 /// always returns true.
1767 bool IgnoreProfitability;
1768
1769 AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
1770 const TargetLowering &T, Type *AT,
1771 Instruction *MI, ExtAddrMode &AM,
1772 const SetOfInstrs &InsertedTruncs,
1773 InstrToOrigTy &PromotedInsts,
1774 TypePromotionTransaction &TPT)
1775 : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM),
1776 InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) {
1777 IgnoreProfitability = false;
1778 }
1779 public:
1780
1781 /// Match - Find the maximal addressing mode that a load/store of V can fold,
1782 /// given an access type of AccessTy. This returns a list of involved
1783 /// instructions in AddrModeInsts.
1784 /// \p InsertedTruncs The truncate instructions inserted by other
1785 /// CodeGenPrepare optimizations.
1786 /// \p PromotedInsts maps the instructions to their type before promotion.
1787 /// \p TPT The ongoing transaction where every action should be registered.
1788 static ExtAddrMode Match(Value *V, Type *AccessTy,
1790 Instruction *MemoryInst,
1791 SmallVectorImpl<Instruction*> &AddrModeInsts,
1792 const TargetLowering &TLI,
1793 const SetOfInstrs &InsertedTruncs,
1794 InstrToOrigTy &PromotedInsts,
1795 TypePromotionTransaction &TPT) {
1796 ExtAddrMode Result;
1797
1798 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
1799 MemoryInst, Result, InsertedTruncs,
1800 PromotedInsts, TPT).MatchAddr(V, 0);
1801 (void)Success; assert(Success && "Couldn't select *anything*?");
1802 return Result;
1803 }
1804 private:
1805 bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
1806 bool MatchAddr(Value *V, unsigned Depth);
1807 bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
1808 bool *MovedAway = nullptr);
1809 bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
1810 ExtAddrMode &AMBefore,
1811 ExtAddrMode &AMAfter);
1812 bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
1813 bool IsPromotionProfitable(unsigned MatchedSize, unsigned SizeWithPromotion,
1814 Value *PromotedOperand) const;
1815 };
1816
1817 /// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
1818 /// Return true and update AddrMode if this addr mode is legal for the target,
1819 /// false if not.
1820 bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
1821 unsigned Depth) {
1822 // If Scale is 1, then this is the same as adding ScaleReg to the addressing
1823 // mode. Just process that directly.
1824 if (Scale == 1)
1825 return MatchAddr(ScaleReg, Depth);
1826
1827 // If the scale is 0, it takes nothing to add this.
1828 if (Scale == 0)
1829 return true;
1830
1831 // If we already have a scale of this value, we can add to it, otherwise, we
1832 // need an available scale field.
1833 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
1834 return false;
1835
1836 ExtAddrMode TestAddrMode = AddrMode;
1837
1838 // Add scale to turn X*4+X*3 -> X*7. This could also do things like
1839 // [A+B + A*7] -> [B+A*8].
1840 TestAddrMode.Scale += Scale;
1841 TestAddrMode.ScaledReg = ScaleReg;
1842
1843 // If the new address isn't legal, bail out.
1844 if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
1845 return false;
1846
1847 // It was legal, so commit it.
1848 AddrMode = TestAddrMode;
1849
1850 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
1851 // to see if ScaleReg is actually X+C. If so, we can turn this into adding
1852 // X*Scale + C*Scale to addr mode.
1853 ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
1854 if (isa<Instruction>(ScaleReg) && // not a constant expr.
1855 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
1856 TestAddrMode.ScaledReg = AddLHS;
1857 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
1858
1859 // If this addressing mode is legal, commit it and remember that we folded
1860 // this instruction.
1861 if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
1862 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
1863 AddrMode = TestAddrMode;
1864 return true;
1865 }
1866 }
1867
1868 // Otherwise, not (x+c)*scale, just return what we have.
1869 return true;
1870 }
1871
1872 /// MightBeFoldableInst - This is a little filter, which returns true if an
1873 /// addressing computation involving I might be folded into a load/store
1874 /// accessing it. This doesn't need to be perfect, but needs to accept at least
1875 /// the set of instructions that MatchOperationAddr can.
1876 static bool MightBeFoldableInst(Instruction *I) {
1877 switch (I->getOpcode()) {
1878 case Instruction::BitCast:
1879 // Don't touch identity bitcasts.
1880 if (I->getType() == I->getOperand(0)->getType())
1881 return false;
1882 return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
1883 case Instruction::PtrToInt:
1884 // PtrToInt is always a noop, as we know that the int type is pointer sized.
1885 return true;
1886 case Instruction::IntToPtr:
1887 // We know the input is intptr_t, so this is foldable.
1888 return true;
1889 case Instruction::Add:
1890 return true;
1891 case Instruction::Mul:
1892 case Instruction::Shl:
1893 // Can only handle X*C and X << C.
1894 return isa<ConstantInt>(I->getOperand(1));
1895 case Instruction::GetElementPtr:
1896 return true;
1897 default:
1898 return false;
1899 }
1900 }
1901
1902 /// \brief Helper class to perform type promotion.
1903 class TypePromotionHelper {
1904 /// \brief Utility function to check whether or not a sign extension of
1905 /// \p Inst to \p ConsideredSExtType can be moved through \p Inst by either
1906 /// using the operands of \p Inst or promoting \p Inst.
1907 /// In other words, check if:
1908 /// sext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredSExtType.
1909 /// #1 Promotion applies:
1910 /// ConsideredSExtType Inst (sext opnd1 to ConsideredSExtType, ...).
1911 /// #2 Operand reuses:
1912 /// sext opnd1 to ConsideredSExtType.
1913 /// \p PromotedInsts maps the instructions to their type before promotion.
1914 static bool canGetThrough(const Instruction *Inst, Type *ConsideredSExtType,
1915 const InstrToOrigTy &PromotedInsts);
1916
1917 /// \brief Utility function to determine if \p OpIdx should be promoted when
1918 /// promoting \p Inst.
1919 static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
1920 if (isa<SelectInst>(Inst) && OpIdx == 0)
1921 return false;
1922 return true;
1923 }
1924
1925 /// \brief Utility function to promote the operand of \p SExt when this
1926 /// operand is a promotable trunc or sext.
1927 /// \p PromotedInsts maps the instructions to their type before promotion.
1928 /// \p CreatedInsts[out] contains how many non-free instructions have been
1929 /// created to promote the operand of SExt.
1930 /// Should never be called directly.
1931 /// \return The promoted value which is used instead of SExt.
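/// E.g., with '%t = trunc i64 %opnd to i32' and '%s = sext i32 %t to i64',
/// %s can be rewritten to use %opnd directly, since the truncate only
/// dropped bits that were sign extended bits in the first place.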
1932 static Value *promoteOperandForTruncAndSExt(Instruction *SExt,
1933 TypePromotionTransaction &TPT,
1934 InstrToOrigTy &PromotedInsts,
1935 unsigned &CreatedInsts);
1936
1937 /// \brief Utility function to promote the operand of \p SExt when this
1938 /// operand is promotable and is not a supported trunc or sext.
1939 /// \p PromotedInsts maps the instructions to their type before promotion.
1940 /// \p CreatedInsts[out] contains how many non-free instructions have been
1941 /// created to promote the operand of SExt.
1942 /// Should never be called directly.
1943 /// \return The promoted value which is used instead of SExt.
1944 static Value *promoteOperandForOther(Instruction *SExt,
1945 TypePromotionTransaction &TPT,
1946 InstrToOrigTy &PromotedInsts,
1947 unsigned &CreatedInsts);
1948
1949 public:
1950 /// Type for the utility function that promotes the operand of SExt.
1951 typedef Value *(*Action)(Instruction *SExt, TypePromotionTransaction &TPT,
1952 InstrToOrigTy &PromotedInsts,
1953 unsigned &CreatedInsts);
1954 /// \brief Given a sign extend instruction \p SExt, return the appropriate
1955 /// action to promote the operand of \p SExt instead of using SExt.
1956 /// \return NULL if no promotable action is possible with the current
1957 /// sign extension.
1958 /// \p InsertedTruncs keeps track of all the truncate instructions inserted by
1959 /// the other CodeGenPrepare optimizations. This information is important
1960 /// because we do not want to promote these instructions as CodeGenPrepare
1961 /// will reinsert them later, thus creating an infinite create/remove loop.
1962 /// \p PromotedInsts maps the instructions to their type before promotion.
1963 static Action getAction(Instruction *SExt, const SetOfInstrs &InsertedTruncs,
1964 const TargetLowering &TLI,
1965 const InstrToOrigTy &PromotedInsts);
1966 };
1967
1968 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
1969 Type *ConsideredSExtType,
1970 const InstrToOrigTy &PromotedInsts) {
1971 // We can always get through sext.
1972 if (isa<SExtInst>(Inst))
1973 return true;
1974
1975 // We can get through a binary operator if it is legal. In other words, the
1976 // binary operator must have a nuw or nsw flag.
1977 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
1978 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
1979 (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
1980 return true;
1981
1982 // Check if we can do the following simplification.
1983 // sext(trunc(sext)) --> sext
1984 if (!isa<TruncInst>(Inst))
1985 return false;
1986
1987 Value *OpndVal = Inst->getOperand(0);
1988 // Check if we can use this operand in the sext.
1989 // If the type is larger than the result type of the sign extension,
1990 // we cannot.
1991 if (OpndVal->getType()->getIntegerBitWidth() >
1992 ConsideredSExtType->getIntegerBitWidth())
1993 return false;
1994
1995 // If the operand of the truncate is not an instruction, we will not have
1996 // any information on the dropped bits.
1997 // (Actually we could for constants but it is not worth the extra logic).
1998 Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
1999 if (!Opnd)
2000 return false;
2001
2002 // Check if the source of the truncate is narrow enough.
2003 // I.e., check that the trunc just drops sign extended bits.
2004 // #1 get the type of the operand.
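// The original type is either recorded in PromotedInsts (when Opnd was
// promoted earlier) or, when Opnd is itself a sext, given by the type of
// the sext's source.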
2005 const Type *OpndType;
2006 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
2007 if (It != PromotedInsts.end())
2008 OpndType = It->second;
2009 else if (isa<SExtInst>(Opnd))
2010 OpndType = cast<Instruction>(Opnd)->getOperand(0)->getType();
2011 else
2012 return false;
2013
2014 // #2 check that the truncate just drops sign extended bits.
2015 if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth())
2016 return true;
2017
2018 return false;
2019 }
2020
2021 TypePromotionHelper::Action TypePromotionHelper::getAction(
2022 Instruction *SExt, const SetOfInstrs &InsertedTruncs,
2023 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
2024 Instruction *SExtOpnd = dyn_cast<Instruction>(SExt->getOperand(0));
2025 Type *SExtTy = SExt->getType();
2026 // If the operand of the sign extension is not an instruction, we cannot
2027 // get through.
2028 // If it is, check that we can get through it.
2029 if (!SExtOpnd || !canGetThrough(SExtOpnd, SExtTy, PromotedInsts))
2030 return nullptr;
2031
2032 // Do not promote if the operand has been added by codegenprepare.
2033 // Otherwise, it means we are undoing an optimization that is likely to be
2034 // redone, thus causing a potential infinite loop.
2035 if (isa<TruncInst>(SExtOpnd) && InsertedTruncs.count(SExtOpnd))
2036 return nullptr;
2037
2038 // SExt or Trunc instructions.
2039 // Return the related handler.
2040 if (isa<SExtInst>(SExtOpnd) || isa<TruncInst>(SExtOpnd))
2041 return promoteOperandForTruncAndSExt;
2042
2043 // Regular instruction.
2044 // Abort early if we will have to insert non-free instructions.
2045 if (!SExtOpnd->hasOneUse() &&
2046 !TLI.isTruncateFree(SExtTy, SExtOpnd->getType()))
2047 return nullptr;
2048 return promoteOperandForOther;
2049 }
2050
2051 Value *TypePromotionHelper::promoteOperandForTruncAndSExt(
2052 llvm::Instruction *SExt, TypePromotionTransaction &TPT,
2053 InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts) {
2054 // By construction, the operand of SExt is an instruction. Otherwise we cannot
2055 // get through it and this method should not be called.
2056 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
2057 // Replace sext(trunc(opnd)) or sext(sext(opnd))
2058 // => sext(opnd).
2059 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
2060 CreatedInsts = 0;
2061
2062 // Remove dead code.
2063 if (SExtOpnd->use_empty())
2064 TPT.eraseInstruction(SExtOpnd);
2065
2066 // Check if the sext is still needed.
2067 if (SExt->getType() != SExt->getOperand(0)->getType())
2068 return SExt;
2069
2070 // At this point we have: sext ty opnd to ty.
2071 // Reassign the uses of SExt to the opnd and remove SExt.
2072 Value *NextVal = SExt->getOperand(0);
2073 TPT.eraseInstruction(SExt, NextVal);
2074 return NextVal;
2075 }
2076
2077 Value *
2078 TypePromotionHelper::promoteOperandForOther(Instruction *SExt,
2079 TypePromotionTransaction &TPT,
2080 InstrToOrigTy &PromotedInsts,
2081 unsigned &CreatedInsts) {
2082 // By construction, the operand of SExt is an instruction. Otherwise we cannot
2083 // get through it and this method should not be called.
2084 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
2085 CreatedInsts = 0;
2086 if (!SExtOpnd->hasOneUse()) {
2087 // SExtOpnd will be promoted.
2088 // All its uses, except SExt, will need to use a truncated value of the
2089 // promoted version.
2090 // Create the truncate now.
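// E.g., if %opnd of type i32 gets promoted to i64, the other users keep an
// i32 view of the value through something like (names illustrative):
//   %promoted.trunc = trunc i64 %opnd.promoted to i32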
2091 Instruction *Trunc = TPT.createTrunc(SExt, SExtOpnd->getType());
2092 Trunc->removeFromParent();
2093 // Insert it just after the definition.
2094 Trunc->insertAfter(SExtOpnd);
2095
2096 TPT.replaceAllUsesWith(SExtOpnd, Trunc);
2097 // Restore the operand of SExt (which has been replaced by the previous call
2098 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
2099 TPT.setOperand(SExt, 0, SExtOpnd);
2100 }
2101
2102 // Get through the Instruction:
2103 // 1. Update its type.
2104 // 2. Replace the uses of SExt by Inst.
2105 // 3. Sign extend each operand that needs to be sign extended.
2106
2107 // Remember the original type of the instruction before promotion.
2108 // This is useful to know that the high bits are sign extended bits.
2109 PromotedInsts.insert(
2110 std::pair<Instruction *, Type *>(SExtOpnd, SExtOpnd->getType()));
2111 // Step #1.
2112 TPT.mutateType(SExtOpnd, SExt->getType());
2113 // Step #2.
2114 TPT.replaceAllUsesWith(SExt, SExtOpnd);
2115 // Step #3.
2116 Instruction *SExtForOpnd = SExt;
2117
2118 DEBUG(dbgs() << "Propagate SExt to operands\n");
2119 for (int OpIdx = 0, EndOpIdx = SExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
2120 ++OpIdx) {
2121 DEBUG(dbgs() << "Operand:\n" << *(SExtOpnd->getOperand(OpIdx)) << '\n');
2122 if (SExtOpnd->getOperand(OpIdx)->getType() == SExt->getType() ||
2123 !shouldSExtOperand(SExtOpnd, OpIdx)) {
2124 DEBUG(dbgs() << "No need to propagate\n");
2125 continue;
2126 }
2127 // Check if we can statically sign extend the operand.
2128 Value *Opnd = SExtOpnd->getOperand(OpIdx);
2129 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
2130 DEBUG(dbgs() << "Statically sign extend\n");
2131 TPT.setOperand(
2132 SExtOpnd, OpIdx,
2133 ConstantInt::getSigned(SExt->getType(), Cst->getSExtValue()));
2134 continue;
2135 }
2136 // UndefValues are typed, so we have to statically sign extend them.
2137 if (isa<UndefValue>(Opnd)) {
2138 DEBUG(dbgs() << "Statically sign extend\n");
2139 TPT.setOperand(SExtOpnd, OpIdx, UndefValue::get(SExt->getType()));
2140 continue;
2141 }
2142
2143 // Otherwise we have to explicitly sign extend the operand.
2144 // Check whether SExt has already been reused to sign extend an operand.
2145 if (!SExtForOpnd) {
2146 // If so, create a new sext for this operand.
2147 DEBUG(dbgs() << "More operands to sext\n");
2148 SExtForOpnd = TPT.createSExt(SExt, Opnd, SExt->getType());
2149 ++CreatedInsts;
2150 }
2151
2152 TPT.setOperand(SExtForOpnd, 0, Opnd);
2153
2154 // Move the sign extension before the insertion point.
2155 TPT.moveBefore(SExtForOpnd, SExtOpnd);
2156 TPT.setOperand(SExtOpnd, OpIdx, SExtForOpnd);
2157 // If more sexts are required, new instructions will have to be created.
2158 SExtForOpnd = nullptr;
2159 }
2160 if (SExtForOpnd == SExt) {
2161 DEBUG(dbgs() << "Sign extension is useless now\n");
2162 TPT.eraseInstruction(SExt);
2163 }
2164 return SExtOpnd;
2165 }
2166
2167 /// IsPromotionProfitable - Check whether or not promoting an instruction
2168 /// to a wider type was profitable.
2169 /// \p MatchedSize gives the number of instructions that have been matched
2170 /// in the addressing mode after the promotion was applied.
2171 /// \p SizeWithPromotion gives the number of created instructions for
2172 /// the promotion plus the number of instructions that have been
2173 /// matched in the addressing mode before the promotion.
2174 /// \p PromotedOperand is the value that has been promoted.
2175 /// \return True if the promotion is profitable, false otherwise.
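/// In other words, the promotion pays off only if the number of instructions
/// matched afterwards is at least the number matched before plus the number
/// of instructions created by the promotion.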
2176 bool
2177 AddressingModeMatcher::IsPromotionProfitable(unsigned MatchedSize,
2178 unsigned SizeWithPromotion,
2179 Value *PromotedOperand) const {
2180 // We folded fewer instructions than we created to promote the operand.
2181 // This is not profitable.
2182 if (MatchedSize < SizeWithPromotion)
2183 return false;
2184 if (MatchedSize > SizeWithPromotion)
2185 return true;
2186 // The promotion is neutral but it may help folding the sign extension in
2187 // loads, for instance.
2188 // Check that we did not create an illegal instruction.
2189 Instruction *PromotedInst = dyn_cast<Instruction>(PromotedOperand);
2190 if (!PromotedInst)
2191 return false;
2192 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
2193 // If the ISDOpcode is undefined, it was undefined before the promotion.
2194 if (!ISDOpcode)
2195 return true;
2196 // Otherwise, check if the promoted instruction is legal or not.
2197 return TLI.isOperationLegalOrCustom(ISDOpcode,
2198 EVT::getEVT(PromotedInst->getType()));
2199 }
2200
2201 /// MatchOperationAddr - Given an instruction or constant expr, see if we can
2202 /// fold the operation into the addressing mode. If so, update the addressing
2203 /// mode and return true, otherwise return false without modifying AddrMode.
2204 /// If \p MovedAway is not NULL, it tells whether or not AddrInst has to be
2205 /// folded into the addressing mode on success.
2206 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
2207 /// because it has been moved away.
2208 /// Thus AddrInst must not be added to the matched instructions.
2209 /// This state can happen when AddrInst is a sext, since it may be moved away.
2210 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
2211 /// not be referenced anymore.
2212 bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
2213 unsigned Depth,
2214 bool *MovedAway) {
2215 // Avoid exponential behavior on extremely deep expression trees.
2216 if (Depth >= 5) return false;
2217
2218 // By default, all matched instructions stay in place.
2219 if (MovedAway)
2220 *MovedAway = false;
2221
2222 switch (Opcode) {
2223 case Instruction::PtrToInt:
2224 // PtrToInt is always a noop, as we know that the int type is pointer sized.
2225 return MatchAddr(AddrInst->getOperand(0), Depth);
2226 case Instruction::IntToPtr:
2227 // This inttoptr is a no-op if the integer type is pointer sized.
2228 if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
2229 TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace()))
2230 return MatchAddr(AddrInst->getOperand(0), Depth);
2231 return false;
2232 case Instruction::BitCast:
2233 // BitCast is always a noop, and we can handle it as long as it is
2234 // int->int or pointer->pointer (we don't want int<->fp or something).
2235 if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
2236 AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
2237 // Don't touch identity bitcasts. These were probably put here by LSR,
2238 // and we don't want to mess around with them. Assume it knows what it
2239 // is doing.
2240 AddrInst->getOperand(0)->getType() != AddrInst->getType())
2241 return MatchAddr(AddrInst->getOperand(0), Depth);
2242 return false;
2243 case Instruction::Add: {
2244 // Check to see if we can merge in the RHS then the LHS. If so, we win.
2245 ExtAddrMode BackupAddrMode = AddrMode;
2246 unsigned OldSize = AddrModeInsts.size();
2247 // Start a transaction at this point.
2248 // The LHS may match but not the RHS.
2249 // Therefore, we need a higher level restoration point to undo a partially
2250 // matched operation.
2251 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2252 TPT.getRestorationPoint();
2253
2254 if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
2255 MatchAddr(AddrInst->getOperand(0), Depth+1))
2256 return true;
2257
2258 // Restore the old addr mode info.
2259 AddrMode = BackupAddrMode;
2260 AddrModeInsts.resize(OldSize);
2261 TPT.rollback(LastKnownGood);
2262
2263 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
2264 if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
2265 MatchAddr(AddrInst->getOperand(1), Depth+1))
2266 return true;
2267
2268 // Otherwise we definitely can't merge the ADD in.
2269 AddrMode = BackupAddrMode;
2270 AddrModeInsts.resize(OldSize);
2271 TPT.rollback(LastKnownGood);
2272 break;
2273 }
2274 //case Instruction::Or:
2275 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
2276 //break;
2277 case Instruction::Mul:
2278 case Instruction::Shl: {
2279 // Can only handle X*C and X << C.
2280 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
2281 if (!RHS) return false;
2282 int64_t Scale = RHS->getSExtValue();
2283 if (Opcode == Instruction::Shl)
2284 Scale = 1LL << Scale;
2285
2286 return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
2287 }
2288 case Instruction::GetElementPtr: {
2289 // Scan the GEP. We check whether it contains constant offsets and at most
2290 // one variable offset.
2291 int VariableOperand = -1;
2292 unsigned VariableScale = 0;
2293
2294 int64_t ConstantOffset = 0;
2295 const DataLayout *TD = TLI.getDataLayout();
2296 gep_type_iterator GTI = gep_type_begin(AddrInst);
2297 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
2298 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
2299 const StructLayout *SL = TD->getStructLayout(STy);
2300 unsigned Idx =
2301 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
2302 ConstantOffset += SL->getElementOffset(Idx);
2303 } else {
2304 uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
2305 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
2306 ConstantOffset += CI->getSExtValue()*TypeSize;
2307 } else if (TypeSize) { // Scales of zero don't do anything.
2308 // We only allow one variable index at the moment.
2309 if (VariableOperand != -1)
2310 return false;
2311
2312 // Remember the variable index.
2313 VariableOperand = i;
2314 VariableScale = TypeSize;
2315 }
2316 }
2317 }
2318
2319 // A common case is for the GEP to only do a constant offset. In this case,
2320 // just add it to the disp field and check validity.
2321 if (VariableOperand == -1) {
2322 AddrMode.BaseOffs += ConstantOffset;
2323 if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
2324 // Check to see if we can fold the base pointer in too.
2325 if (MatchAddr(AddrInst->getOperand(0), Depth+1))
2326 return true;
2327 }
2328 AddrMode.BaseOffs -= ConstantOffset;
2329 return false;
2330 }
2331
2332 // Save the valid addressing mode in case we can't match.
2333 ExtAddrMode BackupAddrMode = AddrMode;
2334 unsigned OldSize = AddrModeInsts.size();
2335
2336 // See if the scale and offset amount is valid for this target.
2337 AddrMode.BaseOffs += ConstantOffset;
2338
2339 // Match the base operand of the GEP.
2340 if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) { 2341 // If it couldn't be matched, just stuff the value in a register. 2342 if (AddrMode.HasBaseReg) { 2343 AddrMode = BackupAddrMode; 2344 AddrModeInsts.resize(OldSize); 2345 return false; 2346 } 2347 AddrMode.HasBaseReg = true; 2348 AddrMode.BaseReg = AddrInst->getOperand(0); 2349 } 2350 2351 // Match the remaining variable portion of the GEP. 2352 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 2353 Depth)) { 2354 // If it couldn't be matched, try stuffing the base into a register 2355 // instead of matching it, and retrying the match of the scale. 2356 AddrMode = BackupAddrMode; 2357 AddrModeInsts.resize(OldSize); 2358 if (AddrMode.HasBaseReg) 2359 return false; 2360 AddrMode.HasBaseReg = true; 2361 AddrMode.BaseReg = AddrInst->getOperand(0); 2362 AddrMode.BaseOffs += ConstantOffset; 2363 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), 2364 VariableScale, Depth)) { 2365 // If even that didn't work, bail. 2366 AddrMode = BackupAddrMode; 2367 AddrModeInsts.resize(OldSize); 2368 return false; 2369 } 2370 } 2371 2372 return true; 2373 } 2374 case Instruction::SExt: { 2375 // Try to move this sext out of the way of the addressing mode. 2376 Instruction *SExt = cast<Instruction>(AddrInst); 2377 // Ask for a method for doing so. 2378 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction( 2379 SExt, InsertedTruncs, TLI, PromotedInsts); 2380 if (!TPH) 2381 return false; 2382 2383 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2384 TPT.getRestorationPoint(); 2385 unsigned CreatedInsts = 0; 2386 Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts); 2387 // SExt has been moved away. 2388 // Thus either it will be rematched later in the recursive calls or it is 2389 // gone. Anyway, we must not fold it into the addressing mode at this point. 2390 // E.g., 2391 // op = add opnd, 1 2392 // idx = sext op 2393 // addr = gep base, idx 2394 // is now: 2395 // promotedOpnd = sext opnd <- no match here 2396 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 2397 // addr = gep base, op <- match 2398 if (MovedAway) 2399 *MovedAway = true; 2400 2401 assert(PromotedOperand && 2402 "TypePromotionHelper should have filtered out those cases"); 2403 2404 ExtAddrMode BackupAddrMode = AddrMode; 2405 unsigned OldSize = AddrModeInsts.size(); 2406 2407 if (!MatchAddr(PromotedOperand, Depth) || 2408 !IsPromotionProfitable(AddrModeInsts.size(), OldSize + CreatedInsts, 2409 PromotedOperand)) { 2410 AddrMode = BackupAddrMode; 2411 AddrModeInsts.resize(OldSize); 2412 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 2413 TPT.rollback(LastKnownGood); 2414 return false; 2415 } 2416 return true; 2417 } 2418 } 2419 return false; 2420 } 2421 2422 /// MatchAddr - If we can, try to add the value of 'Addr' into the current 2423 /// addressing mode. If Addr can't be added to AddrMode this returns false and 2424 /// leaves AddrMode unmodified. This assumes that Addr is either a pointer type 2425 /// or intptr_t for the target. 2426 /// 2427 bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) { 2428 // Start a transaction at this point that we will rollback if the matching 2429 // fails. 2430 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 2431 TPT.getRestorationPoint(); 2432 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 2433 // Fold in immediates if legal for the target. 
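// E.g., when matching the constant 4 from 'add i64 %base, 4', BaseOffs
// becomes 4 and is kept if the target supports a [reg+4] form.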
2434 AddrMode.BaseOffs += CI->getSExtValue();
2435 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2436 return true;
2437 AddrMode.BaseOffs -= CI->getSExtValue();
2438 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
2439 // If this is a global variable, try to fold it into the addressing mode.
2440 if (!AddrMode.BaseGV) {
2441 AddrMode.BaseGV = GV;
2442 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2443 return true;
2444 AddrMode.BaseGV = nullptr;
2445 }
2446 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
2447 ExtAddrMode BackupAddrMode = AddrMode;
2448 unsigned OldSize = AddrModeInsts.size();
2449
2450 // Check to see if it is possible to fold this operation.
2451 bool MovedAway = false;
2452 if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
2453 // This instruction may have been moved away. If so, there is nothing
2454 // to check here.
2455 if (MovedAway)
2456 return true;
2457 // Okay, it's possible to fold this. Check to see if it is actually
2458 // *profitable* to do so. We use a simple cost model to avoid increasing
2459 // register pressure too much.
2460 if (I->hasOneUse() ||
2461 IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
2462 AddrModeInsts.push_back(I);
2463 return true;
2464 }
2465
2466 // It isn't profitable to do this, roll back.
2467 //cerr << "NOT FOLDING: " << *I;
2468 AddrMode = BackupAddrMode;
2469 AddrModeInsts.resize(OldSize);
2470 TPT.rollback(LastKnownGood);
2471 }
2472 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
2473 if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
2474 return true;
2475 TPT.rollback(LastKnownGood);
2476 } else if (isa<ConstantPointerNull>(Addr)) {
2477 // Null pointer gets folded without affecting the addressing mode.
2478 return true;
2479 }
2480
2481 // Worst case, the target should support [reg] addressing modes. :)
2482 if (!AddrMode.HasBaseReg) {
2483 AddrMode.HasBaseReg = true;
2484 AddrMode.BaseReg = Addr;
2485 // Still check for legality in case the target supports [imm] but not [i+r].
2486 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2487 return true;
2488 AddrMode.HasBaseReg = false;
2489 AddrMode.BaseReg = nullptr;
2490 }
2491
2492 // If the base register is already taken, see if we can do [r+r].
2493 if (AddrMode.Scale == 0) {
2494 AddrMode.Scale = 1;
2495 AddrMode.ScaledReg = Addr;
2496 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2497 return true;
2498 AddrMode.Scale = 0;
2499 AddrMode.ScaledReg = nullptr;
2500 }
2501 // Couldn't match.
2502 TPT.rollback(LastKnownGood);
2503 return false;
2504 }
2505
2506 /// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
2507 /// inline asm call are due to memory operands. If so, return true, otherwise
2508 /// return false.
2509 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
2510 const TargetLowering &TLI) {
2511 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(ImmutableCallSite(CI));
2512 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
2513 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
2514
2515 // Compute the constraint code and ConstraintType to use.
2516 TLI.ComputeConstraintToUse(OpInfo, SDValue());
2517
2518 // If this asm operand is our Value*, and if it isn't an indirect memory
2519 // operand, we can't fold it!
2520 if (OpInfo.CallOperandVal == OpVal &&
2521 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
2522 !OpInfo.isIndirect))
2523 return false;
2524 }
2525
2526 return true;
2527 }
2528
2529 /// FindAllMemoryUses - Recursively walk all the uses of I until we find a
2530 /// memory use. If we find an obviously non-foldable instruction, return true.
2531 /// Add the ultimately found memory instructions to MemoryUses.
2532 static bool FindAllMemoryUses(Instruction *I,
2533 SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
2534 SmallPtrSet<Instruction*, 16> &ConsideredInsts,
2535 const TargetLowering &TLI) {
2536 // If we already considered this instruction, we're done.
2537 if (!ConsideredInsts.insert(I))
2538 return false;
2539
2540 // If this is an obviously unfoldable instruction, bail out.
2541 if (!MightBeFoldableInst(I))
2542 return true;
2543
2544 // Loop over all the uses, recursively processing them.
2545 for (Use &U : I->uses()) {
2546 Instruction *UserI = cast<Instruction>(U.getUser());
2547
2548 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
2549 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
2550 continue;
2551 }
2552
2553 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
2554 unsigned opNo = U.getOperandNo();
2555 if (opNo == 0) return true; // Storing addr, not into addr.
2556 MemoryUses.push_back(std::make_pair(SI, opNo));
2557 continue;
2558 }
2559
2560 if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
2561 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
2562 if (!IA) return true;
2563
2564 // If this is a memory operand, we're cool, otherwise bail out.
2565 if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
2566 return true;
2567 continue;
2568 }
2569
2570 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI))
2571 return true;
2572 }
2573
2574 return false;
2575 }
2576
2577 /// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
2578 /// the use site that we're folding it into. If so, there is no cost to
2579 /// include it in the addressing mode. KnownLive1 and KnownLive2 are two values
2580 /// that we know are live at the instruction already.
2581 bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
2582 Value *KnownLive2) {
2583 // If Val is either of the known-live values, we know it is live!
2584 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
2585 return true;
2586
2587 // All values other than instructions and arguments (e.g. constants) are live.
2588 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
2589
2590 // If Val is a constant sized alloca in the entry block, it is live; this is
2591 // because it is just a reference to the stack/frame pointer, which is
2592 // live for the whole function.
2593 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
2594 if (AI->isStaticAlloca())
2595 return true;
2596
2597 // Check to see if this value is already used in the memory instruction's
2598 // block. If so, it's already live into the block at the very least, so we
2599 // can reasonably fold it.
2600 return Val->isUsedInBasicBlock(MemoryInst->getParent());
2601 }
2602
2603 /// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
2604 /// mode of the machine to fold the specified instruction into a load or store
2605 /// that ultimately uses it. However, the specified instruction has multiple
2606 /// uses. Given this, it may actually increase register pressure to fold it
2607 /// into the load. For example, consider this code:
2608 ///
2609 /// X = ...
2610 /// Y = X+1
2611 /// use(Y) -> nonload/store
2612 /// Z = Y+1
2613 /// load Z
2614 ///
2615 /// In this case, Y has multiple uses, and can be folded into the load of Z
2616 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
2617 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
2618 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
2619 /// number of computations either.
2620 ///
2621 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
2622 /// X was live across 'load Z' for other reasons, we actually *would* want to
2623 /// fold the addressing mode in the Z case. This would make Y die earlier.
2624 bool AddressingModeMatcher::
2625 IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
2626 ExtAddrMode &AMAfter) {
2627 if (IgnoreProfitability) return true;
2628
2629 // AMBefore is the addressing mode before this instruction was folded into it,
2630 // and AMAfter is the addressing mode after the instruction was folded. Get
2631 // the set of registers referenced by AMAfter and subtract out those
2632 // referenced by AMBefore: this is the set of values which folding in this
2633 // address extends the lifetime of.
2634 //
2635 // Note that there are only two potential values being referenced here,
2636 // BaseReg and ScaleReg (global addresses are always available, as are any
2637 // folded immediates).
2638 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
2639
2640 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
2641 // lifetime wasn't extended by adding this instruction.
2642 if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
2643 BaseReg = nullptr;
2644 if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
2645 ScaledReg = nullptr;
2646
2647 // If folding this instruction (and its subexprs) didn't extend any live
2648 // ranges, we're ok with it.
2649 if (!BaseReg && !ScaledReg)
2650 return true;
2651
2652 // If all uses of this instruction are ultimately load/store/inlineasm's,
2653 // check to see if their addressing modes will include this instruction. If
2654 // so, we can fold it into all uses, so it doesn't matter if it has multiple
2655 // uses.
2656 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
2657 SmallPtrSet<Instruction*, 16> ConsideredInsts;
2658 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
2659 return false; // Has a non-memory, non-foldable use!
2660
2661 // Now that we know that all uses of this instruction are part of a chain of
2662 // computation involving only operations that could theoretically be folded
2663 // into a memory use, loop over each of these uses and see if they could
2664 // *actually* fold the instruction.
2665 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
2666 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
2667 Instruction *User = MemoryUses[i].first;
2668 unsigned OpNo = MemoryUses[i].second;
2669
2670 // Get the access type of this use. If the use isn't a pointer, we don't
2671 // know what it accesses.
2672 Value *Address = User->getOperand(OpNo);
2673 if (!Address->getType()->isPointerTy())
2674 return false;
2675 Type *AddressAccessTy = Address->getType()->getPointerElementType();
2676
2677 // Do a match against the root of this address, ignoring profitability. This
2678 // will tell us if the addressing mode for the memory operation will
2679 // *actually* cover the shared instruction.
2680 ExtAddrMode Result;
2681 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2682 TPT.getRestorationPoint();
2683 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
2684 MemoryInst, Result, InsertedTruncs,
2685 PromotedInsts, TPT);
2686 Matcher.IgnoreProfitability = true;
2687 bool Success = Matcher.MatchAddr(Address, 0);
2688 (void)Success; assert(Success && "Couldn't select *anything*?");
2689
2690 // The match was only to check profitability; the changes made are not
2691 // part of the original matching. Therefore, they should be dropped,
2692 // otherwise the original matcher will not be in the right state.
2693 TPT.rollback(LastKnownGood);
2694
2695 // If the match didn't cover I, then it won't be shared by it.
2696 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
2697 I) == MatchedAddrModeInsts.end())
2698 return false;
2699
2700 MatchedAddrModeInsts.clear();
2701 }
2702
2703 return true;
2704 }
2705
2706 } // end anonymous namespace
2707
2708 /// IsNonLocalValue - Return true if the specified value is defined in a
2709 /// different basic block than BB.
2710 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
2711 if (Instruction *I = dyn_cast<Instruction>(V))
2712 return I->getParent() != BB;
2713 return false;
2714 }
2715
2716 /// OptimizeMemoryInst - Load and store instructions often have
2717 /// addressing modes that can do significant amounts of computation. As such,
2718 /// instruction selection will try to get the load or store to do as much
2719 /// computation as possible for the program. The problem is that isel can only
2720 /// see within a single block. As such, we sink as much legal addressing mode
2721 /// stuff into the block as possible.
2722 ///
2723 /// This method is used to optimize both load/store and inline asms with memory
2724 /// operands.
2725 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
2726 Type *AccessTy) {
2727 Value *Repl = Addr;
2728
2729 // Try to collapse single-value PHI nodes. This is necessary to undo
2730 // unprofitable PRE transformations.
2731 SmallVector<Value*, 8> worklist;
2732 SmallPtrSet<Value*, 16> Visited;
2733 worklist.push_back(Addr);
2734
2735 // Use a worklist to iteratively look through PHI nodes, and ensure that
2736 // the addressing modes obtained from the non-PHI roots of the graph
2737 // are equivalent.
2738 Value *Consensus = nullptr;
2739 unsigned NumUsesConsensus = 0;
2740 bool IsNumUsesConsensusValid = false;
2741 SmallVector<Instruction*, 16> AddrModeInsts;
2742 ExtAddrMode AddrMode;
2743 TypePromotionTransaction TPT;
2744 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2745 TPT.getRestorationPoint();
2746 while (!worklist.empty()) {
2747 Value *V = worklist.back();
2748 worklist.pop_back();
2749
2750 // Break use-def graph loops.
2751 if (!Visited.insert(V)) {
2752 Consensus = nullptr;
2753 break;
2754 }
2755
2756 // For a PHI node, push all of its incoming values.
2757 if (PHINode *P = dyn_cast<PHINode>(V)) {
2758 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
2759 worklist.push_back(P->getIncomingValue(i));
2760 continue;
2761 }
2762
2763 // For non-PHIs, determine the addressing mode being computed.
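// (Illustration: for an address like 'getelementptr i8* %base, i64 4' the
// matcher would typically return an ExtAddrMode with BaseReg = %base and
// BaseOffs = 4, recording the matched instructions in NewAddrModeInsts.)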
2764 SmallVector<Instruction*, 16> NewAddrModeInsts;
2765 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
2766 V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet,
2767 PromotedInsts, TPT);
2768
2769 // This check is broken into two cases with very similar code to avoid using
2770 // getNumUses() as much as possible. Some values have a lot of uses, so
2771 // calling getNumUses() unconditionally caused a significant compile-time
2772 // regression.
2773 if (!Consensus) {
2774 Consensus = V;
2775 AddrMode = NewAddrMode;
2776 AddrModeInsts = NewAddrModeInsts;
2777 continue;
2778 } else if (NewAddrMode == AddrMode) {
2779 if (!IsNumUsesConsensusValid) {
2780 NumUsesConsensus = Consensus->getNumUses();
2781 IsNumUsesConsensusValid = true;
2782 }
2783
2784 // Ensure that the obtained addressing mode is equivalent to that obtained
2785 // for all other roots of the PHI traversal. Also, when choosing one
2786 // such root as representative, select the one with the most uses in order
2787 // to keep the cost modeling heuristics in AddressingModeMatcher
2788 // applicable.
2789 unsigned NumUses = V->getNumUses();
2790 if (NumUses > NumUsesConsensus) {
2791 Consensus = V;
2792 NumUsesConsensus = NumUses;
2793 AddrModeInsts = NewAddrModeInsts;
2794 }
2795 continue;
2796 }
2797
2798 Consensus = nullptr;
2799 break;
2800 }
2801
2802 // If the addressing mode couldn't be determined, or if multiple different
2803 // ones were determined, bail out now.
2804 if (!Consensus) {
2805 TPT.rollback(LastKnownGood);
2806 return false;
2807 }
2808 TPT.commit();
2809
2810 // Check to see if any of the instructions subsumed by this addr mode are
2811 // non-local to I's BB.
2812 bool AnyNonLocal = false;
2813 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
2814 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
2815 AnyNonLocal = true;
2816 break;
2817 }
2818 }
2819
2820 // If all the instructions matched are already in this BB, don't do anything.
2821 if (!AnyNonLocal) {
2822 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
2823 return false;
2824 }
2825
2826 // Insert this computation right after this user. Since our caller is
2827 // scanning from the top of the BB to the bottom, reuses of the expr are
2828 // guaranteed to happen later.
2829 IRBuilder<> Builder(MemoryInst);
2830
2831 // Now that we've determined the addressing expression that we want to use
2832 // and know that we have to sink it into this block, check to see if we have
2833 // already done this for some other load/store instr in this block. If so,
2834 // reuse the computation.
2835 Value *&SunkAddr = SunkAddrs[Addr];
2836 if (SunkAddr) {
2837 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
2838 << *MemoryInst);
2839 if (SunkAddr->getType() != Addr->getType())
2840 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
2841 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
2842 TM && TM->getSubtarget<TargetSubtargetInfo>().useAA())) {
2843 // By default, we use the GEP-based method when AA is used later. This
2844 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
2845 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
2846 << *MemoryInst);
2847 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
2848 Value *ResultPtr = nullptr, *ResultIndex = nullptr;
2849
2850 // First, find the pointer.
2851 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 2852 ResultPtr = AddrMode.BaseReg; 2853 AddrMode.BaseReg = nullptr; 2854 } 2855 2856 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 2857 // We can't add more than one pointer together, nor can we scale a 2858 // pointer (both of which seem meaningless). 2859 if (ResultPtr || AddrMode.Scale != 1) 2860 return false; 2861 2862 ResultPtr = AddrMode.ScaledReg; 2863 AddrMode.Scale = 0; 2864 } 2865 2866 if (AddrMode.BaseGV) { 2867 if (ResultPtr) 2868 return false; 2869 2870 ResultPtr = AddrMode.BaseGV; 2871 } 2872 2873 // If the real base value actually came from an inttoptr, then the matcher 2874 // will look through it and provide only the integer value. In that case, 2875 // use it here. 2876 if (!ResultPtr && AddrMode.BaseReg) { 2877 ResultPtr = 2878 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); 2879 AddrMode.BaseReg = nullptr; 2880 } else if (!ResultPtr && AddrMode.Scale == 1) { 2881 ResultPtr = 2882 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); 2883 AddrMode.Scale = 0; 2884 } 2885 2886 if (!ResultPtr && 2887 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 2888 SunkAddr = Constant::getNullValue(Addr->getType()); 2889 } else if (!ResultPtr) { 2890 return false; 2891 } else { 2892 Type *I8PtrTy = 2893 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 2894 2895 // Start with the base register. Do this first so that subsequent address 2896 // matching finds it last, which will prevent it from trying to match it 2897 // as the scaled value in case it happens to be a mul. That would be 2898 // problematic if we've sunk a different mul for the scale, because then 2899 // we'd end up sinking both muls. 2900 if (AddrMode.BaseReg) { 2901 Value *V = AddrMode.BaseReg; 2902 if (V->getType() != IntPtrTy) 2903 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 2904 2905 ResultIndex = V; 2906 } 2907 2908 // Add the scale value. 2909 if (AddrMode.Scale) { 2910 Value *V = AddrMode.ScaledReg; 2911 if (V->getType() == IntPtrTy) { 2912 // done. 2913 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 2914 cast<IntegerType>(V->getType())->getBitWidth()) { 2915 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 2916 } else { 2917 // It is only safe to sign extend the BaseReg if we know that the math 2918 // required to create it did not overflow before we extend it. Since 2919 // the original IR value was tossed in favor of a constant back when 2920 // the AddrMode was created we need to bail out gracefully if widths 2921 // do not match instead of extending it. 2922 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); 2923 if (I && (ResultIndex != AddrMode.BaseReg)) 2924 I->eraseFromParent(); 2925 return false; 2926 } 2927 2928 if (AddrMode.Scale != 1) 2929 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 2930 "sunkaddr"); 2931 if (ResultIndex) 2932 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 2933 else 2934 ResultIndex = V; 2935 } 2936 2937 // Add in the Base Offset if present. 2938 if (AddrMode.BaseOffs) { 2939 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 2940 if (ResultIndex) { 2941 // We need to add this separately from the scale above to help with 2942 // SDAG consecutive load/store merging. 
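// That is, the variable index and the constant offset end up in two
// separate GEPs, roughly (names illustrative):
//   %sunkaddr0 = getelementptr i8* %base, i64 %index
//   %sunkaddr = getelementptr i8* %sunkaddr0, i64 <BaseOffs>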
2943 if (ResultPtr->getType() != I8PtrTy) 2944 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 2945 ResultPtr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); 2946 } 2947 2948 ResultIndex = V; 2949 } 2950 2951 if (!ResultIndex) { 2952 SunkAddr = ResultPtr; 2953 } else { 2954 if (ResultPtr->getType() != I8PtrTy) 2955 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 2956 SunkAddr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); 2957 } 2958 2959 if (SunkAddr->getType() != Addr->getType()) 2960 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 2961 } 2962 } else { 2963 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 2964 << *MemoryInst); 2965 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); 2966 Value *Result = nullptr; 2967 2968 // Start with the base register. Do this first so that subsequent address 2969 // matching finds it last, which will prevent it from trying to match it 2970 // as the scaled value in case it happens to be a mul. That would be 2971 // problematic if we've sunk a different mul for the scale, because then 2972 // we'd end up sinking both muls. 2973 if (AddrMode.BaseReg) { 2974 Value *V = AddrMode.BaseReg; 2975 if (V->getType()->isPointerTy()) 2976 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 2977 if (V->getType() != IntPtrTy) 2978 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 2979 Result = V; 2980 } 2981 2982 // Add the scale value. 2983 if (AddrMode.Scale) { 2984 Value *V = AddrMode.ScaledReg; 2985 if (V->getType() == IntPtrTy) { 2986 // done. 2987 } else if (V->getType()->isPointerTy()) { 2988 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 2989 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 2990 cast<IntegerType>(V->getType())->getBitWidth()) { 2991 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 2992 } else { 2993 // It is only safe to sign extend the BaseReg if we know that the math 2994 // required to create it did not overflow before we extend it. Since 2995 // the original IR value was tossed in favor of a constant back when 2996 // the AddrMode was created we need to bail out gracefully if widths 2997 // do not match instead of extending it. 2998 Instruction *I = dyn_cast<Instruction>(Result); 2999 if (I && (Result != AddrMode.BaseReg)) 3000 I->eraseFromParent(); 3001 return false; 3002 } 3003 if (AddrMode.Scale != 1) 3004 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 3005 "sunkaddr"); 3006 if (Result) 3007 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3008 else 3009 Result = V; 3010 } 3011 3012 // Add in the BaseGV if present. 3013 if (AddrMode.BaseGV) { 3014 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 3015 if (Result) 3016 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3017 else 3018 Result = V; 3019 } 3020 3021 // Add in the Base Offset if present. 3022 if (AddrMode.BaseOffs) { 3023 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 3024 if (Result) 3025 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3026 else 3027 Result = V; 3028 } 3029 3030 if (!Result) 3031 SunkAddr = Constant::getNullValue(Addr->getType()); 3032 else 3033 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 3034 } 3035 3036 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 3037 3038 // If we have no uses, recursively delete the value and all dead instructions 3039 // using it. 

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakVH to hold onto it in case this happens.
    WeakVH IterHandle(CurInstIterator);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);

    if (IterHandle != CurInstIterator) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computations into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  TargetLowering::AsmOperandInfoVector
    TargetConstraints = TLI->ParseConstraints(CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
              !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
      !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  ++NumExtsMoved;
  return true;
}

bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
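  // Editorial sketch (assumed example, not from the original source):
  //   bb0:  %x = ...                      ; i32, also used in other blocks
  //         %e = zext i32 %x to i64
  //   bb1:  use(%x)
  // becomes, once a trunc is placed in the user block,
  //   bb1:  %t = trunc i64 %e to i32
  //         use(%t)
  // so only %e remains live out of bb0.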
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
      InsertedTruncsSet.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

/// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be
/// turned into an explicit branch.
static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch. This requires that
  // branch probability metadata is preserved for the select, which is not the
  // case currently.

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If the branch is predicted right, an out-of-order CPU can avoid blocking
  // on the compare. Emit cmovs on compares with a memory operand as branches
  // to avoid stalls on the load from memory. If the compare has more than one
  // use, there's probably another cmov or setcc around, so it's not worth
  // emitting a branch.
  if (!Cmp)
    return false;

  Value *CmpOp0 = Cmp->getOperand(0);
  Value *CmpOp1 = Cmp->getOperand(1);

  // We check that the memory operand has one use to avoid uses of the loaded
  // value directly after the compare, making branches unprofitable.
  return Cmp->hasOneUse() &&
         ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
          (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
}

/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
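// Editorial illustration (not from the original source): the expansion below
// rewrites
//   %r = select i1 %c, i32 %t, i32 %f
// into
//   start:       br i1 %c, label %select.end, label %select.mid
//   select.mid:  br label %select.end
//   select.end:  %r = phi i32 [ %t, %start ], [ %f, %select.mid ]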
bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to CF?
  if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (VectorCond)
    SelectKind = TargetLowering::VectorMaskSelect;
  else if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  // Do we have efficient codegen support for this kind of 'select'?
  if (TLI->isSelectSupported(SelectKind)) {
    // We have efficient codegen support for the select instruction.
    // Check if it is profitable to keep this 'select'.
    if (!TLI->isPredictableSelectExpensive() ||
        !isFormingBranchFromSelectProfitable(SI))
      return false;
  }

  ModifiedDT = true;

  // First, we split the block containing the select into 2 blocks.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
  BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");

  // Create a new block serving as the landing pad for the branch.
  BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
                                              NextBlock->getParent(),
                                              NextBlock);

  // Move the unconditional branch from the block containing the select into
  // our landing pad block.
  StartBlock->getTerminator()->eraseFromParent();
  BranchInst::Create(NextBlock, SmallBlock);

  // Insert the real conditional branch based on the original condition.
  BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);

  // The select itself is replaced with a PHI Node.
  PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin());
  PN->takeName(SI);
  PN->addIncoming(SI->getTrueValue(), StartBlock);
  PN->addIncoming(SI->getFalseValue(), SmallBlock);
  SI->replaceAllUsesWith(PN);
  SI->eraseFromParent();

  // Instruct OptimizeBlock to skip to the next block.
  CurInstIterator = StartBlock->end();
  ++NumSelectsExpanded;
  return true;
}

static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
  SmallVector<int, 16> Mask(SVI->getShuffleMask());
  int SplatElem = -1;
  for (unsigned i = 0; i < Mask.size(); ++i) {
    if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
      return false;
    SplatElem = Mask[i];
  }

  return true;
}

/// Some targets have expensive vector shifts if the lanes aren't all the same
/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
/// it's often worth sinking a shufflevector splat down to its use so that
/// codegen can spot all lanes are identical.
bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  BasicBlock *DefBB = SVI->getParent();

  // Only do this xform if variable vector shifts are particularly expensive.
  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
    return false;

  // We only expect better codegen by sinking a shuffle if we can recognise a
  // constant splat.
  if (!isBroadcastShuffle(SVI))
    return false;
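
  // Editorial sketch (assumed example, not from the original source): a splat
  //   %s = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
  // defined here but feeding a shift in another block gets a per-block copy
  // next to that shift, so instruction selection can see that every lane of
  // the shift amount is identical.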

  // InsertedShuffles - Only insert a shuffle in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

  bool MadeChange = false;
  for (User *U : SVI->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this shuffle is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;

    // For now only apply this when the splat is used by a shift instruction.
    if (!UI->isShift()) continue;

    // Everything checks out, sink the shuffle if the user's block doesn't
    // already have a copy.
    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];

    if (!InsertedShuffle) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0),
                                              SVI->getOperand(1),
                                              SVI->getOperand(2), "", InsertPt);
    }

    UI->replaceUsesOfWith(SVI, InsertedShuffle);
    MadeChange = true;
  }

  // If we removed all uses, nuke the shuffle.
  if (SVI->use_empty()) {
    SVI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : nullptr,
                                       TLInfo, DT)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }
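
  // Editorial illustration (assumed example, not from the original source): a
  // trivial PHI such as
  //   %p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]
  // simplifies to %v and is zapped above; non-trivial instructions fall
  // through to the type-specific handling below.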

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI && TLI->getTypeAction(CI->getContext(),
                                    TLI->getValueType(CI->getType())) ==
                     TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = MoveExtToFormExtLoad(I);
        return MadeChange | OptimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType());
    return false;
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      OptimizeInst(NC);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return OptimizeCallInst(CI);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return OptimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return OptimizeShuffleVectorInst(SVI);

  return false;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end())
    MadeChange |= OptimizeInst(CurInstIterator++);

  MadeChange |= DupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then ISel may not be able to
// handle it properly. ISel will drop llvm.dbg.value if it cannot find a node
// corresponding to the value.
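// Editorial sketch (assumed example, not from the original source): a
//   call void @llvm.dbg.value(metadata !{i32 %v}, i64 0, metadata !var)
// stranded far below the definition of %v is re-inserted immediately after
// the instruction that defines %v (or at the block's first insertion point
// when %v is defined by a PHI).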
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
      Instruction *Insn = BI; ++BI;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

// If there is a sequence that branches based on comparing a single bit
// against zero, and the target supports folding the mask and compare into a
// single instruction, sink the mask and compare into the branch uses. Do
// this before OptimizeBlock -> OptimizeInst -> OptimizeCmpExpression, which
// perturbs the pattern being searched for.
bool CodeGenPrepare::sinkAndCmp(Function &F) {
  if (!EnableAndCmpSinking)
    return false;
  if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
    return false;
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // Does this BB end with the following?
    //   %andVal = and %val, #single-bit-set
    //   %cmpVal = icmp %andVal, 0
    //   br i1 %cmpVal, label %dest1, label %dest2
    BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
    if (!Brcc || !Brcc->isConditional())
      continue;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
    if (!Cmp || Cmp->getParent() != BB)
      continue;
    ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
    if (!Zero || !Zero->isZero())
      continue;
    Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
    if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
      continue;
    ConstantInt *Mask = dyn_cast<ConstantInt>(And->getOperand(1));
    if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
      continue;
    DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());

    // Push the "and; icmp" for any users that are conditional branches.
    // Since there can only be one branch use per BB, we don't need to keep
    // track of which BBs we insert into.
    for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
         UI != E; ) {
      Use &TheUse = *UI;
      // Find brcc use.
      BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
      ++UI;
      if (!BrccUser || !BrccUser->isConditional())
        continue;
      BasicBlock *UserBB = BrccUser->getParent();
      if (UserBB == BB) continue;
      DEBUG(dbgs() << "found Brcc use\n");
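
      // Editorial sketch (assumed example, not from the original source): for
      //   bb0:  %andVal = and i32 %val, 8
      //         %cmpVal = icmp eq i32 %andVal, 0
      //   bb1:  br i1 %cmpVal, label %t, label %f
      // a fresh "and; icmp" pair is emitted in bb1 just before the branch, so
      // a target with a combined test-bit-and-branch instruction can match
      // the whole sequence within one block.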

      // Sink the "and; icmp" to use.
      MadeChange = true;
      BinaryOperator *NewAnd =
        BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
                                  BrccUser);
      CmpInst *NewCmp =
        CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
                        "", BrccUser);
      TheUse = NewCmp;
      ++NumAndCmpsMoved;
      DEBUG(BrccUser->getParent()->dump());
    }
  }
  return MadeChange;
}
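
// Editorial note (assumption, not from the original source): the pattern
// sinkAndCmp() creates is aimed at targets with single test-bit-and-branch
// instructions (e.g. AArch64's TBZ/TBNZ); such targets opt in by returning
// true from TargetLowering::isMaskAndBranchFoldingLegal().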