//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
"disable-cgp-select2branch", cl::Hidden, cl::init(false), 82 cl::desc("Disable select to branch conversion.")); 83 84 static cl::opt<bool> AddrSinkUsingGEPs( 85 "addr-sink-using-gep", cl::Hidden, cl::init(false), 86 cl::desc("Address sinking in CGP using GEPs.")); 87 88 static cl::opt<bool> EnableAndCmpSinking( 89 "enable-andcmp-sinking", cl::Hidden, cl::init(true), 90 cl::desc("Enable sinkinig and/cmp into branches.")); 91 92 static cl::opt<bool> DisableStoreExtract( 93 "disable-cgp-store-extract", cl::Hidden, cl::init(false), 94 cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); 95 96 static cl::opt<bool> StressStoreExtract( 97 "stress-cgp-store-extract", cl::Hidden, cl::init(false), 98 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); 99 100 static cl::opt<bool> DisableExtLdPromotion( 101 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 102 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " 103 "CodeGenPrepare")); 104 105 static cl::opt<bool> StressExtLdPromotion( 106 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), 107 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " 108 "optimization in CodeGenPrepare")); 109 110 namespace { 111 typedef SmallPtrSet<Instruction *, 16> SetOfInstrs; 112 typedef PointerIntPair<Type *, 1, bool> TypeIsSExt; 113 typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy; 114 class TypePromotionTransaction; 115 116 class CodeGenPrepare : public FunctionPass { 117 const TargetMachine *TM; 118 const TargetLowering *TLI; 119 const TargetTransformInfo *TTI; 120 const TargetLibraryInfo *TLInfo; 121 122 /// As we scan instructions optimizing them, this is the next instruction 123 /// to optimize. Transforms that can invalidate this should update it. 124 BasicBlock::iterator CurInstIterator; 125 126 /// Keeps track of non-local addresses that have been sunk into a block. 127 /// This allows us to avoid inserting duplicate code for blocks with 128 /// multiple load/stores of the same address. 129 ValueMap<Value*, Value*> SunkAddrs; 130 131 /// Keeps track of all instructions inserted for the current function. 132 SetOfInstrs InsertedInsts; 133 /// Keeps track of the type of the related instruction before their 134 /// promotion for the current function. 135 InstrToOrigTy PromotedInsts; 136 137 /// True if CFG is modified in any way. 138 bool ModifiedDT; 139 140 /// True if optimizing for size. 141 bool OptSize; 142 143 /// DataLayout for the Function being processed. 
    const DataLayout *DL;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
        : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
      initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
    }
    bool runOnFunction(Function &F) override;

    const char *getPassName() const override { return "CodeGen Prepare"; }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addRequired<TargetTransformInfoWrapperPass>();
    }

  private:
    bool eliminateFallThrough(Function &F);
    bool eliminateMostlyEmptyBlocks(Function &F);
    bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void eliminateMostlyEmptyBlock(BasicBlock *BB);
    bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT);
    bool optimizeInst(Instruction *I, bool& ModifiedDT);
    bool optimizeMemoryInst(Instruction *I, Value *Addr,
                            Type *AccessTy, unsigned AS);
    bool optimizeInlineAsmInst(CallInst *CS);
    bool optimizeCallInst(CallInst *CI, bool& ModifiedDT);
    bool moveExtToFormExtLoad(Instruction *&I);
    bool optimizeExtUses(Instruction *I);
    bool optimizeSelectInst(SelectInst *SI);
    bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
    bool optimizeExtractElementInst(Instruction *Inst);
    bool dupRetToEnableTailCallOpts(BasicBlock *BB);
    bool placeDbgValues(Function &F);
    bool sinkAndCmp(Function &F);
    bool extLdPromotion(TypePromotionTransaction &TPT, LoadInst *&LI,
                        Instruction *&Inst,
                        const SmallVectorImpl<Instruction *> &Exts,
                        unsigned CreatedInstCost);
    bool splitBranchCondition(Function &F);
    bool simplifyOffsetableRelocate(Instruction &I);
    void stripInvariantGroupMetadata(Instruction &I);
  };
}

char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
                   "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (TM)
    TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  OptSize = F.optForSize();

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
       TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); I++)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then isel may not be able
  // to handle it properly. isel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
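  // For example (illustrative IR; names hypothetical), placeDbgValues moves a
  // straggling llvm.dbg.value next to the definition it describes:
  //   %a = add i32 %x, %y
  //   ...many instructions...
  //   call void @llvm.dbg.value(metadata i32 %a, ...)
  // becomes
  //   %a = add i32 %x, %y
  //   call void @llvm.dbg.value(metadata i32 %a, ...)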
  EverMadeChange |= placeDbgValues(F);

  // If there is a mask, compare against zero, and branch that can be combined
  // into a single target instruction, push the mask and compare into branch
  // users. Do this before OptimizeBlock -> OptimizeInst ->
  // OptimizeCmpExpression, which perturbs the pattern being searched for.
  if (!DisableBranchOpts) {
    EverMadeChange |= sinkAndCmp(F);
    EverMadeChange |= splitBranchCondition(F);
  }

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was
      // changed.
      if (ModifiedDTOnIteration)
        break;
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB->getIterator();
    }
  }
  return Changed;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
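///
/// For example (illustrative IR), a block containing only a phi and an
/// unconditional branch,
///   bb:                                   ; preds = %a, %b
///     %p = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
/// can be folded into %dest, with %dest's phis updated to take the incoming
/// values directly from %a and %b.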
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI->getIterator();
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!canMergeBlocks(BB, DestBB))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
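  // For example (sketch): if %pred branches to both BB and DestBB, a phi in
  // DestBB already has an incoming value for %pred and would also inherit
  // BB's incoming value for %pred after the merge; unless the two values
  // agree, the merge is unsound, which is what the walk below checks.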
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
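//
// For example (sketch, mirroring the statepoint example further below): the
// relocates
//   %base' = relocate(%tok, i32 4, i32 4)   ; base and derived index equal
//   %ptr'  = relocate(%tok, i32 4, i32 5)   ; derived index differs
// yield the map { %base' -> [ %ptr' ] }.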
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<User *> &AllRelocateCalls,
    DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> &
        RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, IntrinsicInst *> RelocateIdxMap;
  for (auto &U : AllRelocateCalls) {
    GCRelocateOperands ThisRelocate(U);
    IntrinsicInst *I = cast<IntrinsicInst>(U);
    auto K = std::make_pair(ThisRelocate.getBasePtrIndex(),
                            ThisRelocate.getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, I));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    IntrinsicInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants.
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(IntrinsicInst *RelocatedBase,
                          const SmallVectorImpl<IntrinsicInst *> &Targets) {
  bool MadeChange = false;
  for (auto &ToReplace : Targets) {
    GCRelocateOperands MasterRelocate(RelocatedBase);
    GCRelocateOperands ThisRelocate(ToReplace);

    assert(ThisRelocate.getBasePtrIndex() == MasterRelocate.getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ThisRelocate.getBasePtrIndex() == ThisRelocate.getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    Value *Base = ThisRelocate.getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ThisRelocate.getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there are cases like this:
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast whether or not there is already one. This way we can handle all
    // cases, and the extra bitcast should be optimized away in later passes.
    Instruction *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          cast<Instruction>(Builder.CreateBitCast(RelocatedBase, Base->getType()));
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
    Instruction *ReplacementInst = cast<Instruction>(Replacement);
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Instruction *ActualReplacement = ReplacementInst;
    if (ReplacementInst->getType() != ToReplace->getType()) {
      ActualReplacement =
          cast<Instruction>(Builder.CreateBitCast(ReplacementInst, ToReplace->getType()));
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<User *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (isGCRelocate(dyn_cast<Instruction>(U)))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(U);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// SinkCast - Sink the specified cast instruction into its user blocks
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
                                      CI->getType(), "", &*InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
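/// For example (sketch): on PPC, where i8 is promoted to i32, the cast
///   %t = trunc i32 %x to i8
/// has SrcVT = i32 and DstVT = i8; after integer promotion DstVT becomes i32,
/// so the truncate is a noop copy and SinkCast pushes it into its user blocks.
///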
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // If this is an fp<->int conversion, it is not a noop copy.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if
/// possible.
///
/// Return true if any changes were made.
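///
/// For example (illustrative IR), the unsigned-overflow idiom
///   %add = add i32 %a, %b
///   %cmp = icmp ugt i32 %a, %add   ; overflow iff the sum wrapped
/// is rewritten to
///   %u   = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add = extractvalue { i32, i1 } %u, 0
///   %cmp = extractvalue { i32, i1 } %u, 1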
static bool CombineUAddWithOverflow(CmpInst *CI) {
  Value *A, *B;
  Instruction *AddI;
  if (!match(CI,
             m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
    return false;

  Type *Ty = AddI->getType();
  if (!isa<IntegerType>(Ty))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp:

  if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
    return false;

#ifndef NDEBUG
  // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  // for now:
  if (AddI->hasOneUse())
    assert(*AddI->user_begin() == CI && "expected!");
#endif

  Module *M = CI->getParent()->getParent()->getParent();
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);

  auto *InsertPt = AddI->hasOneUse() ? CI : AddI;

  auto *UAddWithOverflow =
      CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
  auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
  auto *Overflow =
      ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);

  CI->replaceAllUsesWith(Overflow);
  AddI->replaceAllUsesWith(UAdd);
  CI->eraseFromParent();
  AddI->eraseFromParent();
  return true;
}

/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool SinkCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
                          CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

static bool OptimizeCmpExpression(CmpInst *CI) {
  if (SinkCmpExpression(CI))
    return true;

  if (CombineUAddWithOverflow(CI))
    return true;

  return false;
}

/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
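///
/// For example (sketch): imm = 0x00FF is accepted, since
/// 0x00FF & 0x0100 == 0 (a mask of the low 8 bits), while imm = 0x00F0 is
/// rejected, since 0x00F0 & 0x00F1 == 0x00F0 != 0.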
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// Sink both shift and truncate instructions to the block of the truncate's
/// user.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.

    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", &*TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction to generate a
/// BitExtract instruction. It will only be applied if the architecture
/// supports a BitExtract instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instruction are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if not
      // legal. In this case, we would like to sink both shift and truncate
      // instructions to the BB of TruncUse.
      // For example:
      // BB1:
      // i64 shift.result = lshr i64 opnd, imm
      // trunc.result = trunc shift.result to i16
      //
      // BB2:
      // ----> We will have an implicit truncate here if the architecture does
      // not have i16 compare.
      // cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          &&
          (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
  if (ShiftI->use_empty())
    ShiftI->eraseFromParent();

  return MadeChange;
}

// Translate a masked load intrinsic like
// <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align,
//                                <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks, loading the elements one-by-one if the
// appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  %3 = icmp eq i1 %2, true
//  br i1 %3, label %cond.load, label %else
//
//cond.load:                                        ; preds = %0
//  %4 = getelementptr i32* %1, i32 0
//  %5 = load i32* %4
//  %6 = insertelement <16 x i32> undef, i32 %5, i32 0
//  br label %else
//
//else:                                             ; preds = %0, %cond.load
//  %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ]
//  %7 = extractelement <16 x i1> %mask, i32 1
//  %8 = icmp eq i1 %7, true
//  br i1 %8, label %cond.load1, label %else2
//
//cond.load1:                                       ; preds = %else
//  %9 = getelementptr i32* %1, i32 1
//  %10 = load i32* %9
//  %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1
//  br label %else2
//
//else2:                                            ; preds = %else, %cond.load1
//  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
//  %12 = extractelement <16 x i1> %mask, i32 2
//  %13 = icmp eq i1 %12, true
//  br i1 %13, label %cond.load4, label %else5
//
static void ScalarizeMaskedLoad(CallInst *CI) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Src0 = CI->getArgOperand(3);
  Value *Mask = CI->getArgOperand(2);
  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
  assert(VecType && "Unexpected return type of masked load intrinsic");
  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  BasicBlock *CondBlock = nullptr;
  BasicBlock *PrevIfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Bitcast %addr from i8* to EltTy*
  Type *NewPtrType =
      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  Value *UndefVal = UndefValue::get(VecType);

  // The result vector
  Value *VResult = UndefVal;

  PHINode *Phi = nullptr;
  Value *PrevPhi = UndefVal;

  unsigned VectorWidth = VecType->getNumElements();
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block, created in the previous iteration
    //
    //  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_load = icmp eq i1 %mask_1, true
    //  br i1 %to_load, label %cond.load, label %else
    //
    if (Idx > 0) {
      Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
      Phi->addIncoming(VResult, CondBlock);
      Phi->addIncoming(PrevPhi, PrevIfBlock);
      PrevPhi = Phi;
      VResult = Phi;
    }

    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load");
    Builder.SetInsertPoint(InsertPt);

    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    LoadInst *Load = Builder.CreateLoad(Gep, false);
    VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;
  }

  Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
  Phi->addIncoming(VResult, CondBlock);
  Phi->addIncoming(PrevPhi, PrevIfBlock);
  Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  CI->replaceAllUsesWith(NewI);
  CI->eraseFromParent();
}

// Translate a masked store intrinsic, like
// void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
//                         <16 x i1> %mask)
// to a chain of basic blocks, that stores the elements one-by-one if the
// appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  %3 = icmp eq i1 %2, true
//  br i1 %3, label %cond.store, label %else
//
// cond.store:                                       ; preds = %0
//  %4 = extractelement <16 x i32> %val, i32 0
//  %5 = getelementptr i32* %1, i32 0
//  store i32 %4, i32* %5
//  br label %else
//
// else:                                             ; preds = %0, %cond.store
//  %6 = extractelement <16 x i1> %mask, i32 1
//  %7 = icmp eq i1 %6, true
//  br i1 %7, label %cond.store1, label %else2
//
// cond.store1:                                      ; preds = %else
//  %8 = extractelement <16 x i32> %val, i32 1
//  %9 = getelementptr i32* %1, i32 1
//  store i32 %8, i32* %9
//  br label %else2
//  . . .
static void ScalarizeMaskedStore(CallInst *CI) {
  Value *Ptr = CI->getArgOperand(1);
  Value *Src = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(3);

  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
  assert(VecType && "Unexpected data type in masked store intrinsic");
  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Bitcast %addr from i8* to EltTy*
  Type *NewPtrType =
      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);

  unsigned VectorWidth = VecType->getNumElements();
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block, created in the previous iteration
    //
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_store = icmp eq i1 %mask_1, true
    //  br i1 %to_store, label %cond.store, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create "cond" block
    //
    //  %OneElt = extractelement <16 x i32> %Src, i32 Idx
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  store i32 %OneElt, i32* %EltAddr
    //
    BasicBlock *CondBlock =
        IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    Builder.CreateStore(OneElt, Gep);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }
  CI->eraseFromParent();
}

bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea
  unsigned MinSize, PrefAlign;
  if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->arg_operands()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getPointerSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if ((Offset2 & (PrefAlign-1)) != 0)
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(PrefAlign);
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->hasUniqueInitializer() &&
          !GV->hasSection() && GV->getAlignment() < PrefAlign &&
          DL->getTypeAllocSize(GV->getType()->getElementType()) >=
              MinSize + Offset2)
        GV->setAlignment(PrefAlign);
    }
    // If this is a memcpy (or similar) then we may be able to improve the
    // alignment
    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
      unsigned Align = getKnownAlignment(MI->getDest(), *DL);
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
        Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
      if (Align > MI->getAlignment())
        MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
    }
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::objectsize: {
      // Lower all uses of llvm.objectsize.*
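      // For example (sketch): with a "min" argument of false,
      //   %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false)
      // is folded to i64 -1 ("unknown, assume maximum"), and with a "min"
      // argument of true it is folded to i64 0.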
      bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
      Type *ReturnTy = CI->getType();
      Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

      // Substituting this can cause recursive simplifications, which can
      // invalidate our iterator.  Use a WeakVH to hold onto it in case this
      // happens.
      WeakVH IterHandle(&*CurInstIterator);

      replaceAndRecursivelySimplify(CI, RetVal,
                                    TLInfo, nullptr);

      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      if (IterHandle != CurInstIterator.getNodePtrUnchecked()) {
        CurInstIterator = BB->begin();
        SunkAddrs.clear();
      }
      return true;
    }
    case Intrinsic::masked_load: {
      // Scalarize unsupported vector masked load
      if (!TTI->isLegalMaskedLoad(CI->getType(), 1)) {
        ScalarizeMaskedLoad(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    case Intrinsic::masked_store: {
      if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(), 1)) {
        ScalarizeMaskedStore(CI);
        ModifiedDT = true;
        return true;
      }
      return false;
    }
    case Intrinsic::aarch64_stlxr:
    case Intrinsic::aarch64_stxr: {
      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
      if (!ExtVal || !ExtVal->hasOneUse() ||
          ExtVal->getParent() == CI->getParent())
        return false;
      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
      ExtVal->moveBefore(CI);
      // Mark this instruction as "inserted by CGP", so that other
      // optimizations don't touch it.
      InsertedInsts.insert(ExtVal);
      return true;
    }
    case Intrinsic::invariant_group_barrier:
      II->replaceAllUsesWith(II->getArgOperand(0));
      II->eraseFromParent();
      return true;
    }

    if (TLI) {
      // Unknown address space.
      // TODO: Target hook to pick which address space the intrinsic cares
      // about?
      unsigned AddrSpace = ~0u;
      SmallVector<Value*, 2> PtrOps;
      Type *AccessTy;
      if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy, AddrSpace))
        while (!PtrOps.empty())
          if (optimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy, AddrSpace))
            return true;
    }
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction()) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize.  Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  if (Value *V = Simplifier.optimizeCall(CI)) {
    CI->replaceAllUsesWith(V);
    CI->eraseFromParent();
    return true;
  }
  return false;
}

/// Look for opportunities to duplicate return instructions to the predecessor
/// to enable tail call optimizations.
/// The case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI).second)
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "")
       << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
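// For intuition (the values here are made up, not from a real run): a mode
// printed as [GV:g + 16 + Base:%p + 4*%i] stands for BaseGV = @g,
// BaseOffs = 16, BaseReg = %p, Scale = 4 and ScaledReg = %i, i.e. the
// address g + 16 + %p + 4*%i.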
" + " : "") 1641 << "Base:"; 1642 BaseReg->printAsOperand(OS, /*PrintType=*/false); 1643 NeedPlus = true; 1644 } 1645 if (Scale) { 1646 OS << (NeedPlus ? " + " : "") 1647 << Scale << "*"; 1648 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 1649 } 1650 1651 OS << ']'; 1652 } 1653 1654 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1655 void ExtAddrMode::dump() const { 1656 print(dbgs()); 1657 dbgs() << '\n'; 1658 } 1659 #endif 1660 1661 /// \brief This class provides transaction based operation on the IR. 1662 /// Every change made through this class is recorded in the internal state and 1663 /// can be undone (rollback) until commit is called. 1664 class TypePromotionTransaction { 1665 1666 /// \brief This represents the common interface of the individual transaction. 1667 /// Each class implements the logic for doing one specific modification on 1668 /// the IR via the TypePromotionTransaction. 1669 class TypePromotionAction { 1670 protected: 1671 /// The Instruction modified. 1672 Instruction *Inst; 1673 1674 public: 1675 /// \brief Constructor of the action. 1676 /// The constructor performs the related action on the IR. 1677 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 1678 1679 virtual ~TypePromotionAction() {} 1680 1681 /// \brief Undo the modification done by this action. 1682 /// When this method is called, the IR must be in the same state as it was 1683 /// before this action was applied. 1684 /// \pre Undoing the action works if and only if the IR is in the exact same 1685 /// state as it was directly after this action was applied. 1686 virtual void undo() = 0; 1687 1688 /// \brief Advocate every change made by this action. 1689 /// When the results on the IR of the action are to be kept, it is important 1690 /// to call this function, otherwise hidden information may be kept forever. 1691 virtual void commit() { 1692 // Nothing to be done, this action is not doing anything. 1693 } 1694 }; 1695 1696 /// \brief Utility to remember the position of an instruction. 1697 class InsertionHandler { 1698 /// Position of an instruction. 1699 /// Either an instruction: 1700 /// - Is the first in a basic block: BB is used. 1701 /// - Has a previous instructon: PrevInst is used. 1702 union { 1703 Instruction *PrevInst; 1704 BasicBlock *BB; 1705 } Point; 1706 /// Remember whether or not the instruction had a previous instruction. 1707 bool HasPrevInstruction; 1708 1709 public: 1710 /// \brief Record the position of \p Inst. 1711 InsertionHandler(Instruction *Inst) { 1712 BasicBlock::iterator It = Inst->getIterator(); 1713 HasPrevInstruction = (It != (Inst->getParent()->begin())); 1714 if (HasPrevInstruction) 1715 Point.PrevInst = &*--It; 1716 else 1717 Point.BB = Inst->getParent(); 1718 } 1719 1720 /// \brief Insert \p Inst at the recorded position. 1721 void insert(Instruction *Inst) { 1722 if (HasPrevInstruction) { 1723 if (Inst->getParent()) 1724 Inst->removeFromParent(); 1725 Inst->insertAfter(Point.PrevInst); 1726 } else { 1727 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 1728 if (Inst->getParent()) 1729 Inst->moveBefore(Position); 1730 else 1731 Inst->insertBefore(Position); 1732 } 1733 } 1734 }; 1735 1736 /// \brief Move an instruction before another. 1737 class InstructionMoveBefore : public TypePromotionAction { 1738 /// Original position of the instruction. 1739 InsertionHandler Position; 1740 1741 public: 1742 /// \brief Move \p Inst before \p Before. 
  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// \brief Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;
    /// Index of the modified operand.
    unsigned Idx;

  public:
    /// \brief Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// \brief Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };
  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// \brief Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction using the instruction.
      Instruction *Inst;
      /// The operand index at which Inst uses the replaced instruction.
      unsigned Idx;
      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;

  public:
    /// \brief Replace all the uses of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };

  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;
    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer;

  public:
    /// \brief Remove all references to \p Inst and optionally replace all its
    /// uses with New.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          Replacer(nullptr) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    /// \brief Really remove the instruction.
    void commit() override { delete Inst; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when this action was built.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  typedef const TypePromotionAction *ConstRestorationPt;
  /// Advocate every change made in that transaction.
  void commit();
  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);
  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);
  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);
  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);
  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
  typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator
      CommitPt;
};

void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
                                             Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst,
                                                                   Before));
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}

void TypePromotionTransaction::commit() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
       ++It)
    (*It)->commit();
  Actions.clear();
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}
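// A minimal usage sketch (exposition only; it mirrors how matchAddr and
// matchOperationAddr below drive the transaction):
//   TypePromotionTransaction TPT;
//   TypePromotionTransaction::ConstRestorationPt Pt =
//       TPT.getRestorationPoint();
//   TPT.setOperand(Inst, 0, NewVal); // recorded, can be undone
//   if (!Profitable)
//     TPT.rollback(Pt);              // undoes every action taken after Pt
//   // a caller that keeps the changes eventually calls TPT.commit()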
/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction*> &AddrModeInsts;
  const TargetMachine &TM;
  const TargetLowering &TLI;
  const DataLayout &DL;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  unsigned AddrSpace;
  Instruction *MemoryInst;

  /// This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// The instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedInsts;
  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;
  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
                        const TargetMachine &TM, Type *AT, unsigned AS,
                        Instruction *MI, ExtAddrMode &AM,
                        const SetOfInstrs &InsertedInsts,
                        InstrToOrigTy &PromotedInsts,
                        TypePromotionTransaction &TPT)
      : AddrModeInsts(AMI), TM(TM),
        TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
                 ->getTargetLowering()),
        DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
        MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
        PromotedInsts(PromotedInsts), TPT(TPT) {
    IgnoreProfitability = false;
  }
public:

  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction*> &AddrModeInsts,
                           const TargetMachine &TM,
                           const SetOfInstrs &InsertedInsts,
                           InstrToOrigTy &PromotedInsts,
                           TypePromotionTransaction &TPT) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
                                         MemoryInst, Result, InsertedInsts,
                                         PromotedInsts, TPT).matchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }
private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *V, unsigned Depth);
  bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};

/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
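  // For example (made-up values): if ScaleReg is (add %x, 7) and Scale is 4,
  // the mode is re-expressed with ScaledReg = %x and BaseOffs increased by
  // 28, i.e. 4*(%x + 7) becomes 4*%x + 28, provided the target accepts it.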
  ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}
/// \brief Helper class to perform type promotion.
class TypePromotionHelper {
  /// \brief Utility function to check whether or not a sign or zero extension
  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  /// either using the operands of \p Inst or promoting \p Inst.
  /// The type of the extension is defined by \p IsSExt.
  /// In other words, check if:
  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  /// #1 Promotion applies:
  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  /// #2 Operand reuses:
  /// ext opnd1 to ConsideredExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
                            const InstrToOrigTy &PromotedInsts, bool IsSExt);

  /// \brief Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    if (isa<SelectInst>(Inst) && OpIdx == 0)
      return false;
    return true;
  }

  /// \brief Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc or sext or zext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForTruncAndAnyExt(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);

  /// \brief Utility function to promote the operand of \p Ext when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all the instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForOther(Instruction *Ext,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInstsCost,
                                       SmallVectorImpl<Instruction *> *Exts,
                                       SmallVectorImpl<Instruction *> *Truncs,
                                       const TargetLowering &TLI, bool IsSExt);

  /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }

  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
                           InstrToOrigTy &PromotedInsts,
                           unsigned &CreatedInstsCost,
                           SmallVectorImpl<Instruction *> *Exts,
                           SmallVectorImpl<Instruction *> *Truncs,
                           const TargetLowering &TLI);
  /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
  /// action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later. Thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator, if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
       (IsSExt && BinOp->hasNoSignedWrap())))
    return true;

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that trunc just drops extended bits of the same kind of
  // the extension.
  // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType;
  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
  if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
    OpndType = It->second.getPointer();
  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
    OpndType = Opnd->getOperand(0)->getType();
  else
    return false;

  // #2 check that the truncate just drops extended bits.
  if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth())
    return true;

  return false;
}
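// A concrete instance of the ext(trunc(opnd)) rule above (illustrative IR):
//   %opnd = sext i16 %x to i64
//   %t    = trunc i64 %opnd to i32
//   %e    = sext i32 %t to i64
// The trunc only drops bits that the earlier sign extension produced, so the
// final sext can be fed from %opnd (ultimately %x) without the trunc.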
TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check that we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}

Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    llvm::Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we cannot
  // get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
    if (ExtInst) {
      if (Exts)
        Exts->push_back(ExtInst);
      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
    }
    return ExtVal;
  }

  // At this point we have: ext ty opnd to ty.
  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  Value *NextVal = ExtInst->getOperand(0);
  TPT.eraseInstruction(ExtInst, NextVal);
  return NextVal;
}

Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
  // By construction, the operand of Ext is an instruction. Otherwise we cannot
  // get through it and this method should not be called.
  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  CreatedInstsCost = 0;
  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, but Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
      ITrunc->removeFromParent();
      // Insert it just after the definition.
      ITrunc->insertAfter(ExtOpnd);
      if (Truncs)
        Truncs->push_back(ITrunc);
    }

    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous call
    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
    TPT.setOperand(Ext, 0, ExtOpnd);
  }

  // Get through the Instruction:
  // 1. Update its type.
  // 2. Replace the uses of Ext by Inst.
  // 3. Extend each operand that needs to be extended.

  // Remember the original type of the instruction before promotion.
  // This is useful to know that the high bits are sign extended bits.
  PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
      ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
  // Step #1.
  TPT.mutateType(ExtOpnd, Ext->getType());
  // Step #2.
  TPT.replaceAllUsesWith(Ext, ExtOpnd);
  // Step #3.
  Instruction *ExtForOpnd = Ext;

  DEBUG(dbgs() << "Propagate Ext to operands\n");
  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
       ++OpIdx) {
    DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
        !shouldExtOperand(ExtOpnd, OpIdx)) {
      DEBUG(dbgs() << "No need to propagate\n");
      continue;
    }
    // Check if we can statically extend the operand.
    Value *Opnd = ExtOpnd->getOperand(OpIdx);
    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
                            : Cst->getValue().zext(BitWidth);
      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
      continue;
    }
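    // For instance (illustrative): when sign extending an i32 add to i64, a
    // constant operand i32 -1 is simply rewritten to i64 -1 right here; no
    // extension instruction needs to be created for it.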
    // UndefValues are typed, so we have to statically sign extend them.
    if (isa<UndefValue>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    // Check if Ext was reused to extend an operand.
    if (!ExtForOpnd) {
      // If yes, create a new one.
      DEBUG(dbgs() << "More operands to ext\n");
      Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
                                    : TPT.createZExt(Ext, Opnd, Ext->getType());
      if (!isa<Instruction>(ValForExtOpnd)) {
        TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
        continue;
      }
      ExtForOpnd = cast<Instruction>(ValForExtOpnd);
    }
    if (Exts)
      Exts->push_back(ExtForOpnd);
    TPT.setOperand(ExtForOpnd, 0, Opnd);

    // Move the sign extension before the insertion point.
    TPT.moveBefore(ExtForOpnd, ExtOpnd);
    TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
    CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more sext are required, new instructions will have to be created.
    ExtForOpnd = nullptr;
  }
  if (ExtForOpnd == Ext) {
    DEBUG(dbgs() << "Extension is useless now\n");
    TPT.eraseInstruction(Ext);
  }
  return ExtOpnd;
}

/// Check whether or not promoting an instruction to a wider type is profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been
/// matched in the addressing mode during the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}

/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added in the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts. These were probably put here by LSR,
        // and we don't want to mess around with them. Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS
      = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS. If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
        matchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
    if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
        matchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS)
      return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP. We check whether it contains constant offsets and at most
    // one variable offset.
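    // Illustrative example (hypothetical IR): in
    //   %a = getelementptr [40 x i32], [40 x i32]* %base, i64 0, i64 %i
    // the constant index contributes offset 0 and %i is the single variable
    // index, with scale sizeof(i32) = 4 bytes.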
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) { // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset. In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
        // Check to see if we can fold the base pointer in too.
        if (matchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this point.
    // E.g.,
    //   op = add opnd, 1
    //   idx = ext op
    //   addr = gep base, idx
    // is now:
    //   promotedOpnd = ext opnd            <- no match here
    //   op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    //   addr = gep base, op                <- match
    if (MovedAway)
      *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  }
  return false;
}

/// If we can, try to add the value of 'Addr' into the current addressing mode.
/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
/// unmodified. This assumes that Addr is either a pointer type or intptr_t
/// for the target.
///
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will rollback if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check here.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this. Check to see if it is actually
      // *profitable* to do so. We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }
  // Couldn't match.
  TPT.rollback(LastKnownGood);
  return false;
}

/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands. If so, return true, otherwise return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetMachine &TM) {
  const Function *F = CI->getParent()->getParent();
  const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
  const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI,
                            ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I,
    SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetMachine &TM) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    Instruction *UserI = cast<Instruction>(U.getUser());

    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TM))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM))
      return true;
  }

  return false;
}

/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
/// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are
  // live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant-sized alloca in the entry block, it is live; this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block. If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// It is possible for the addressing mode of the machine to fold the specified
/// instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
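/// As a sketch of that register-pressure tradeoff (illustrative only):
/// \code
///   ; Y folded into load Z:        ; Y not folded:
///   use(Y)  ; X and Y both live    use(Y)  ; only Y live
///   load [X+2]                     load [Y+1]
/// \endcode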
bool AddressingModeMatcher::
isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded. Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction. If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM))
    return false; // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use. If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
    if (!AddrTy)
      return false;
    Type *AddressAccessTy = AddrTy->getElementType();
    unsigned AS = AddrTy->getAddressSpace();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
                                  MemoryInst, Result, InsertedInsts,
                                  PromotedInsts, TPT);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // The match was to check the profitability; the changes made are not
    // part of the original matcher.
    // Therefore, they should be dropped; otherwise the original matcher will
    // not be in the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}

} // end anonymous namespace

/// Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// Load and store instructions often have addressing modes that can do
/// significant amounts of computation. As such, instruction selection will try
/// to get the load or store to do as much computation as possible for the
/// program. The problem is that isel can only see within a single block. As
/// such, we sink as much legal addressing mode work into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
  Value *Consensus = nullptr;
  unsigned NumUsesConsensus = 0;
  bool IsNumUsesConsensusValid = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  TypePromotionTransaction TPT;
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(V).second) {
      Consensus = nullptr;
      break;
    }

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : P->incoming_values())
        worklist.push_back(IncValue);
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
        InsertedInsts, PromotedInsts, TPT);

    // This check is broken into two cases with very similar code to avoid using
    // getNumUses() as much as possible. Some values have a lot of uses, so
    // calling getNumUses() unconditionally caused a significant compile-time
    // regression.
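    // (First root seen: adopt it without counting uses. A later root with the
    // same addressing mode: only then compare use counts to pick the
    // representative with the most uses.)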
    if (!Consensus) {
      Consensus = V;
      AddrMode = NewAddrMode;
      AddrModeInsts = NewAddrModeInsts;
      continue;
    } else if (NewAddrMode == AddrMode) {
      if (!IsNumUsesConsensusValid) {
        NumUsesConsensus = Consensus->getNumUses();
        IsNumUsesConsensusValid = true;
      }

      // Ensure that the obtained addressing mode is equivalent to that obtained
      // for all other roots of the PHI traversal. Also, when choosing one
      // such root as representative, select the one with the most uses in order
      // to keep the cost modeling heuristics in AddressingModeMatcher
      // applicable.
      unsigned NumUses = V->getNumUses();
      if (NumUses > NumUsesConsensus) {
        Consensus = V;
        NumUsesConsensus = NumUses;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = nullptr;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  TPT.commit();

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we have determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block. If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
  } else if (AddrSinkUsingGEPs ||
             (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
              TM->getSubtargetImpl(*MemoryInst->getParent()->getParent())
                  ->useAA())) {
    // By default, we use the GEP-based method when AA is used later. This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
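      // (That is, give up if a pointer base has already been chosen, or if
      // the pointer is scaled by anything other than 1.)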
      if (ResultPtr || AddrMode.Scale != 1)
        return false;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    if (AddrMode.BaseGV) {
      if (ResultPtr)
        return false;

      ResultPtr = AddrMode.BaseGV;
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!ResultPtr && AddrMode.BaseReg) {
      ResultPtr =
          Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
      AddrMode.BaseReg = nullptr;
    } else if (!ResultPtr && AddrMode.Scale == 1) {
      ResultPtr =
          Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr");
      AddrMode.Scale = 0;
    }

    if (!ResultPtr &&
        !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return false;
    } else {
      Type *I8PtrTy =
          Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
      Type *I8Ty = Builder.getInt8Ty();

      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                   cast<IntegerType>(V->getType())->getBitWidth()) {
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        } else {
          // It is only safe to sign extend the BaseReg if we know that the math
          // required to create it did not overflow before we extend it. Since
          // the original IR value was tossed in favor of a constant back when
          // the AddrMode was created, we need to bail out gracefully if widths
          // do not match instead of extending it.
          Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
          if (I && (ResultIndex != AddrMode.BaseReg))
            I->eraseFromParent();
          return false;
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
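          // E.g. (a rough sketch of the IR emitted here and below, for an
          // illustrative pointer %p, index %i and offset 4):
          //   %sunkaddr = getelementptr i8, i8* %p, i64 %i
          //   %sunkaddr = getelementptr i8, i8* %sunkaddr, i64 4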
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
          ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
      }

      if (SunkAddr->getType() != Addr->getType())
        SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
    }
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created, we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return false;
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
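  // (E.g., the address chain that used to feed Repl from another block is
  // typically dead now that this memory instruction points at SunkAddr.)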
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakVH to hold onto it in case this happens.
    WeakVH IterHandle(&*CurInstIterator);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);

    if (IterHandle != CurInstIterator.getNodePtrUnchecked()) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// If there are any memory operands, use OptimizeMemoryInst to sink their
/// address computing into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// \brief Check if all the uses of \p Inst are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) {
  assert(!Inst->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Inst->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: Same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    // a = Inst
    // b = sext ty1 a to ty2
    // c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    // a = Inst
    // b = sext ty1 a to ty2
    // c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, maybe this is free to extend from one type to another.
    // In that case, we would not account for a different use.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}

/// \brief Try to form ExtLd by promoting \p Exts until they reach a
/// load instruction.
/// If an ext(load) can be formed, it is returned via \p LI for the load
/// and \p Inst for the extension.
/// Otherwise LI == nullptr and Inst == nullptr.
/// When some promotion happened, \p TPT contains the proper state to
/// revert them.
///
/// \return true when promoting was necessary to expose the ext(load)
/// opportunity, false otherwise.
///
/// Example:
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Thanks to the promotion, we can match zext(load i32*) to i64.
bool CodeGenPrepare::extLdPromotion(TypePromotionTransaction &TPT,
                                    LoadInst *&LI, Instruction *&Inst,
                                    const SmallVectorImpl<Instruction *> &Exts,
                                    unsigned CreatedInstsCost = 0) {
  // Iterate over all the extensions to see if one forms an ext(load).
  for (auto I : Exts) {
    // Check if we directly have ext(load).
    if ((LI = dyn_cast<LoadInst>(I->getOperand(0)))) {
      Inst = I;
      // No promotion happened here.
      return false;
    }
    // Check whether or not we want to do any promotion.
    if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      continue;
    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
        I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH)
      continue;
    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We can merge at most one extension into a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    TotalCreatedInstsCost -= ExtCost;
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
      // The promotion is not profitable, rollback to the previous state.
      TPT.rollback(LastKnownGood);
      continue;
    }
    // The promotion is profitable.
    // Check if it exposes an ext(load).
    (void)extLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInstsCost);
    if (LI && (StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
               // If we have created a new extension, i.e., now we have two
               // extensions, we must make sure one of them is merged with
               // the load; otherwise we may degrade the code quality.
               (LI->hasOneUse() || hasSameExtUse(LI, *TLI))))
      // Promotion happened.
      return true;
    // If this does not help to expose an ext(load), then rollback.
    TPT.rollback(LastKnownGood);
  }
  // None of the extensions can form an ext(load).
  LI = nullptr;
  Inst = nullptr;
  return false;
}

/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
/// \p I [in/out] the extension; it may be modified during the process if some
/// promotions apply.
///
bool CodeGenPrepare::moveExtToFormExtLoad(Instruction *&I) {
  // Try to promote a chain of computation if doing so allows us to form
  // an extended load.
  TypePromotionTransaction TPT;
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  Exts.push_back(I);
  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *OldExt = I;
  bool HasPromoted = extLdPromotion(TPT, LI, I, Exts);
  if (!LI || !I) {
    assert(!HasPromoted && !LI && "If we did not match any load instruction "
                                  "the code must remain the same");
    I = OldExt;
    return false;
  }

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == I->getParent())
    return false;

  EVT VT = TLI->getValueType(*DL, I->getType());
  EVT LoadVT = TLI->getValueType(*DL, LI->getType());

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() && TLI &&
      (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
      !TLI->isTruncateFree(I->getType(), LI->getType())) {
    I = OldExt;
    TPT.rollback(LastKnownGood);
    return false;
  }

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, VT, LoadVT)) {
    I = OldExt;
    TPT.rollback(LastKnownGood);
    return false;
  }

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  TPT.commit();
  I->removeFromParent();
  I->insertAfter(LI);
  ++NumExtsMoved;
  return true;
}

bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with result of extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
      InsertedInsts.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

/// Returns true if a SelectInst should be turned into an explicit branch.
static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch. This requires that
  // branch probability metadata is preserved for the select, which is not the
  // case currently.

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If a branch is predictable, an out-of-order CPU can avoid blocking on its
  // comparison condition. If the compare has more than one use, there's
  // probably another cmov or setcc around, so it's not worth emitting a branch.
  if (!Cmp || !Cmp->hasOneUse())
    return false;

  Value *CmpOp0 = Cmp->getOperand(0);
  Value *CmpOp1 = Cmp->getOperand(1);

  // Emit "cmov on compare with a memory operand" as a branch to avoid stalls
  // on a load from memory. But if the load is used more than once, do not
  // change the select to a branch because the load is probably needed
  // regardless of whether the branch is taken or not.
  return ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
          (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
}


/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' into conditional control flow?
  if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (VectorCond)
    SelectKind = TargetLowering::VectorMaskSelect;
  else if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  // Do we have efficient codegen support for this kind of 'select'?
  if (TLI->isSelectSupported(SelectKind)) {
    // We have efficient codegen support for the select instruction.
    // Check if it is profitable to keep this 'select'.
    if (!TLI->isPredictableSelectExpensive() ||
        !isFormingBranchFromSelectProfitable(SI))
      return false;
  }

  ModifiedDT = true;

  // First, we split the block containing the select into 2 blocks.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
  BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");

  // Create a new block serving as the landing pad for the branch.
  BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
                                              NextBlock->getParent(), NextBlock);

  // Move the unconditional branch from the block with the select in it into our
  // landing pad block.
  StartBlock->getTerminator()->eraseFromParent();
  BranchInst::Create(NextBlock, SmallBlock);

  // Insert the real conditional branch based on the original condition.
  BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);

  // The select itself is replaced with a PHI Node.
  PHINode *PN = PHINode::Create(SI->getType(), 2, "", &NextBlock->front());
  PN->takeName(SI);
  PN->addIncoming(SI->getTrueValue(), StartBlock);
  PN->addIncoming(SI->getFalseValue(), SmallBlock);
  SI->replaceAllUsesWith(PN);
  SI->eraseFromParent();

  // Instruct OptimizeBlock to skip to the next block.
  CurInstIterator = StartBlock->end();
  ++NumSelectsExpanded;
  return true;
}

static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
  SmallVector<int, 16> Mask(SVI->getShuffleMask());
  int SplatElem = -1;
  for (unsigned i = 0; i < Mask.size(); ++i) {
    if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
      return false;
    SplatElem = Mask[i];
  }

  return true;
}

/// Some targets have expensive vector shifts if the lanes aren't all the same
/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
/// it's often worth sinking a shufflevector splat down to its use so that
/// codegen can spot all lanes are identical.
bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  BasicBlock *DefBB = SVI->getParent();

  // Only do this xform if variable vector shifts are particularly expensive.
  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
    return false;

  // We only expect better codegen by sinking a shuffle if we can recognise a
  // constant splat.
  if (!isBroadcastShuffle(SVI))
    return false;

  // InsertedShuffles - Only insert a shuffle in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

  bool MadeChange = false;
  for (User *U : SVI->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this shuffle is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;

    // For now only apply this when the splat is used by a shift instruction.
    if (!UI->isShift()) continue;

    // Everything checks out, sink the shuffle if the user's block doesn't
    // already have a copy.
    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];

    if (!InsertedShuffle) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedShuffle =
          new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                SVI->getOperand(2), "", &*InsertPt);
    }

    UI->replaceUsesOfWith(SVI, InsertedShuffle);
    MadeChange = true;
  }

  // If we removed all uses, nuke the shuffle.
  if (SVI->use_empty()) {
    SVI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

namespace {
/// \brief Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;
  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;
  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;
  /// Instruction that will be combined with the transition.
  Instruction *CombineInst;

  /// \brief The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// \brief Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 0;
  }

  /// \brief Return the index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 1;
  }

  /// \brief Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// \brief Promote \p ToBePromoted by moving \p Def downward past it.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// \brief Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    unsigned Align = ST->getAlignment();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            Align)) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the transition
    // scalar to vector.
    // The vector chain has to account for the combining cost.
    uint64_t ScalarCost =
        TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
    uint64_t VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueKind Arg0OVK =
          IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                         : TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Arg1OVK =
          !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                          : TargetTransformInfo::OK_AnyValue;
      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               Arg0OVK, Arg1OVK);
    }
    DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
                 << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// \brief Generate a constant vector with \p Val with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = UINT_MAX;
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
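      // (E.g., if the extract index below turns out to be the constant 1,
      // Val is 7, and the type is <4 x i32>, the non-splat vector built
      // further down is <undef, i32 7, undef, undef>.)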
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    unsigned End = getTransitionType()->getVectorNumElements();
    if (UseSplat)
      return ConstantVector::getSplat(End, Val);

    SmallVector<Constant *, 4> ConstVec;
    UndefValue *UndefVal = UndefValue::get(Val->getType());
    for (unsigned Idx = 0; Idx != End; ++Idx) {
      if (Idx == ExtractIdx)
        ConstVec.push_back(Val);
      else
        ConstVec.push_back(UndefVal);
    }
    return ConstantVector::get(ConstVec);
  }

  /// \brief Check if promoting to a vector type an operand at \p OperandIdx
  /// in \p Use can trigger undefined behavior.
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
    assert(Transition && "Do not know how to promote null");
  }

  /// \brief Check if we can promote \p ToBePromoted to a vector type.
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// \brief Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// \brief Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// \brief Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// \brief Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// \brief Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};
} // End of anonymous namespace.

void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->removeFromParent();
  Transition->insertAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}

/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
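/// For instance (a sketch; the names are illustrative):
/// \code
/// %e = extractelement <2 x i32> %v, i32 0
/// store i32 %e, i32* %p
/// \endcode
/// On such targets the extract can be folded into the store, so it pays to
/// move the extract next to it.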
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = UINT_MAX;
  if (DisableStoreExtract || !TLI ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it at a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      DEBUG(dbgs() << "Instruction to promote is in a different block ("
                   << ToBePromoted->getParent()->getName()
                   << ") than the transition (" << Parent->getName() << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      DEBUG(dbgs() << "Assume " << *Inst << '\n'
                   << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = moveExtToFormExtLoad(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    stripInvariantGroupMetadata(*LI);
    if (TLI) {
      unsigned AS = LI->getPointerAddressSpace();
      return optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    stripInvariantGroupMetadata(*SI);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);

    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then ISel may not be able to
// handle it properly. ISel will drop llvm.dbg.value if it cannot
// find a node corresponding to the value.
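// (E.g., a sketch: a dbg.value that refers to %x but was left several blocks
// away from the definition of %x is moved to just after that definition.)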
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        DEBUG(dbgs() << "Moving Debug Value before:\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

// If there is a sequence that branches based on comparing a single bit
// against zero that can be combined into a single instruction, and the
// target supports folding these into a single instruction, sink the
// mask and compare into the branch uses. Do this before optimizeBlock ->
// optimizeInst -> OptimizeCmpExpression, which perturbs the pattern being
// searched for.
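// For example (illustrative only), a block ending in
//   %andVal = and i32 %val, 4
//   %icmpVal = icmp eq i32 %andVal, 0
//   br i1 %icmpVal, label %dest1, label %dest2
// has its and/icmp pair duplicated next to every conditional branch in
// another block that uses %icmpVal, so the target can fold each mask,
// compare, and branch into a single instruction.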
bool CodeGenPrepare::sinkAndCmp(Function &F) {
  if (!EnableAndCmpSinking)
    return false;
  if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
    return false;
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = &*I++;

    // Does this BB end with the following?
    //   %andVal = and %val, #single-bit-set
    //   %icmpVal = icmp %andVal, 0
    //   br i1 %icmpVal, label %dest1, label %dest2
    BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
    if (!Brcc || !Brcc->isConditional())
      continue;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
    if (!Cmp || Cmp->getParent() != BB)
      continue;
    ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
    if (!Zero || !Zero->isZero())
      continue;
    Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
    if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
      continue;
    ConstantInt *Mask = dyn_cast<ConstantInt>(And->getOperand(1));
    if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
      continue;
    DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());

    // Push the "and; icmp" for any users that are conditional branches.
    // Since there can only be one branch use per BB, we don't need to keep
    // track of which BBs we insert into.
    for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
         UI != E; ) {
      Use &TheUse = *UI;
      // Find brcc use.
      BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
      ++UI;
      if (!BrccUser || !BrccUser->isConditional())
        continue;
      BasicBlock *UserBB = BrccUser->getParent();
      if (UserBB == BB) continue;
      DEBUG(dbgs() << "found Brcc use\n");

      // Sink the "and; icmp" to this use.
      MadeChange = true;
      BinaryOperator *NewAnd =
        BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
                                  BrccUser);
      CmpInst *NewCmp =
        CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
                        "", BrccUser);
      TheUse = NewCmp;
      ++NumAndCmpsMoved;
      DEBUG(BrccUser->getParent()->dump());
    }
  }
  return MadeChange;
}

/// \brief Retrieve the probabilities of a conditional branch. Returns true on
/// success, or returns false if the metadata is missing or invalid.
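/// The profile metadata on a two-successor conditional branch has the form
///   !{!"branch_weights", i32 <TrueWeight>, i32 <FalseWeight>}
/// so operands 1 and 2 carry the weights of the two successors.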
static bool extractBranchMetadata(BranchInst *BI,
                                  uint64_t &ProbTrue, uint64_t &ProbFalse) {
  assert(BI->isConditional() &&
         "Looking for probabilities on unconditional branch?");
  auto *ProfileData = BI->getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3)
    return false;

  const auto *CITrue =
      mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1));
  const auto *CIFalse =
      mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2));
  if (!CITrue || !CIFalse)
    return false;

  ProbTrue = CITrue->getValue().getZExtValue();
  ProbFalse = CIFalse->getValue().getZExtValue();

  return true;
}

/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *InsertBefore = std::next(Function::iterator(BB))
        .getNodePtrUnchecked();
    auto TmpBB = BasicBlock::Create(BB.getContext(),
                                    BB.getName() + ".cond.split",
                                    BB.getParent(), InsertBefore);

    // Update the original basic block: use the first condition directly in
    // the branch instruction and remove the no-longer-needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to replace either the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes
    // from the newly generated BB (TmpBB). In the other successor we need to
    // add one incoming edge to the PHI nodes, because both branch
    // instructions now target the same successor. Depending on the original
    // branch condition (and/or) we have to swap the successors (TrueDest,
    // FalseDest), so that we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      //   BB1:
      //     jmp_if_X TBB
      //     jmp TmpBB
      //   TmpBB:
      //     jmp_if_Y TBB
      //     jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == TrueProb for the original BB.
      // Assuming the original weights are A and B, one choice is to set
      // BB1's weights to A and A+2B, and set TmpBB's weights to A and 2B.
      // This choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
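      // Worked example (illustrative): if the original weights are A = 8 and
      // B = 4, this choice gives Br1 the weights (8, 16) and Br2 the weights
      // (8, 8), before scaling.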
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      //   BB1:
      //     jmp_if_X TmpBB
      //     jmp FBB
      //   TmpBB:
      //     jmp_if_Y TBB
      //     jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     == FalseProb for the original BB.
      // Assuming the original weights are A and B, one choice is to set
      // BB1's weights to 2A+B and B, and set TmpBB's weights to 2A and B.
      // This choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
                         .createBranchWeights(NewTrueWeight, NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}

void CodeGenPrepare::stripInvariantGroupMetadata(Instruction &I) {
  // Remove the !invariant.group attachment, if any.
  I.setMetadata(LLVMContext::MD_invariant_group, nullptr);
}