//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(false),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
typedef PointerIntPair<Type *, 1, bool> TypeIsSExt;
typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy;
class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM;
  const TargetLowering *TLI;
  const TargetTransformInfo *TTI;
  const TargetLibraryInfo *TLInfo;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address.
  ValueMap<Value*, Value*> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;
  /// Keeps track of the type of the related instruction before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// True if CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  const char *getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

private:
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT);
  bool optimizeInst(Instruction *I, bool& ModifiedDT);
  bool optimizeMemoryInst(Instruction *I, Value *Addr,
                          Type *AccessTy, unsigned AS);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool& ModifiedDT);
  bool moveExtToFormExtLoad(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *I);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool optimizeSwitchInst(SwitchInst *CI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool sinkAndCmp(Function &F);
  bool extLdPromotion(TypePromotionTransaction &TPT, LoadInst *&LI,
                      Instruction *&Inst,
                      const SmallVectorImpl<Instruction *> &Exts,
                      unsigned CreatedInstCost);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
  void stripInvariantGroupMetadata(Instruction &I);
};
}

char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
                   "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (TM)
    TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  OptSize = F.optForSize();

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
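  /// As an illustrative sketch (hand-written IR, not emitted verbatim by this
  /// pass), bypassSlowDivision guards an i64 udiv with a check that both
  /// operands fit in 32 bits, roughly:
  ///   %or = or i64 %a, %b
  ///   %hi = lshr i64 %or, 32
  ///   %fits32 = icmp eq i64 %hi, 0
  ///   br i1 %fits32, label %fast.div, label %slow.div
  /// with a 32-bit udiv on the fast path, the original i64 udiv on the slow
  /// path, and a phi merging the two results.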
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
       TLI->getBypassSlowDivWidths();
    BasicBlock* BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock* Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then isel may not be able
  // to handle it properly. isel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
  EverMadeChange |= placeDbgValues(F);

  // If there is a mask, compare against zero, and branch that can be combined
  // into a single target instruction, push the mask and compare into branch
  // users. Do this before optimizeBlock -> optimizeInst ->
  // OptimizeCmpExpression, which perturbs the pattern being searched for.
  if (!DisableBranchOpts) {
    EverMadeChange |= sinkAndCmp(F);
    EverMadeChange |= splitBranchCondition(F);
  }

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
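/// An illustrative sketch (hand-written CFG, not from an actual test case):
///
///   bb1:                ; single successor bb2
///     ...
///     br label %bb2
///   bb2:                ; single predecessor bb1
///     ...
///
/// collapses into one block containing both blocks' instructions.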
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB->getIterator();
    }
  }
  return Changed;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
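/// An illustrative sketch (hand-written IR): a block such as
///
///   bb:
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %dest
///
/// is removed by retargeting %pred1 and %pred2 straight at %dest and updating
/// %dest's phis, provided canMergeBlocks proves the incoming values cannot
/// conflict.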
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI->getIterator();
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!canMergeBlocks(BB, DestBB))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) that we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case transform is only valid when base dominates derived
      // relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast anymore. So we insert a new
    // bitcast no matter whether there is already one or not. In this way, we
    // can handle all cases, and the extra bitcast should be optimized away in
    // later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase,
        makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}

/// SinkCast - Sink the specified cast instruction into its user blocks
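/// An illustrative sketch (hand-written IR): given a cast whose only use is
/// in another block,
///
///   bb1:
///     %c = bitcast i8* %p to i32*
///     br label %bb2
///   bb2:
///     %v = load i32, i32* %c
///
/// an equivalent cast is materialized in bb2 and the use is rewritten to it,
/// so the cast no longer needs a cross-block virtual register.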
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
                                      CI->getType(), "", &*InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    MadeChange = true;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}

/// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if
/// possible.
///
/// Return true if any changes were made.
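/// An illustrative sketch (hand-written IR) of the add/icmp pattern this
/// matches and the code it creates:
///
///   %add = add i64 %a, %b
///   %cmp = icmp ult i64 %add, %a      ; unsigned-overflow check
/// =>
///   %uadd.overflow = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
///   %uadd = extractvalue { i64, i1 } %uadd.overflow, 0
///   %overflow = extractvalue { i64, i1 } %uadd.overflow, 1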
static bool CombineUAddWithOverflow(CmpInst *CI) {
  Value *A, *B;
  Instruction *AddI;
  if (!match(CI,
             m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
    return false;

  Type *Ty = AddI->getType();
  if (!isa<IntegerType>(Ty))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp:

  if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
    return false;

#ifndef NDEBUG
  // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  // for now:
  if (AddI->hasOneUse())
    assert(*AddI->user_begin() == CI && "expected!");
#endif

  Module *M = CI->getModule();
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);

  auto *InsertPt = AddI->hasOneUse() ? CI : AddI;

  auto *UAddWithOverflow =
      CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
  auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
  auto *Overflow =
      ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);

  CI->replaceAllUsesWith(Overflow);
  AddI->replaceAllUsesWith(UAdd);
  CI->eraseFromParent();
  AddI->eraseFromParent();
  return true;
}

/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool SinkCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
                          CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

static bool OptimizeCmpExpression(CmpInst *CI) {
  if (SinkCmpExpression(CI))
    return true;

  if (CombineUAddWithOverflow(CI))
    return true;

  return false;
}

/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
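///    For example (illustrative values): imm = 0x00ff qualifies, since
///    0x00ff & 0x0100 == 0, while imm = 0x00f0 does not, since
///    0x00f0 & 0x00f1 != 0.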
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}

/// Sink both shift and truncate instruction to the use of truncate's BB.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  TruncInst *TruncI = dyn_cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.

    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", &*TruncInsertPt);

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}

/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction and generate BitExtract
/// instruction. It will only be applied if the architecture supports BitExtract
/// instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if not
      // legal. In this case, we would like to sink both shift and truncate
      // instructions to the BB of TruncUse.
      // for example:
      // BB1:
      //  i64 shift.result = lshr i64 opnd, imm
      //  trunc.result = trunc shift.result to i16
      //
      // BB2:
      //  ----> We will have an implicit truncate here if the architecture does
      //  not have i16 compare.
      //  cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          &&
          (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, nuke the shift.
  if (ShiftI->use_empty())
    ShiftI->eraseFromParent();

  return MadeChange;
}

// Translate a masked load intrinsic like
// <16 x i32> @llvm.masked.load(<16 x i32>* %addr, i32 align,
//                              <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks, with loading element one-by-one if
// the appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  %3 = icmp eq i1 %2, true
//  br i1 %3, label %cond.load, label %else
//
// cond.load:                                        ; preds = %0
//  %4 = getelementptr i32* %1, i32 0
//  %5 = load i32* %4
//  %6 = insertelement <16 x i32> undef, i32 %5, i32 0
//  br label %else
//
// else:                                             ; preds = %0, %cond.load
//  %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ]
//  %7 = extractelement <16 x i1> %mask, i32 1
//  %8 = icmp eq i1 %7, true
//  br i1 %8, label %cond.load1, label %else2
//
// cond.load1:                                       ; preds = %else
//  %9 = getelementptr i32* %1, i32 1
//  %10 = load i32* %9
//  %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1
//  br label %else2
//
// else2:                                            ; preds = %else, %cond.load1
//  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
//  %12 = extractelement <16 x i1> %mask, i32 2
//  %13 = icmp eq i1 %12, true
//  br i1 %13, label %cond.load4, label %else5
//
static void scalarizeMaskedLoad(CallInst *CI) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Alignment = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  Value *Src0 = CI->getArgOperand(3);

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
  assert(VecType && "Unexpected return type of masked load intrinsic");

  Type *EltTy = CI->getType()->getVectorElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  BasicBlock *CondBlock = nullptr;
  BasicBlock *PrevIfBlock = CI->getParent();

  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  bool IsAllOnesMask = isa<Constant>(Mask) &&
    cast<Constant>(Mask)->isAllOnesValue();

  if (IsAllOnesMask) {
    Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
    CI->replaceAllUsesWith(NewI);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
  AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
  // Bitcast %addr from i8* to EltTy*
  Type *NewPtrType =
    EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  unsigned VectorWidth = VecType->getNumElements();

  Value *UndefVal = UndefValue::get(VecType);

  // The result vector
  Value *VResult = UndefVal;

  if (isa<ConstantVector>(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
        continue;
      Value *Gep =
          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
      LoadInst* Load = Builder.CreateAlignedLoad(Gep, AlignVal);
      VResult = Builder.CreateInsertElement(VResult, Load,
                                            Builder.getInt32(Idx));
    }
    Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
    CI->replaceAllUsesWith(NewI);
    CI->eraseFromParent();
    return;
  }

  PHINode *Phi = nullptr;
  Value *PrevPhi = UndefVal;

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block, created in the previous iteration
    //
    //  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_load = icmp eq i1 %mask_1, true
    //  br i1 %to_load, label %cond.load, label %else
    //
    if (Idx > 0) {
      Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
      Phi->addIncoming(VResult, CondBlock);
      Phi->addIncoming(PrevPhi, PrevIfBlock);
      PrevPhi = Phi;
      VResult = Phi;
    }

    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load");
    Builder.SetInsertPoint(InsertPt);

    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
    VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;
  }

  Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
  Phi->addIncoming(VResult, CondBlock);
  Phi->addIncoming(PrevPhi, PrevIfBlock);
  Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  CI->replaceAllUsesWith(NewI);
  CI->eraseFromParent();
}

// Translate a masked store intrinsic, like
// void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
//                         <16 x i1> %mask)
// to a chain of basic blocks, that stores element one-by-one if
// the appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  %3 = icmp eq i1 %2, true
//  br i1 %3, label %cond.store, label %else
//
// cond.store:                                       ; preds = %0
//  %4 = extractelement <16 x i32> %val, i32 0
//  %5 = getelementptr i32* %1, i32 0
//  store i32 %4, i32* %5
//  br label %else
//
// else:                                             ; preds = %0, %cond.store
//  %6 = extractelement <16 x i1> %mask, i32 1
//  %7 = icmp eq i1 %6, true
//  br i1 %7, label %cond.store1, label %else2
//
// cond.store1:                                      ; preds = %else
//  %8 = extractelement <16 x i32> %val, i32 1
//  %9 = getelementptr i32* %1, i32 1
//  store i32 %8, i32* %9
//  br label %else2
//   . . .
static void scalarizeMaskedStore(CallInst *CI) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Alignment = CI->getArgOperand(2);
  Value *Mask = CI->getArgOperand(3);

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
  assert(VecType && "Unexpected data type in masked store intrinsic");

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  bool IsAllOnesMask = isa<Constant>(Mask) &&
    cast<Constant>(Mask)->isAllOnesValue();

  if (IsAllOnesMask) {
    Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction: clamp down to the element
  // size, as in scalarizeMaskedLoad.
  AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
  // Bitcast %addr from i8* to EltTy*
  Type *NewPtrType =
    EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  unsigned VectorWidth = VecType->getNumElements();

  if (isa<ConstantVector>(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
      Value *Gep =
          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
      Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
    }
    CI->eraseFromParent();
    return;
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {

    // Fill the "else" block, created in the previous iteration
    //
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  %to_store = icmp eq i1 %mask_1, true
    //  br i1 %to_store, label %cond.store, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                                    ConstantInt::get(Predicate->getType(), 1));

    // Create "cond" block
    //
    //  %OneElt = extractelement <16 x i32> %Src, i32 Idx
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %store i32 %OneElt, i32* %EltAddr
    //
    BasicBlock *CondBlock =
        IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    Builder.CreateAlignedStore(OneElt, Gep, AlignVal);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }
  CI->eraseFromParent();
}

// Translate a masked gather intrinsic like
// <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %Ptrs, i32 4,
//                                       <16 x i1> %Mask, <16 x i32> %Src)
// to a chain of basic blocks, with loading element one-by-one if
// the appropriate mask bit is set
//
// % Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
// % Mask0 = extractelement <16 x i1> %Mask, i32 0
// % ToLoad0 = icmp eq i1 % Mask0, true
// br i1 % ToLoad0, label %cond.load, label %else
//
// cond.load:
// % Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
// % Load0 = load i32, i32* % Ptr0, align 4
// % Res0 = insertelement <16 x i32> undef, i32 % Load0, i32 0
// br label %else
//
// else:
// %res.phi.else = phi <16 x i32>[% Res0, %cond.load], [undef, % 0]
// % Mask1 = extractelement <16 x i1> %Mask, i32 1
// % ToLoad1 = icmp eq i1 % Mask1, true
// br i1 % ToLoad1, label %cond.load1, label %else2
//
// cond.load1:
// % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// % Load1 = load i32, i32* % Ptr1, align 4
// % Res1 = insertelement <16 x i32> %res.phi.else, i32 % Load1, i32 1
// br label %else2
// . . .
1405 // % Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src 1406 // ret <16 x i32> %Result 1407 static void scalarizeMaskedGather(CallInst *CI) { 1408 Value *Ptrs = CI->getArgOperand(0); 1409 Value *Alignment = CI->getArgOperand(1); 1410 Value *Mask = CI->getArgOperand(2); 1411 Value *Src0 = CI->getArgOperand(3); 1412 1413 VectorType *VecType = dyn_cast<VectorType>(CI->getType()); 1414 1415 assert(VecType && "Unexpected return type of masked load intrinsic"); 1416 1417 IRBuilder<> Builder(CI->getContext()); 1418 Instruction *InsertPt = CI; 1419 BasicBlock *IfBlock = CI->getParent(); 1420 BasicBlock *CondBlock = nullptr; 1421 BasicBlock *PrevIfBlock = CI->getParent(); 1422 Builder.SetInsertPoint(InsertPt); 1423 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1424 1425 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1426 1427 Value *UndefVal = UndefValue::get(VecType); 1428 1429 // The result vector 1430 Value *VResult = UndefVal; 1431 unsigned VectorWidth = VecType->getNumElements(); 1432 1433 // Shorten the way if the mask is a vector of constants. 1434 bool IsConstMask = isa<ConstantVector>(Mask); 1435 1436 if (IsConstMask) { 1437 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1438 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 1439 continue; 1440 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1441 "Ptr" + Twine(Idx)); 1442 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal, 1443 "Load" + Twine(Idx)); 1444 VResult = Builder.CreateInsertElement(VResult, Load, 1445 Builder.getInt32(Idx), 1446 "Res" + Twine(Idx)); 1447 } 1448 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0); 1449 CI->replaceAllUsesWith(NewI); 1450 CI->eraseFromParent(); 1451 return; 1452 } 1453 1454 PHINode *Phi = nullptr; 1455 Value *PrevPhi = UndefVal; 1456 1457 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1458 1459 // Fill the "else" block, created in the previous iteration 1460 // 1461 // %Mask1 = extractelement <16 x i1> %Mask, i32 1 1462 // %ToLoad1 = icmp eq i1 %Mask1, true 1463 // br i1 %ToLoad1, label %cond.load, label %else 1464 // 1465 if (Idx > 0) { 1466 Phi = Builder.CreatePHI(VecType, 2, "res.phi.else"); 1467 Phi->addIncoming(VResult, CondBlock); 1468 Phi->addIncoming(PrevPhi, PrevIfBlock); 1469 PrevPhi = Phi; 1470 VResult = Phi; 1471 } 1472 1473 Value *Predicate = Builder.CreateExtractElement(Mask, 1474 Builder.getInt32(Idx), 1475 "Mask" + Twine(Idx)); 1476 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 1477 ConstantInt::get(Predicate->getType(), 1), 1478 "ToLoad" + Twine(Idx)); 1479 1480 // Create "cond" block 1481 // 1482 // %EltAddr = getelementptr i32* %1, i32 0 1483 // %Elt = load i32* %EltAddr 1484 // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx 1485 // 1486 CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load"); 1487 Builder.SetInsertPoint(InsertPt); 1488 1489 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1490 "Ptr" + Twine(Idx)); 1491 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal, 1492 "Load" + Twine(Idx)); 1493 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx), 1494 "Res" + Twine(Idx)); 1495 1496 // Create "else" block, fill it in the next iteration 1497 BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else"); 1498 Builder.SetInsertPoint(InsertPt); 1499 Instruction *OldBr = IfBlock->getTerminator(); 1500 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 1501 
OldBr->eraseFromParent(); 1502 PrevIfBlock = IfBlock; 1503 IfBlock = NewIfBlock; 1504 } 1505 1506 Phi = Builder.CreatePHI(VecType, 2, "res.phi.select"); 1507 Phi->addIncoming(VResult, CondBlock); 1508 Phi->addIncoming(PrevPhi, PrevIfBlock); 1509 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0); 1510 CI->replaceAllUsesWith(NewI); 1511 CI->eraseFromParent(); 1512 } 1513 1514 // Translate a masked scatter intrinsic, like 1515 // void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*>* %Ptrs, i32 4, 1516 // <16 x i1> %Mask) 1517 // to a chain of basic blocks, that stores element one-by-one if 1518 // the appropriate mask bit is set. 1519 // 1520 // % Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind 1521 // % Mask0 = extractelement <16 x i1> % Mask, i32 0 1522 // % ToStore0 = icmp eq i1 % Mask0, true 1523 // br i1 %ToStore0, label %cond.store, label %else 1524 // 1525 // cond.store: 1526 // % Elt0 = extractelement <16 x i32> %Src, i32 0 1527 // % Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0 1528 // store i32 %Elt0, i32* % Ptr0, align 4 1529 // br label %else 1530 // 1531 // else: 1532 // % Mask1 = extractelement <16 x i1> % Mask, i32 1 1533 // % ToStore1 = icmp eq i1 % Mask1, true 1534 // br i1 % ToStore1, label %cond.store1, label %else2 1535 // 1536 // cond.store1: 1537 // % Elt1 = extractelement <16 x i32> %Src, i32 1 1538 // % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1 1539 // store i32 % Elt1, i32* % Ptr1, align 4 1540 // br label %else2 1541 // . . . 1542 static void scalarizeMaskedScatter(CallInst *CI) { 1543 Value *Src = CI->getArgOperand(0); 1544 Value *Ptrs = CI->getArgOperand(1); 1545 Value *Alignment = CI->getArgOperand(2); 1546 Value *Mask = CI->getArgOperand(3); 1547 1548 assert(isa<VectorType>(Src->getType()) && 1549 "Unexpected data type in masked scatter intrinsic"); 1550 assert(isa<VectorType>(Ptrs->getType()) && 1551 isa<PointerType>(Ptrs->getType()->getVectorElementType()) && 1552 "Vector of pointers is expected in masked scatter intrinsic"); 1553 1554 IRBuilder<> Builder(CI->getContext()); 1555 Instruction *InsertPt = CI; 1556 BasicBlock *IfBlock = CI->getParent(); 1557 Builder.SetInsertPoint(InsertPt); 1558 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1559 1560 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1561 unsigned VectorWidth = Src->getType()->getVectorNumElements(); 1562 1563 // Shorten the way if the mask is a vector of constants. 
  bool IsConstMask = isa<ConstantVector>(Mask);

  if (IsConstMask) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
                                                   "Elt" + Twine(Idx));
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                                "Ptr" + Twine(Idx));
      Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
    }
    CI->eraseFromParent();
    return;
  }
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %Mask1 = extractelement <16 x i1> %Mask, i32 Idx
    //  %ToStore = icmp eq i1 %Mask1, true
    //  br i1 %ToStore, label %cond.store, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask,
                                                    Builder.getInt32(Idx),
                                                    "Mask" + Twine(Idx));
    Value *Cmp =
        Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
                           ConstantInt::get(Predicate->getType(), 1),
                           "ToStore" + Twine(Idx));

    // Create "cond" block
    //
    //  %Elt1 = extractelement <16 x i32> %Src, i32 1
    //  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
    //  store i32 %Elt1, i32* %Ptr1
    //
    BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
                                                 "Elt" + Twine(Idx));
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                              "Ptr" + Twine(Idx));
    Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }
  CI->eraseFromParent();
}

/// If counting leading or trailing zeros is an expensive operation and a zero
/// input is defined, add a check for zero to avoid calling the intrinsic.
///
/// We want to transform:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
///
/// into:
///   entry:
///     %cmpz = icmp eq i64 %A, 0
///     br i1 %cmpz, label %cond.end, label %cond.false
///   cond.false:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///     br label %cond.end
///   cond.end:
///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                  const TargetLowering *TLI,
                                  const DataLayout *DL,
                                  bool &ModifiedDT) {
  if (!TLI || !DL)
    return false;

  // If a zero input is undefined, it doesn't make sense to despeculate that.
  if (match(CountZeros->getOperand(1), m_One()))
    return false;

  // If it's cheap to speculate, there's nothing to do.
  auto IntrinsicID = CountZeros->getIntrinsicID();
  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
    return false;

  // Only handle legal scalar cases. Anything else requires too much work.
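  // For example (illustrative; what is legal depends on the target's
  // DataLayout): a <2 x i64> cttz is left untouched because it is a vector,
  // and an i128 cttz is left untouched on a target whose largest legal
  // integer type is 64 bits wide.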
  Type *Ty = CountZeros->getType();
  unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSize())
    return false;

  // The intrinsic will be sunk behind a compare against zero and branch.
  BasicBlock *StartBlock = CountZeros->getParent();
  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");

  // Create another block after the count zero intrinsic. A PHI will be added
  // in this block to select the result of the intrinsic or the bit-width
  // constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(&EndBlock->front());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  CountZeros->replaceAllUsesWith(PN);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = true;
  return true;
}

bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize, PrefAlign;
  if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->arg_operands()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
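      // For instance (illustrative numbers): with PrefAlign = 16, an argument
      // pointing at offset 32 into a 64-byte alloca that is only 4-byte
      // aligned lets us simply raise the alloca's alignment to 16 (assuming
      // the size threshold is met), while an argument at offset 8 is skipped
      // because 8 is not a multiple of 16.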
1728 if (!Arg->getType()->isPointerTy()) 1729 continue; 1730 APInt Offset(DL->getPointerSizeInBits( 1731 cast<PointerType>(Arg->getType())->getAddressSpace()), 1732 0); 1733 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 1734 uint64_t Offset2 = Offset.getLimitedValue(); 1735 if ((Offset2 & (PrefAlign-1)) != 0) 1736 continue; 1737 AllocaInst *AI; 1738 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 1739 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 1740 AI->setAlignment(PrefAlign); 1741 // Global variables can only be aligned if they are defined in this 1742 // object (i.e. they are uniquely initialized in this object), and 1743 // over-aligning global variables that have an explicit section is 1744 // forbidden. 1745 GlobalVariable *GV; 1746 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 1747 GV->getAlignment() < PrefAlign && 1748 DL->getTypeAllocSize(GV->getValueType()) >= 1749 MinSize + Offset2) 1750 GV->setAlignment(PrefAlign); 1751 } 1752 // If this is a memcpy (or similar) then we may be able to improve the 1753 // alignment 1754 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 1755 unsigned Align = getKnownAlignment(MI->getDest(), *DL); 1756 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) 1757 Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL)); 1758 if (Align > MI->getAlignment()) 1759 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align)); 1760 } 1761 } 1762 1763 // If we have a cold call site, try to sink addressing computation into the 1764 // cold block. This interacts with our handling for loads and stores to 1765 // ensure that we can fold all uses of a potential addressing computation 1766 // into their uses. TODO: generalize this to work over profiling data 1767 if (!OptSize && CI->hasFnAttr(Attribute::Cold)) 1768 for (auto &Arg : CI->arg_operands()) { 1769 if (!Arg->getType()->isPointerTy()) 1770 continue; 1771 unsigned AS = Arg->getType()->getPointerAddressSpace(); 1772 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); 1773 } 1774 1775 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 1776 if (II) { 1777 switch (II->getIntrinsicID()) { 1778 default: break; 1779 case Intrinsic::objectsize: { 1780 // Lower all uses of llvm.objectsize.* 1781 bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1); 1782 Type *ReturnTy = CI->getType(); 1783 Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL); 1784 1785 // Substituting this can cause recursive simplifications, which can 1786 // invalidate our iterator. Use a WeakVH to hold onto it in case this 1787 // happens. 1788 Value *CurValue = &*CurInstIterator; 1789 WeakVH IterHandle(CurValue); 1790 1791 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1792 1793 // If the iterator instruction was recursively deleted, start over at the 1794 // start of the block. 
1795 if (IterHandle != CurValue) { 1796 CurInstIterator = BB->begin(); 1797 SunkAddrs.clear(); 1798 } 1799 return true; 1800 } 1801 case Intrinsic::masked_load: { 1802 // Scalarize unsupported vector masked load 1803 if (!TTI->isLegalMaskedLoad(CI->getType())) { 1804 scalarizeMaskedLoad(CI); 1805 ModifiedDT = true; 1806 return true; 1807 } 1808 return false; 1809 } 1810 case Intrinsic::masked_store: { 1811 if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType())) { 1812 scalarizeMaskedStore(CI); 1813 ModifiedDT = true; 1814 return true; 1815 } 1816 return false; 1817 } 1818 case Intrinsic::masked_gather: { 1819 if (!TTI->isLegalMaskedGather(CI->getType())) { 1820 scalarizeMaskedGather(CI); 1821 ModifiedDT = true; 1822 return true; 1823 } 1824 return false; 1825 } 1826 case Intrinsic::masked_scatter: { 1827 if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) { 1828 scalarizeMaskedScatter(CI); 1829 ModifiedDT = true; 1830 return true; 1831 } 1832 return false; 1833 } 1834 case Intrinsic::aarch64_stlxr: 1835 case Intrinsic::aarch64_stxr: { 1836 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 1837 if (!ExtVal || !ExtVal->hasOneUse() || 1838 ExtVal->getParent() == CI->getParent()) 1839 return false; 1840 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 1841 ExtVal->moveBefore(CI); 1842 // Mark this instruction as "inserted by CGP", so that other 1843 // optimizations don't touch it. 1844 InsertedInsts.insert(ExtVal); 1845 return true; 1846 } 1847 case Intrinsic::invariant_group_barrier: 1848 II->replaceAllUsesWith(II->getArgOperand(0)); 1849 II->eraseFromParent(); 1850 return true; 1851 1852 case Intrinsic::cttz: 1853 case Intrinsic::ctlz: 1854 // If counting zeros is expensive, try to avoid it. 1855 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 1856 } 1857 1858 if (TLI) { 1859 // Unknown address space. 1860 // TODO: Target hook to pick which address space the intrinsic cares 1861 // about? 1862 unsigned AddrSpace = ~0u; 1863 SmallVector<Value*, 2> PtrOps; 1864 Type *AccessTy; 1865 if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy, AddrSpace)) 1866 while (!PtrOps.empty()) 1867 if (optimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy, AddrSpace)) 1868 return true; 1869 } 1870 } 1871 1872 // From here on out we're working with named functions. 1873 if (!CI->getCalledFunction()) return false; 1874 1875 // Lower all default uses of _chk calls. This is very similar 1876 // to what InstCombineCalls does, but here we are only lowering calls 1877 // to fortified library functions (e.g. __memcpy_chk) that have the default 1878 // "don't know" as the objectsize. Anything else should be left alone. 1879 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 1880 if (Value *V = Simplifier.optimizeCall(CI)) { 1881 CI->replaceAllUsesWith(V); 1882 CI->eraseFromParent(); 1883 return true; 1884 } 1885 return false; 1886 } 1887 1888 /// Look for opportunities to duplicate return instructions to the predecessor 1889 /// to enable tail call optimizations. 
The case it is currently looking for is: 1890 /// @code 1891 /// bb0: 1892 /// %tmp0 = tail call i32 @f0() 1893 /// br label %return 1894 /// bb1: 1895 /// %tmp1 = tail call i32 @f1() 1896 /// br label %return 1897 /// bb2: 1898 /// %tmp2 = tail call i32 @f2() 1899 /// br label %return 1900 /// return: 1901 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 1902 /// ret i32 %retval 1903 /// @endcode 1904 /// 1905 /// => 1906 /// 1907 /// @code 1908 /// bb0: 1909 /// %tmp0 = tail call i32 @f0() 1910 /// ret i32 %tmp0 1911 /// bb1: 1912 /// %tmp1 = tail call i32 @f1() 1913 /// ret i32 %tmp1 1914 /// bb2: 1915 /// %tmp2 = tail call i32 @f2() 1916 /// ret i32 %tmp2 1917 /// @endcode 1918 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { 1919 if (!TLI) 1920 return false; 1921 1922 ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()); 1923 if (!RI) 1924 return false; 1925 1926 PHINode *PN = nullptr; 1927 BitCastInst *BCI = nullptr; 1928 Value *V = RI->getReturnValue(); 1929 if (V) { 1930 BCI = dyn_cast<BitCastInst>(V); 1931 if (BCI) 1932 V = BCI->getOperand(0); 1933 1934 PN = dyn_cast<PHINode>(V); 1935 if (!PN) 1936 return false; 1937 } 1938 1939 if (PN && PN->getParent() != BB) 1940 return false; 1941 1942 // It's not safe to eliminate the sign / zero extension of the return value. 1943 // See llvm::isInTailCallPosition(). 1944 const Function *F = BB->getParent(); 1945 AttributeSet CallerAttrs = F->getAttributes(); 1946 if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) || 1947 CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt)) 1948 return false; 1949 1950 // Make sure there are no instructions between the PHI and return, or that the 1951 // return is the first instruction in the block. 1952 if (PN) { 1953 BasicBlock::iterator BI = BB->begin(); 1954 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); 1955 if (&*BI == BCI) 1956 // Also skip over the bitcast. 1957 ++BI; 1958 if (&*BI != RI) 1959 return false; 1960 } else { 1961 BasicBlock::iterator BI = BB->begin(); 1962 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 1963 if (&*BI != RI) 1964 return false; 1965 } 1966 1967 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 1968 /// call. 1969 SmallVector<CallInst*, 4> TailCalls; 1970 if (PN) { 1971 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 1972 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 1973 // Make sure the phi value is indeed produced by the tail call. 
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI).second)
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the return attributes of the call to match those
    // of the caller. Ignore noalias because it doesn't affect the call
    // sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "")
       << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

/// \brief This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
class TypePromotionTransaction {

  /// \brief This represents the common interface of the individual transaction
  /// actions. Each class implements the logic for doing one specific
  /// modification on the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// \brief Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() {}

    /// \brief Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact
    /// same state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// \brief Advocate every change made by this action.
    /// When the action's effects on the IR are to be kept, it is important
    /// to call this function, otherwise hidden information may be kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// \brief Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;
    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// \brief Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst->getIterator();
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = &*--It;
      else
        Point.BB = Inst->getParent();
    }

    /// \brief Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = &*Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };

  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// \brief Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;
    /// Index of the modified operand.
    unsigned Idx;

  public:
    /// \brief Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the operand.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// \brief Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;
  public:
    /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// \brief Get the built value.
    Value *getBuiltValue() { return Val; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };

  /// \brief Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction that uses the replaced instruction.
      Instruction *Inst;
      /// The operand index at which the replaced instruction is used.
      unsigned Idx;
      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;

  public:
    /// \brief Replace all the uses of \p Inst with \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };

  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;
    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer;

  public:
    /// \brief Remove all references to \p Inst and optionally replace all its
    /// uses with New.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          Replacer(nullptr) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    /// \brief Really remove the instruction.
    void commit() override { delete Inst; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when this action was built.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  typedef const TypePromotionAction *ConstRestorationPt;
  /// Advocate every change made in that transaction.
  void commit();
  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);
  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);
  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);
  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);
  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

private:
  /// The ordered list of actions made so far.
2461 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2462 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt; 2463 }; 2464 2465 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2466 Value *NewVal) { 2467 Actions.push_back( 2468 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal)); 2469 } 2470 2471 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2472 Value *NewVal) { 2473 Actions.push_back( 2474 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal)); 2475 } 2476 2477 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2478 Value *New) { 2479 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2480 } 2481 2482 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2483 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2484 } 2485 2486 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2487 Type *Ty) { 2488 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2489 Value *Val = Ptr->getBuiltValue(); 2490 Actions.push_back(std::move(Ptr)); 2491 return Val; 2492 } 2493 2494 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2495 Value *Opnd, Type *Ty) { 2496 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2497 Value *Val = Ptr->getBuiltValue(); 2498 Actions.push_back(std::move(Ptr)); 2499 return Val; 2500 } 2501 2502 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2503 Value *Opnd, Type *Ty) { 2504 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2505 Value *Val = Ptr->getBuiltValue(); 2506 Actions.push_back(std::move(Ptr)); 2507 return Val; 2508 } 2509 2510 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2511 Instruction *Before) { 2512 Actions.push_back( 2513 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before)); 2514 } 2515 2516 TypePromotionTransaction::ConstRestorationPt 2517 TypePromotionTransaction::getRestorationPoint() const { 2518 return !Actions.empty() ? Actions.back().get() : nullptr; 2519 } 2520 2521 void TypePromotionTransaction::commit() { 2522 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2523 ++It) 2524 (*It)->commit(); 2525 Actions.clear(); 2526 } 2527 2528 void TypePromotionTransaction::rollback( 2529 TypePromotionTransaction::ConstRestorationPt Point) { 2530 while (!Actions.empty() && Point != Actions.back().get()) { 2531 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2532 Curr->undo(); 2533 } 2534 } 2535 2536 /// \brief A helper class for matching addressing modes. 2537 /// 2538 /// This encapsulates the logic for matching the target-legal addressing modes. 2539 class AddressingModeMatcher { 2540 SmallVectorImpl<Instruction*> &AddrModeInsts; 2541 const TargetMachine &TM; 2542 const TargetLowering &TLI; 2543 const DataLayout &DL; 2544 2545 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2546 /// the memory instruction that we're computing this address for. 2547 Type *AccessTy; 2548 unsigned AddrSpace; 2549 Instruction *MemoryInst; 2550 2551 /// This is the addressing mode that we're building up. This is 2552 /// part of the return value of this addressing mode matching stuff. 2553 ExtAddrMode &AddrMode; 2554 2555 /// The instructions inserted by other CodeGenPrepare optimizations. 
  const SetOfInstrs &InsertedInsts;
  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;
  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
                        const TargetMachine &TM, Type *AT, unsigned AS,
                        Instruction *MI, ExtAddrMode &AM,
                        const SetOfInstrs &InsertedInsts,
                        InstrToOrigTy &PromotedInsts,
                        TypePromotionTransaction &TPT)
      : AddrModeInsts(AMI), TM(TM),
        TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
                 ->getTargetLowering()),
        DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
        MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
        PromotedInsts(PromotedInsts), TPT(TPT) {
    IgnoreProfitability = false;
  }
public:

  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction*> &AddrModeInsts,
                           const TargetMachine &TM,
                           const SetOfInstrs &InsertedInsts,
                           InstrToOrigTy &PromotedInsts,
                           TypePromotionTransaction &TPT) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
                                         MemoryInst, Result, InsertedInsts,
                                         PromotedInsts, TPT).matchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }
private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *V, unsigned Depth);
  bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};

/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}

/// \brief Helper class to perform type promotion.
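/// For instance, a sketch of the kind of rewrite this helper enables,
/// assuming the add cannot wrap in the signed sense (nsw), so the sext can
/// be hoisted through it:
///   %add = add nsw i32 %a, %b
///   %res = sext i32 %add to i64
/// becomes
///   %exta = sext i32 %a to i64
///   %extb = sext i32 %b to i64
///   %res = add nsw i64 %exta, %extb
/// which may expose %res to the addressing mode matcher.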
2722 class TypePromotionHelper { 2723 /// \brief Utility function to check whether or not a sign or zero extension 2724 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 2725 /// either using the operands of \p Inst or promoting \p Inst. 2726 /// The type of the extension is defined by \p IsSExt. 2727 /// In other words, check if: 2728 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 2729 /// #1 Promotion applies: 2730 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 2731 /// #2 Operand reuses: 2732 /// ext opnd1 to ConsideredExtType. 2733 /// \p PromotedInsts maps the instructions to their type before promotion. 2734 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 2735 const InstrToOrigTy &PromotedInsts, bool IsSExt); 2736 2737 /// \brief Utility function to determine if \p OpIdx should be promoted when 2738 /// promoting \p Inst. 2739 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 2740 return !(isa<SelectInst>(Inst) && OpIdx == 0); 2741 } 2742 2743 /// \brief Utility function to promote the operand of \p Ext when this 2744 /// operand is a promotable trunc or sext or zext. 2745 /// \p PromotedInsts maps the instructions to their type before promotion. 2746 /// \p CreatedInstsCost[out] contains the cost of all instructions 2747 /// created to promote the operand of Ext. 2748 /// Newly added extensions are inserted in \p Exts. 2749 /// Newly added truncates are inserted in \p Truncs. 2750 /// Should never be called directly. 2751 /// \return The promoted value which is used instead of Ext. 2752 static Value *promoteOperandForTruncAndAnyExt( 2753 Instruction *Ext, TypePromotionTransaction &TPT, 2754 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2755 SmallVectorImpl<Instruction *> *Exts, 2756 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 2757 2758 /// \brief Utility function to promote the operand of \p Ext when this 2759 /// operand is promotable and is not a supported trunc or sext. 2760 /// \p PromotedInsts maps the instructions to their type before promotion. 2761 /// \p CreatedInstsCost[out] contains the cost of all the instructions 2762 /// created to promote the operand of Ext. 2763 /// Newly added extensions are inserted in \p Exts. 2764 /// Newly added truncates are inserted in \p Truncs. 2765 /// Should never be called directly. 2766 /// \return The promoted value which is used instead of Ext. 2767 static Value *promoteOperandForOther(Instruction *Ext, 2768 TypePromotionTransaction &TPT, 2769 InstrToOrigTy &PromotedInsts, 2770 unsigned &CreatedInstsCost, 2771 SmallVectorImpl<Instruction *> *Exts, 2772 SmallVectorImpl<Instruction *> *Truncs, 2773 const TargetLowering &TLI, bool IsSExt); 2774 2775 /// \see promoteOperandForOther. 2776 static Value *signExtendOperandForOther( 2777 Instruction *Ext, TypePromotionTransaction &TPT, 2778 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2779 SmallVectorImpl<Instruction *> *Exts, 2780 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 2781 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 2782 Exts, Truncs, TLI, true); 2783 } 2784 2785 /// \see promoteOperandForOther. 
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
                           InstrToOrigTy &PromotedInsts,
                           unsigned &CreatedInstsCost,
                           SmallVectorImpl<Instruction *> *Exts,
                           SmallVectorImpl<Instruction *> *Truncs,
                           const TargetLowering &TLI);
  /// \brief Given a sign/zero extend instruction \p Ext, return the
  /// appropriate action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later, thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator, if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
       (IsSExt && BinOp->hasNoSignedWrap())))
    return true;

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source type is narrow enough.
  // I.e., check that trunc just drops extended bits of the same kind as
  // the extension.
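  // An illustrative case (the exact types are arbitrary):
  //   %s = sext i16 %x to i32
  //   %t = trunc i32 %s to i24
  //   %e = sext i24 %t to i64
  // Here the trunc only drops bits that were produced by a sign extension,
  // so the outer sext can be fed from the narrow value directly.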
  // #1 Get the type of the operand and check the kind of the extended bits.
  const Type *OpndType;
  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
  if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
    OpndType = It->second.getPointer();
  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
    OpndType = Opnd->getOperand(0)->getType();
  else
    return false;

  // #2 Check that the truncate just drops extended bits.
  return Inst->getType()->getIntegerBitWidth() >=
         OpndType->getIntegerBitWidth();
}

TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check that we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt, ZExt or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}

Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    llvm::Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
2945 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 2946 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 2947 if (ExtInst) { 2948 if (Exts) 2949 Exts->push_back(ExtInst); 2950 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 2951 } 2952 return ExtVal; 2953 } 2954 2955 // At this point we have: ext ty opnd to ty. 2956 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 2957 Value *NextVal = ExtInst->getOperand(0); 2958 TPT.eraseInstruction(ExtInst, NextVal); 2959 return NextVal; 2960 } 2961 2962 Value *TypePromotionHelper::promoteOperandForOther( 2963 Instruction *Ext, TypePromotionTransaction &TPT, 2964 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2965 SmallVectorImpl<Instruction *> *Exts, 2966 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 2967 bool IsSExt) { 2968 // By construction, the operand of Ext is an instruction. Otherwise we cannot 2969 // get through it and this method should not be called. 2970 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 2971 CreatedInstsCost = 0; 2972 if (!ExtOpnd->hasOneUse()) { 2973 // ExtOpnd will be promoted. 2974 // All its uses, but Ext, will need to use a truncated value of the 2975 // promoted version. 2976 // Create the truncate now. 2977 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 2978 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 2979 ITrunc->removeFromParent(); 2980 // Insert it just after the definition. 2981 ITrunc->insertAfter(ExtOpnd); 2982 if (Truncs) 2983 Truncs->push_back(ITrunc); 2984 } 2985 2986 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 2987 // Restore the operand of Ext (which has been replaced by the previous call 2988 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 2989 TPT.setOperand(Ext, 0, ExtOpnd); 2990 } 2991 2992 // Get through the Instruction: 2993 // 1. Update its type. 2994 // 2. Replace the uses of Ext by Inst. 2995 // 3. Extend each operand that needs to be extended. 2996 2997 // Remember the original type of the instruction before promotion. 2998 // This is useful to know that the high bits are sign extended bits. 2999 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>( 3000 ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt))); 3001 // Step #1. 3002 TPT.mutateType(ExtOpnd, Ext->getType()); 3003 // Step #2. 3004 TPT.replaceAllUsesWith(Ext, ExtOpnd); 3005 // Step #3. 3006 Instruction *ExtForOpnd = Ext; 3007 3008 DEBUG(dbgs() << "Propagate Ext to operands\n"); 3009 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 3010 ++OpIdx) { 3011 DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 3012 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 3013 !shouldExtOperand(ExtOpnd, OpIdx)) { 3014 DEBUG(dbgs() << "No need to propagate\n"); 3015 continue; 3016 } 3017 // Check if we can statically extend the operand. 3018 Value *Opnd = ExtOpnd->getOperand(OpIdx); 3019 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 3020 DEBUG(dbgs() << "Statically extend\n"); 3021 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 3022 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 3023 : Cst->getValue().zext(BitWidth); 3024 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 3025 continue; 3026 } 3027 // UndefValue are typed, so we have to statically sign extend them. 
    if (isa<UndefValue>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    // Check if Ext was already reused to extend an operand.
    if (!ExtForOpnd) {
      // If so, create a new extension.
      DEBUG(dbgs() << "More operands to ext\n");
      Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
                                    : TPT.createZExt(Ext, Opnd, Ext->getType());
      if (!isa<Instruction>(ValForExtOpnd)) {
        TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
        continue;
      }
      ExtForOpnd = cast<Instruction>(ValForExtOpnd);
    }
    if (Exts)
      Exts->push_back(ExtForOpnd);
    TPT.setOperand(ExtForOpnd, 0, Opnd);

    // Move the sign extension before the insertion point.
    TPT.moveBefore(ExtForOpnd, ExtOpnd);
    TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
    CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more extensions are required, new instructions will have to be
    // created.
    ExtForOpnd = nullptr;
  }
  if (ExtForOpnd == Ext) {
    DEBUG(dbgs() << "Extension is useless now\n");
    TPT.eraseInstruction(Ext);
  }
  return ExtOpnd;
}

/// Check whether or not promoting an instruction to a wider type is
/// profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode thanks to the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}

/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added in the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
3104 if (Depth >= 5) return false;
3105
3106 // By default, all matched instructions stay in place.
3107 if (MovedAway)
3108 *MovedAway = false;
3109
3110 switch (Opcode) {
3111 case Instruction::PtrToInt:
3112 // PtrToInt is always a noop, as we know that the int type is pointer sized.
3113 return matchAddr(AddrInst->getOperand(0), Depth);
3114 case Instruction::IntToPtr: {
3115 auto AS = AddrInst->getType()->getPointerAddressSpace();
3116 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
3117 // This inttoptr is a no-op if the integer type is pointer sized.
3118 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
3119 return matchAddr(AddrInst->getOperand(0), Depth);
3120 return false;
3121 }
3122 case Instruction::BitCast:
3123 // BitCast is always a noop, and we can handle it as long as it is
3124 // int->int or pointer->pointer (we don't want int<->fp or something).
3125 if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
3126 AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
3127 // Don't touch identity bitcasts. These were probably put here by LSR,
3128 // and we don't want to mess around with them. Assume it knows what it
3129 // is doing.
3130 AddrInst->getOperand(0)->getType() != AddrInst->getType())
3131 return matchAddr(AddrInst->getOperand(0), Depth);
3132 return false;
3133 case Instruction::AddrSpaceCast: {
3134 unsigned SrcAS
3135 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
3136 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
3137 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3138 return matchAddr(AddrInst->getOperand(0), Depth);
3139 return false;
3140 }
3141 case Instruction::Add: {
3142 // Check to see if we can merge in the RHS then the LHS. If so, we win.
3143 ExtAddrMode BackupAddrMode = AddrMode;
3144 unsigned OldSize = AddrModeInsts.size();
3145 // Start a transaction at this point.
3146 // The LHS may match but not the RHS.
3147 // Therefore, we need a higher level restoration point to undo a partially
3148 // matched operation.
3149 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3150 TPT.getRestorationPoint();
3151
3152 if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
3153 matchAddr(AddrInst->getOperand(0), Depth+1))
3154 return true;
3155
3156 // Restore the old addr mode info.
3157 AddrMode = BackupAddrMode;
3158 AddrModeInsts.resize(OldSize);
3159 TPT.rollback(LastKnownGood);
3160
3161 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
3162 if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
3163 matchAddr(AddrInst->getOperand(1), Depth+1))
3164 return true;
3165
3166 // Otherwise we definitely can't merge the ADD in.
3167 AddrMode = BackupAddrMode;
3168 AddrModeInsts.resize(OldSize);
3169 TPT.rollback(LastKnownGood);
3170 break;
3171 }
3172 //case Instruction::Or:
3173 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
3174 //break;
3175 case Instruction::Mul:
3176 case Instruction::Shl: {
3177 // Can only handle X*C and X << C.
3178 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
3179 if (!RHS)
3180 return false;
3181 int64_t Scale = RHS->getSExtValue();
3182 if (Opcode == Instruction::Shl)
3183 Scale = 1LL << Scale;
3184
3185 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
3186 }
3187 case Instruction::GetElementPtr: {
3188 // Scan the GEP. We check whether it contains constant offsets and at most
3189 // one variable offset.
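// Hedged example (invented IR, written in the older syntax the comments in
// this file already use): for
//   %p = getelementptr {i32, [10 x i32]}* %b, i64 0, i32 1, i64 %i
// the struct field contributes a constant offset of 4, and %i is recorded as
// the single variable index with scale 4 (the alloc size of i32).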
3190 int VariableOperand = -1; 3191 unsigned VariableScale = 0; 3192 3193 int64_t ConstantOffset = 0; 3194 gep_type_iterator GTI = gep_type_begin(AddrInst); 3195 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 3196 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 3197 const StructLayout *SL = DL.getStructLayout(STy); 3198 unsigned Idx = 3199 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 3200 ConstantOffset += SL->getElementOffset(Idx); 3201 } else { 3202 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); 3203 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 3204 ConstantOffset += CI->getSExtValue()*TypeSize; 3205 } else if (TypeSize) { // Scales of zero don't do anything. 3206 // We only allow one variable index at the moment. 3207 if (VariableOperand != -1) 3208 return false; 3209 3210 // Remember the variable index. 3211 VariableOperand = i; 3212 VariableScale = TypeSize; 3213 } 3214 } 3215 } 3216 3217 // A common case is for the GEP to only do a constant offset. In this case, 3218 // just add it to the disp field and check validity. 3219 if (VariableOperand == -1) { 3220 AddrMode.BaseOffs += ConstantOffset; 3221 if (ConstantOffset == 0 || 3222 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 3223 // Check to see if we can fold the base pointer in too. 3224 if (matchAddr(AddrInst->getOperand(0), Depth+1)) 3225 return true; 3226 } 3227 AddrMode.BaseOffs -= ConstantOffset; 3228 return false; 3229 } 3230 3231 // Save the valid addressing mode in case we can't match. 3232 ExtAddrMode BackupAddrMode = AddrMode; 3233 unsigned OldSize = AddrModeInsts.size(); 3234 3235 // See if the scale and offset amount is valid for this target. 3236 AddrMode.BaseOffs += ConstantOffset; 3237 3238 // Match the base operand of the GEP. 3239 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 3240 // If it couldn't be matched, just stuff the value in a register. 3241 if (AddrMode.HasBaseReg) { 3242 AddrMode = BackupAddrMode; 3243 AddrModeInsts.resize(OldSize); 3244 return false; 3245 } 3246 AddrMode.HasBaseReg = true; 3247 AddrMode.BaseReg = AddrInst->getOperand(0); 3248 } 3249 3250 // Match the remaining variable portion of the GEP. 3251 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 3252 Depth)) { 3253 // If it couldn't be matched, try stuffing the base into a register 3254 // instead of matching it, and retrying the match of the scale. 3255 AddrMode = BackupAddrMode; 3256 AddrModeInsts.resize(OldSize); 3257 if (AddrMode.HasBaseReg) 3258 return false; 3259 AddrMode.HasBaseReg = true; 3260 AddrMode.BaseReg = AddrInst->getOperand(0); 3261 AddrMode.BaseOffs += ConstantOffset; 3262 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 3263 VariableScale, Depth)) { 3264 // If even that didn't work, bail. 3265 AddrMode = BackupAddrMode; 3266 AddrModeInsts.resize(OldSize); 3267 return false; 3268 } 3269 } 3270 3271 return true; 3272 } 3273 case Instruction::SExt: 3274 case Instruction::ZExt: { 3275 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 3276 if (!Ext) 3277 return false; 3278 3279 // Try to move this ext out of the way of the addressing mode. 3280 // Ask for a method for doing so. 
3281 TypePromotionHelper::Action TPH = 3282 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 3283 if (!TPH) 3284 return false; 3285 3286 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3287 TPT.getRestorationPoint(); 3288 unsigned CreatedInstsCost = 0; 3289 unsigned ExtCost = !TLI.isExtFree(Ext); 3290 Value *PromotedOperand = 3291 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 3292 // SExt has been moved away. 3293 // Thus either it will be rematched later in the recursive calls or it is 3294 // gone. Anyway, we must not fold it into the addressing mode at this point. 3295 // E.g., 3296 // op = add opnd, 1 3297 // idx = ext op 3298 // addr = gep base, idx 3299 // is now: 3300 // promotedOpnd = ext opnd <- no match here 3301 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 3302 // addr = gep base, op <- match 3303 if (MovedAway) 3304 *MovedAway = true; 3305 3306 assert(PromotedOperand && 3307 "TypePromotionHelper should have filtered out those cases"); 3308 3309 ExtAddrMode BackupAddrMode = AddrMode; 3310 unsigned OldSize = AddrModeInsts.size(); 3311 3312 if (!matchAddr(PromotedOperand, Depth) || 3313 // The total of the new cost is equal to the cost of the created 3314 // instructions. 3315 // The total of the old cost is equal to the cost of the extension plus 3316 // what we have saved in the addressing mode. 3317 !isPromotionProfitable(CreatedInstsCost, 3318 ExtCost + (AddrModeInsts.size() - OldSize), 3319 PromotedOperand)) { 3320 AddrMode = BackupAddrMode; 3321 AddrModeInsts.resize(OldSize); 3322 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 3323 TPT.rollback(LastKnownGood); 3324 return false; 3325 } 3326 return true; 3327 } 3328 } 3329 return false; 3330 } 3331 3332 /// If we can, try to add the value of 'Addr' into the current addressing mode. 3333 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 3334 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 3335 /// for the target. 3336 /// 3337 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 3338 // Start a transaction at this point that we will rollback if the matching 3339 // fails. 3340 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3341 TPT.getRestorationPoint(); 3342 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 3343 // Fold in immediates if legal for the target. 3344 AddrMode.BaseOffs += CI->getSExtValue(); 3345 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3346 return true; 3347 AddrMode.BaseOffs -= CI->getSExtValue(); 3348 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 3349 // If this is a global variable, try to fold it into the addressing mode. 3350 if (!AddrMode.BaseGV) { 3351 AddrMode.BaseGV = GV; 3352 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3353 return true; 3354 AddrMode.BaseGV = nullptr; 3355 } 3356 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 3357 ExtAddrMode BackupAddrMode = AddrMode; 3358 unsigned OldSize = AddrModeInsts.size(); 3359 3360 // Check to see if it is possible to fold this operation. 3361 bool MovedAway = false; 3362 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 3363 // This instruction may have been moved away. If so, there is nothing 3364 // to check here. 3365 if (MovedAway) 3366 return true; 3367 // Okay, it's possible to fold this. Check to see if it is actually 3368 // *profitable* to do so. 
We use a simple cost model to avoid increasing
3369 // register pressure too much.
3370 if (I->hasOneUse() ||
3371 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
3372 AddrModeInsts.push_back(I);
3373 return true;
3374 }
3375
3376 // It isn't profitable to do this, roll back.
3377 DEBUG(dbgs() << "NOT FOLDING: " << *I << '\n');
3378 AddrMode = BackupAddrMode;
3379 AddrModeInsts.resize(OldSize);
3380 TPT.rollback(LastKnownGood);
3381 }
3382 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
3383 if (matchOperationAddr(CE, CE->getOpcode(), Depth))
3384 return true;
3385 TPT.rollback(LastKnownGood);
3386 } else if (isa<ConstantPointerNull>(Addr)) {
3387 // Null pointer gets folded without affecting the addressing mode.
3388 return true;
3389 }
3390
3391 // Worst case, the target should support [reg] addressing modes. :)
3392 if (!AddrMode.HasBaseReg) {
3393 AddrMode.HasBaseReg = true;
3394 AddrMode.BaseReg = Addr;
3395 // Still check for legality in case the target supports [imm] but not [i+r].
3396 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3397 return true;
3398 AddrMode.HasBaseReg = false;
3399 AddrMode.BaseReg = nullptr;
3400 }
3401
3402 // If the base register is already taken, see if we can do [r+r].
3403 if (AddrMode.Scale == 0) {
3404 AddrMode.Scale = 1;
3405 AddrMode.ScaledReg = Addr;
3406 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3407 return true;
3408 AddrMode.Scale = 0;
3409 AddrMode.ScaledReg = nullptr;
3410 }
3411 // Couldn't match.
3412 TPT.rollback(LastKnownGood);
3413 return false;
3414 }
3415
3416 /// Check to see if all uses of OpVal by the specified inline asm call are due
3417 /// to memory operands. If so, return true, otherwise return false.
3418 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
3419 const TargetMachine &TM) {
3420 const Function *F = CI->getParent()->getParent();
3421 const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
3422 const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo();
3423 TargetLowering::AsmOperandInfoVector TargetConstraints =
3424 TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI,
3425 ImmutableCallSite(CI));
3426 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
3427 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
3428
3429 // Compute the constraint code and ConstraintType to use.
3430 TLI->ComputeConstraintToUse(OpInfo, SDValue());
3431
3432 // If this asm operand is our Value*, and if it isn't an indirect memory
3433 // operand, we can't fold it!
3434 if (OpInfo.CallOperandVal == OpVal &&
3435 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
3436 !OpInfo.isIndirect))
3437 return false;
3438 }
3439
3440 return true;
3441 }
3442
3443 /// Recursively walk all the uses of I until we find a memory use.
3444 /// If we find an obviously non-foldable instruction, return true.
3445 /// Add the ultimately found memory instructions to MemoryUses.
3446 static bool FindAllMemoryUses(
3447 Instruction *I,
3448 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
3449 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetMachine &TM) {
3450 // If we already considered this instruction, we're done.
3451 if (!ConsideredInsts.insert(I).second)
3452 return false;
3453
3454 // If this is an obviously unfoldable instruction, bail out.
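// (Illustrative: if I itself is, say, a call or a divide, it can never
// become part of an addressing mode, so there is no point walking its uses.)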
3455 if (!MightBeFoldableInst(I))
3456 return true;
3457
3458 const bool OptSize = I->getFunction()->optForSize();
3459
3460 // Loop over all the uses, recursively processing them.
3461 for (Use &U : I->uses()) {
3462 Instruction *UserI = cast<Instruction>(U.getUser());
3463
3464 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
3465 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
3466 continue;
3467 }
3468
3469 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
3470 unsigned opNo = U.getOperandNo();
3471 if (opNo == 0) return true; // Storing addr, not into addr.
3472 MemoryUses.push_back(std::make_pair(SI, opNo));
3473 continue;
3474 }
3475
3476 if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
3477 // If this is a cold call, we can sink the addressing calculation into
3478 // the cold path. See optimizeCallInst.
3479 if (!OptSize && CI->hasFnAttr(Attribute::Cold))
3480 continue;
3481
3482 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
3483 if (!IA) return true;
3484
3485 // If this is a memory operand, we're cool, otherwise bail out.
3486 if (!IsOperandAMemoryOperand(CI, IA, I, TM))
3487 return true;
3488 continue;
3489 }
3490
3491 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM))
3492 return true;
3493 }
3494
3495 return false;
3496 }
3497
3498 /// Return true if Val is already known to be live at the use site that we're
3499 /// folding it into. If so, there is no cost to include it in the addressing
3500 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
3501 /// instruction already.
3502 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1,
3503 Value *KnownLive2) {
3504 // If Val is either of the known-live values, we know it is live!
3505 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
3506 return true;
3507
3508 // All values other than instructions and arguments (e.g. constants) are live.
3509 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
3510
3511 // If Val is a constant-sized alloca in the entry block, it is live; this is
3512 // because it is just a reference to the stack/frame pointer, which is live
3513 // for the whole function.
3514 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
3515 if (AI->isStaticAlloca())
3516 return true;
3517
3518 // Check to see if this value is already used in the memory instruction's
3519 // block. If so, it's already live into the block at the very least, so we
3520 // can reasonably fold it.
3521 return Val->isUsedInBasicBlock(MemoryInst->getParent());
3522 }
3523
3524 /// It is possible for the addressing mode of the machine to fold the specified
3525 /// instruction into a load or store that ultimately uses it.
3526 /// However, the specified instruction has multiple uses.
3527 /// Given this, it may actually increase register pressure to fold it
3528 /// into the load. For example, consider this code:
3529 ///
3530 /// X = ...
3531 /// Y = X+1
3532 /// use(Y) -> nonload/store
3533 /// Z = Y+1
3534 /// load Z
3535 ///
3536 /// In this case, Y has multiple uses, and can be folded into the load of Z
3537 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
3538 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
3539 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
3540 /// number of computations either.
3541 ///
3542 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
3543 /// X was live across 'load Z' for other reasons, we actually *would* want to
3544 /// fold the addressing mode in the Z case. This would make Y die earlier.
3545 bool AddressingModeMatcher::
3546 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
3547 ExtAddrMode &AMAfter) {
3548 if (IgnoreProfitability) return true;
3549
3550 // AMBefore is the addressing mode before this instruction was folded into it,
3551 // and AMAfter is the addressing mode after the instruction was folded. Get
3552 // the set of registers referenced by AMAfter and subtract out those
3553 // referenced by AMBefore: this is the set of values which folding in this
3554 // address extends the lifetime of.
3555 //
3556 // Note that there are only two potential values being referenced here,
3557 // BaseReg and ScaleReg (global addresses are always available, as are any
3558 // folded immediates).
3559 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
3560
3561 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
3562 // lifetime wasn't extended by adding this instruction.
3563 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
3564 BaseReg = nullptr;
3565 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
3566 ScaledReg = nullptr;
3567
3568 // If folding this instruction (and its subexprs) didn't extend any live
3569 // ranges, we're ok with it.
3570 if (!BaseReg && !ScaledReg)
3571 return true;
3572
3573 // If all uses of this instruction can have the address mode sunk into them,
3574 // we can remove the addressing mode and effectively trade one live register
3575 // for another (at worst.) In this context, folding an addressing mode into
3576 // the use is just a particularly nice way of sinking it.
3577 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
3578 SmallPtrSet<Instruction*, 16> ConsideredInsts;
3579 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM))
3580 return false; // Has a non-memory, non-foldable use!
3581
3582 // Now that we know that all uses of this instruction are part of a chain of
3583 // computation involving only operations that could theoretically be folded
3584 // into a memory use, loop over each of these memory operation uses and see
3585 // if they could *actually* fold the instruction. The assumption is that
3586 // addressing modes are cheap and that duplicating the computation involved
3587 // many times is worthwhile, even on a fastpath. For sinking candidates
3588 // (i.e. cold call sites), this serves as a way to prevent excessive code
3589 // growth since most architectures have some reasonably small and fast way to
3590 // compute an effective address (e.g., LEA on x86).
3591 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
3592 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
3593 Instruction *User = MemoryUses[i].first;
3594 unsigned OpNo = MemoryUses[i].second;
3595
3596 // Get the access type of this use. If the use isn't a pointer, we don't
3597 // know what it accesses.
3598 Value *Address = User->getOperand(OpNo);
3599 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
3600 if (!AddrTy)
3601 return false;
3602 Type *AddressAccessTy = AddrTy->getElementType();
3603 unsigned AS = AddrTy->getAddressSpace();
3604
3605 // Do a match against the root of this address, ignoring profitability. This
3606 // will tell us if the addressing mode for the memory operation will
3607 // *actually* cover the shared instruction.
3608 ExtAddrMode Result;
3609 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3610 TPT.getRestorationPoint();
3611 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
3612 MemoryInst, Result, InsertedInsts,
3613 PromotedInsts, TPT);
3614 Matcher.IgnoreProfitability = true;
3615 bool Success = Matcher.matchAddr(Address, 0);
3616 (void)Success; assert(Success && "Couldn't select *anything*?");
3617
3618 // The match was done only to check profitability; the changes made are not
3619 // part of the original matcher. Therefore, they must be dropped, otherwise
3620 // the original matcher will not be in the right state.
3621 TPT.rollback(LastKnownGood);
3622
3623 // If the match didn't cover I, then it won't be shared by it.
3624 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
3625 I) == MatchedAddrModeInsts.end())
3626 return false;
3627
3628 MatchedAddrModeInsts.clear();
3629 }
3630
3631 return true;
3632 }
3633
3634 } // end anonymous namespace
3635
3636 /// Return true if the specified value is defined in a different basic block
3637 /// than BB.
3638 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
3639 if (Instruction *I = dyn_cast<Instruction>(V))
3640 return I->getParent() != BB;
3641 return false;
3642 }
3643
3644 /// Sink addressing mode computation immediately before MemoryInst if doing so
3645 /// can be done without increasing register pressure. The need for the
3646 /// register pressure constraint means this can end up being an all-or-nothing
3647 /// decision for all uses of the same addressing computation.
3648 ///
3649 /// Load and Store Instructions often have addressing modes that can do
3650 /// significant amounts of computation. As such, instruction selection will try
3651 /// to get the load or store to do as much computation as possible for the
3652 /// program. The problem is that isel can only see within a single block. As
3653 /// such, we sink as much legal addressing mode work into the block as possible.
3654 ///
3655 /// This method is used to optimize both load/store and inline asms with memory
3656 /// operands. It's also used to sink addressing computations feeding into cold
3657 /// call sites into their (cold) basic block.
3658 ///
3659 /// The motivation for handling sinking into cold blocks is that doing so can
3660 /// both enable other address mode sinking (by satisfying the register pressure
3661 /// constraint above), and reduce register pressure globally (by removing the
3662 /// addressing mode computation from the fast path entirely).
3663 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
3664 Type *AccessTy, unsigned AddrSpace) {
3665 Value *Repl = Addr;
3666
3667 // Try to collapse single-value PHI nodes. This is necessary to undo
3668 // unprofitable PRE transformations.
3669 SmallVector<Value*, 8> worklist;
3670 SmallPtrSet<Value*, 16> Visited;
3671 worklist.push_back(Addr);
3672
3673 // Use a worklist to iteratively look through PHI nodes, and ensure that
3674 // the addressing modes obtained from the non-PHI roots of the graph
3675 // are equivalent.
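// A hedged illustration (invented IR, older syntax): given
//   %a = phi i32* [ %g1, %bb1 ], [ %g2, %bb2 ]
//   %v = load i32* %a
// the traversal below only succeeds if %g1 and %g2 yield the same
// ExtAddrMode, i.e. the PHI can be looked through without changing the
// address computation.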
3676 Value *Consensus = nullptr;
3677 unsigned NumUsesConsensus = 0;
3678 bool IsNumUsesConsensusValid = false;
3679 SmallVector<Instruction*, 16> AddrModeInsts;
3680 ExtAddrMode AddrMode;
3681 TypePromotionTransaction TPT;
3682 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3683 TPT.getRestorationPoint();
3684 while (!worklist.empty()) {
3685 Value *V = worklist.back();
3686 worklist.pop_back();
3687
3688 // Break use-def graph loops.
3689 if (!Visited.insert(V).second) {
3690 Consensus = nullptr;
3691 break;
3692 }
3693
3694 // For a PHI node, push all of its incoming values.
3695 if (PHINode *P = dyn_cast<PHINode>(V)) {
3696 for (Value *IncValue : P->incoming_values())
3697 worklist.push_back(IncValue);
3698 continue;
3699 }
3700
3701 // For non-PHIs, determine the addressing mode being computed. Note that
3702 // the result may differ depending on what other uses our candidate
3703 // addressing instructions might have.
3704 SmallVector<Instruction*, 16> NewAddrModeInsts;
3705 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
3706 V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
3707 InsertedInsts, PromotedInsts, TPT);
3708
3709 // This check is broken into two cases with very similar code to avoid
3710 // calling getNumUses() where possible. Some values have a lot of uses, so
3711 // calling getNumUses() unconditionally caused a significant compile-time
3712 // regression.
3713 if (!Consensus) {
3714 Consensus = V;
3715 AddrMode = NewAddrMode;
3716 AddrModeInsts = NewAddrModeInsts;
3717 continue;
3718 } else if (NewAddrMode == AddrMode) {
3719 if (!IsNumUsesConsensusValid) {
3720 NumUsesConsensus = Consensus->getNumUses();
3721 IsNumUsesConsensusValid = true;
3722 }
3723
3724 // Ensure that the obtained addressing mode is equivalent to that obtained
3725 // for all other roots of the PHI traversal. Also, when choosing one
3726 // such root as representative, select the one with the most uses in order
3727 // to keep the cost modeling heuristics in AddressingModeMatcher
3728 // applicable.
3729 unsigned NumUses = V->getNumUses();
3730 if (NumUses > NumUsesConsensus) {
3731 Consensus = V;
3732 NumUsesConsensus = NumUses;
3733 AddrModeInsts = NewAddrModeInsts;
3734 }
3735 continue;
3736 }
3737
3738 Consensus = nullptr;
3739 break;
3740 }
3741
3742 // If the addressing mode couldn't be determined, or if multiple different
3743 // ones were determined, bail out now.
3744 if (!Consensus) {
3745 TPT.rollback(LastKnownGood);
3746 return false;
3747 }
3748 TPT.commit();
3749
3750 // Check to see if any of the instructions subsumed by this addr mode are
3751 // non-local to I's BB.
3752 bool AnyNonLocal = false;
3753 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
3754 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
3755 AnyNonLocal = true;
3756 break;
3757 }
3758 }
3759
3760 // If all the instructions matched are already in this BB, don't do anything.
3761 if (!AnyNonLocal) {
3762 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
3763 return false;
3764 }
3765
3766 // Insert this computation right after this user. Since our caller is
3767 // scanning from the top of the BB to the bottom, reuses of the expression
3768 // are guaranteed to happen later.
3769 IRBuilder<> Builder(MemoryInst);
3770
3771 // Now that we've determined the addressing expression we want to use and
3772 // know that we have to sink it into this block, check to see if we have
3773 // already done this for some other load/store instr in this block.
If so, reuse the 3774 // computation. 3775 Value *&SunkAddr = SunkAddrs[Addr]; 3776 if (SunkAddr) { 3777 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " 3778 << *MemoryInst << "\n"); 3779 if (SunkAddr->getType() != Addr->getType()) 3780 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 3781 } else if (AddrSinkUsingGEPs || 3782 (!AddrSinkUsingGEPs.getNumOccurrences() && TM && 3783 TM->getSubtargetImpl(*MemoryInst->getParent()->getParent()) 3784 ->useAA())) { 3785 // By default, we use the GEP-based method when AA is used later. This 3786 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. 3787 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 3788 << *MemoryInst << "\n"); 3789 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 3790 Value *ResultPtr = nullptr, *ResultIndex = nullptr; 3791 3792 // First, find the pointer. 3793 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { 3794 ResultPtr = AddrMode.BaseReg; 3795 AddrMode.BaseReg = nullptr; 3796 } 3797 3798 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { 3799 // We can't add more than one pointer together, nor can we scale a 3800 // pointer (both of which seem meaningless). 3801 if (ResultPtr || AddrMode.Scale != 1) 3802 return false; 3803 3804 ResultPtr = AddrMode.ScaledReg; 3805 AddrMode.Scale = 0; 3806 } 3807 3808 if (AddrMode.BaseGV) { 3809 if (ResultPtr) 3810 return false; 3811 3812 ResultPtr = AddrMode.BaseGV; 3813 } 3814 3815 // If the real base value actually came from an inttoptr, then the matcher 3816 // will look through it and provide only the integer value. In that case, 3817 // use it here. 3818 if (!ResultPtr && AddrMode.BaseReg) { 3819 ResultPtr = 3820 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); 3821 AddrMode.BaseReg = nullptr; 3822 } else if (!ResultPtr && AddrMode.Scale == 1) { 3823 ResultPtr = 3824 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); 3825 AddrMode.Scale = 0; 3826 } 3827 3828 if (!ResultPtr && 3829 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 3830 SunkAddr = Constant::getNullValue(Addr->getType()); 3831 } else if (!ResultPtr) { 3832 return false; 3833 } else { 3834 Type *I8PtrTy = 3835 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 3836 Type *I8Ty = Builder.getInt8Ty(); 3837 3838 // Start with the base register. Do this first so that subsequent address 3839 // matching finds it last, which will prevent it from trying to match it 3840 // as the scaled value in case it happens to be a mul. That would be 3841 // problematic if we've sunk a different mul for the scale, because then 3842 // we'd end up sinking both muls. 3843 if (AddrMode.BaseReg) { 3844 Value *V = AddrMode.BaseReg; 3845 if (V->getType() != IntPtrTy) 3846 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 3847 3848 ResultIndex = V; 3849 } 3850 3851 // Add the scale value. 3852 if (AddrMode.Scale) { 3853 Value *V = AddrMode.ScaledReg; 3854 if (V->getType() == IntPtrTy) { 3855 // done. 3856 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 3857 cast<IntegerType>(V->getType())->getBitWidth()) { 3858 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 3859 } else { 3860 // It is only safe to sign extend the BaseReg if we know that the math 3861 // required to create it did not overflow before we extend it. 
Since 3862 // the original IR value was tossed in favor of a constant back when 3863 // the AddrMode was created we need to bail out gracefully if widths 3864 // do not match instead of extending it. 3865 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); 3866 if (I && (ResultIndex != AddrMode.BaseReg)) 3867 I->eraseFromParent(); 3868 return false; 3869 } 3870 3871 if (AddrMode.Scale != 1) 3872 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 3873 "sunkaddr"); 3874 if (ResultIndex) 3875 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 3876 else 3877 ResultIndex = V; 3878 } 3879 3880 // Add in the Base Offset if present. 3881 if (AddrMode.BaseOffs) { 3882 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 3883 if (ResultIndex) { 3884 // We need to add this separately from the scale above to help with 3885 // SDAG consecutive load/store merging. 3886 if (ResultPtr->getType() != I8PtrTy) 3887 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 3888 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 3889 } 3890 3891 ResultIndex = V; 3892 } 3893 3894 if (!ResultIndex) { 3895 SunkAddr = ResultPtr; 3896 } else { 3897 if (ResultPtr->getType() != I8PtrTy) 3898 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 3899 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 3900 } 3901 3902 if (SunkAddr->getType() != Addr->getType()) 3903 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 3904 } 3905 } else { 3906 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 3907 << *MemoryInst << "\n"); 3908 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 3909 Value *Result = nullptr; 3910 3911 // Start with the base register. Do this first so that subsequent address 3912 // matching finds it last, which will prevent it from trying to match it 3913 // as the scaled value in case it happens to be a mul. That would be 3914 // problematic if we've sunk a different mul for the scale, because then 3915 // we'd end up sinking both muls. 3916 if (AddrMode.BaseReg) { 3917 Value *V = AddrMode.BaseReg; 3918 if (V->getType()->isPointerTy()) 3919 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 3920 if (V->getType() != IntPtrTy) 3921 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 3922 Result = V; 3923 } 3924 3925 // Add the scale value. 3926 if (AddrMode.Scale) { 3927 Value *V = AddrMode.ScaledReg; 3928 if (V->getType() == IntPtrTy) { 3929 // done. 3930 } else if (V->getType()->isPointerTy()) { 3931 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 3932 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 3933 cast<IntegerType>(V->getType())->getBitWidth()) { 3934 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 3935 } else { 3936 // It is only safe to sign extend the BaseReg if we know that the math 3937 // required to create it did not overflow before we extend it. Since 3938 // the original IR value was tossed in favor of a constant back when 3939 // the AddrMode was created we need to bail out gracefully if widths 3940 // do not match instead of extending it. 
3941 Instruction *I = dyn_cast_or_null<Instruction>(Result);
3942 if (I && (Result != AddrMode.BaseReg))
3943 I->eraseFromParent();
3944 return false;
3945 }
3946 if (AddrMode.Scale != 1)
3947 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
3948 "sunkaddr");
3949 if (Result)
3950 Result = Builder.CreateAdd(Result, V, "sunkaddr");
3951 else
3952 Result = V;
3953 }
3954
3955 // Add in the BaseGV if present.
3956 if (AddrMode.BaseGV) {
3957 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
3958 if (Result)
3959 Result = Builder.CreateAdd(Result, V, "sunkaddr");
3960 else
3961 Result = V;
3962 }
3963
3964 // Add in the Base Offset if present.
3965 if (AddrMode.BaseOffs) {
3966 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
3967 if (Result)
3968 Result = Builder.CreateAdd(Result, V, "sunkaddr");
3969 else
3970 Result = V;
3971 }
3972
3973 if (!Result)
3974 SunkAddr = Constant::getNullValue(Addr->getType());
3975 else
3976 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
3977 }
3978
3979 MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
3980
3981 // If we have no uses, recursively delete the value and all dead instructions
3982 // using it.
3983 if (Repl->use_empty()) {
3984 // This can cause recursive deletion, which can invalidate our iterator.
3985 // Use a WeakVH to hold onto it in case this happens.
3986 Value *CurValue = &*CurInstIterator;
3987 WeakVH IterHandle(CurValue);
3988 BasicBlock *BB = CurInstIterator->getParent();
3989
3990 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
3991
3992 if (IterHandle != CurValue) {
3993 // If the iterator instruction was recursively deleted, start over at the
3994 // start of the block.
3995 CurInstIterator = BB->begin();
3996 SunkAddrs.clear();
3997 }
3998 }
3999 ++NumMemoryInsts;
4000 return true;
4001 }
4002
4003 /// If there are any memory operands, use optimizeMemoryInst to sink their
4004 /// address computation into the block when possible / profitable.
4005 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
4006 bool MadeChange = false;
4007
4008 const TargetRegisterInfo *TRI =
4009 TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
4010 TargetLowering::AsmOperandInfoVector TargetConstraints =
4011 TLI->ParseConstraints(*DL, TRI, CS);
4012 unsigned ArgNo = 0;
4013 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
4014 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
4015
4016 // Compute the constraint code and ConstraintType to use.
4017 TLI->ComputeConstraintToUse(OpInfo, SDValue());
4018
4019 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
4020 OpInfo.isIndirect) {
4021 Value *OpVal = CS->getArgOperand(ArgNo++);
4022 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
4023 } else if (OpInfo.Type == InlineAsm::isInput)
4024 ArgNo++;
4025 }
4026
4027 return MadeChange;
4028 }
4029
4030 /// \brief Check if all the uses of \p Inst are equivalent (or free) zero or
4031 /// sign extensions.
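/// For illustration (a hedged sketch, not taken from the original source):
/// \code
/// %a = add i32 %x, 1
/// %e1 = zext i32 %a to i64
/// %e2 = zext i32 %a to i64   ; same type as %e1: equivalent after CSE
/// %e3 = zext i32 %a to i128  ; fine iff zext from i64 to i128 is free
/// \endcode
/// A mix of sext and zext users, however, is rejected.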
4032 static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) {
4033 assert(!Inst->use_empty() && "Input must have at least one use");
4034 const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin());
4035 bool IsSExt = isa<SExtInst>(FirstUser);
4036 Type *ExtTy = FirstUser->getType();
4037 for (const User *U : Inst->users()) {
4038 const Instruction *UI = cast<Instruction>(U);
4039 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
4040 return false;
4041 Type *CurTy = UI->getType();
4042 // Same input and output types: Same instruction after CSE.
4043 if (CurTy == ExtTy)
4044 continue;
4045
4046 // If IsSExt is true, we are in this situation:
4047 // a = Inst
4048 // b = sext ty1 a to ty2
4049 // c = sext ty1 a to ty3
4050 // Assuming ty2 is shorter than ty3, this could be turned into:
4051 // a = Inst
4052 // b = sext ty1 a to ty2
4053 // c = sext ty2 b to ty3
4054 // However, the last sext is not free.
4055 if (IsSExt)
4056 return false;
4057
4058 // This is a ZExt, and maybe extending from one type to the other is free.
4059 // In that case, we would not account for a different use.
4060 Type *NarrowTy;
4061 Type *LargeTy;
4062 if (ExtTy->getScalarType()->getIntegerBitWidth() >
4063 CurTy->getScalarType()->getIntegerBitWidth()) {
4064 NarrowTy = CurTy;
4065 LargeTy = ExtTy;
4066 } else {
4067 NarrowTy = ExtTy;
4068 LargeTy = CurTy;
4069 }
4070
4071 if (!TLI.isZExtFree(NarrowTy, LargeTy))
4072 return false;
4073 }
4074 // All uses are the same or can be derived from one another for free.
4075 return true;
4076 }
4077
4078 /// \brief Try to form ExtLd by promoting \p Exts until they reach a
4079 /// load instruction.
4080 /// If an ext(load) can be formed, it is returned via \p LI for the load
4081 /// and \p Inst for the extension.
4082 /// Otherwise LI == nullptr and Inst == nullptr.
4083 /// When some promotion happened, \p TPT contains the proper state to
4084 /// revert them.
4085 ///
4086 /// \return true when promoting was necessary to expose the ext(load)
4087 /// opportunity, false otherwise.
4088 ///
4089 /// Example:
4090 /// \code
4091 /// %ld = load i32* %addr
4092 /// %add = add nuw i32 %ld, 4
4093 /// %zext = zext i32 %add to i64
4094 /// \endcode
4095 /// =>
4096 /// \code
4097 /// %ld = load i32* %addr
4098 /// %zext = zext i32 %ld to i64
4099 /// %add = add nuw i64 %zext, 4
4100 /// \endcode
4101 /// Thanks to the promotion, we can match zext(load i32*) to i64.
4102 bool CodeGenPrepare::extLdPromotion(TypePromotionTransaction &TPT,
4103 LoadInst *&LI, Instruction *&Inst,
4104 const SmallVectorImpl<Instruction *> &Exts,
4105 unsigned CreatedInstsCost = 0) {
4106 // Iterate over all the extensions to see if one forms an ext(load).
4107 for (auto I : Exts) {
4108 // Check if we directly have ext(load).
4109 if ((LI = dyn_cast<LoadInst>(I->getOperand(0)))) {
4110 Inst = I;
4111 // No promotion happened here.
4112 return false;
4113 }
4114 // Check whether or not we want to do any promotion.
4115 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
4116 continue;
4117 // Get the action to perform the promotion.
4118 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
4119 I, InsertedInsts, *TLI, PromotedInsts);
4120 // Check if we can promote.
4121 if (!TPH)
4122 continue;
4123 // Save the current state.
4124 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4125 TPT.getRestorationPoint();
4126 SmallVector<Instruction *, 4> NewExts;
4127 unsigned NewCreatedInstsCost = 0;
4128 unsigned ExtCost = !TLI->isExtFree(I);
4129 // Promote.
4130 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
4131 &NewExts, nullptr, *TLI);
4132 assert(PromotedVal &&
4133 "TypePromotionHelper should have filtered out those cases");
4134
4135 // We can merge at most one extension into a load.
4136 // Therefore, if we have more than 1 new extension we heuristically
4137 // cut this search path, because it means we degrade the code quality.
4138 // With exactly 2, the transformation is neutral, because we will merge
4139 // one extension but leave one. However, we optimistically keep going,
4140 // because the new extension may be removed too.
4141 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
4142 TotalCreatedInstsCost -= ExtCost;
4143 if (!StressExtLdPromotion &&
4144 (TotalCreatedInstsCost > 1 ||
4145 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
4146 // The promotion is not profitable, rollback to the previous state.
4147 TPT.rollback(LastKnownGood);
4148 continue;
4149 }
4150 // The promotion is profitable.
4151 // Check if it exposes an ext(load).
4152 (void)extLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInstsCost);
4153 if (LI && (StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
4154 // If we have created a new extension, i.e., now we have two
4155 // extensions, we must make sure one of them is merged with
4156 // the load, otherwise we may degrade the code quality.
4157 (LI->hasOneUse() || hasSameExtUse(LI, *TLI))))
4158 // Promotion happened.
4159 return true;
4160 // If this does not help to expose an ext(load), rollback.
4161 TPT.rollback(LastKnownGood);
4162 }
4163 // None of the extensions can form an ext(load).
4164 LI = nullptr;
4165 Inst = nullptr;
4166 return false;
4167 }
4168
4169 /// Move a zext or sext fed by a load into the same basic block as the load,
4170 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
4171 /// extend into the load.
4172 /// \p I[in/out] the extension may be modified during the process if some
4173 /// promotions apply.
4174 ///
4175 bool CodeGenPrepare::moveExtToFormExtLoad(Instruction *&I) {
4176 // Try to promote a chain of computation if doing so allows us to form an
4177 // extended load.
4178 TypePromotionTransaction TPT;
4179 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4180 TPT.getRestorationPoint();
4181 SmallVector<Instruction *, 1> Exts;
4182 Exts.push_back(I);
4183 // Look for a load being extended.
4184 LoadInst *LI = nullptr;
4185 Instruction *OldExt = I;
4186 bool HasPromoted = extLdPromotion(TPT, LI, I, Exts);
4187 if (!LI || !I) {
4188 assert(!HasPromoted && !LI && "If we did not match any load instruction "
4189 "the code must remain the same");
4190 I = OldExt;
4191 return false;
4192 }
4193
4194 // If they're already in the same block, there's nothing to do.
4195 // Make the cheap checks first if we did not promote.
4196 // If we promoted, we need to check if it is indeed profitable.
4197 if (!HasPromoted && LI->getParent() == I->getParent())
4198 return false;
4199
4200 EVT VT = TLI->getValueType(*DL, I->getType());
4201 EVT LoadVT = TLI->getValueType(*DL, LI->getType());
4202
4203 // If the load has other users and the truncate is not free, this probably
4204 // isn't worthwhile.
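// (Sketch of the concern, with invented IR: if %ld also feeds a plain i32
// add, folding the extend into the load leaves that add needing a truncate
// of the now-extended value.)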
4205 if (!LI->hasOneUse() && TLI && 4206 (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) && 4207 !TLI->isTruncateFree(I->getType(), LI->getType())) { 4208 I = OldExt; 4209 TPT.rollback(LastKnownGood); 4210 return false; 4211 } 4212 4213 // Check whether the target supports casts folded into loads. 4214 unsigned LType; 4215 if (isa<ZExtInst>(I)) 4216 LType = ISD::ZEXTLOAD; 4217 else { 4218 assert(isa<SExtInst>(I) && "Unexpected ext type!"); 4219 LType = ISD::SEXTLOAD; 4220 } 4221 if (TLI && !TLI->isLoadExtLegal(LType, VT, LoadVT)) { 4222 I = OldExt; 4223 TPT.rollback(LastKnownGood); 4224 return false; 4225 } 4226 4227 // Move the extend into the same block as the load, so that SelectionDAG 4228 // can fold it. 4229 TPT.commit(); 4230 I->removeFromParent(); 4231 I->insertAfter(LI); 4232 ++NumExtsMoved; 4233 return true; 4234 } 4235 4236 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 4237 BasicBlock *DefBB = I->getParent(); 4238 4239 // If the result of a {s|z}ext and its source are both live out, rewrite all 4240 // other uses of the source with result of extension. 4241 Value *Src = I->getOperand(0); 4242 if (Src->hasOneUse()) 4243 return false; 4244 4245 // Only do this xform if truncating is free. 4246 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 4247 return false; 4248 4249 // Only safe to perform the optimization if the source is also defined in 4250 // this block. 4251 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 4252 return false; 4253 4254 bool DefIsLiveOut = false; 4255 for (User *U : I->users()) { 4256 Instruction *UI = cast<Instruction>(U); 4257 4258 // Figure out which BB this ext is used in. 4259 BasicBlock *UserBB = UI->getParent(); 4260 if (UserBB == DefBB) continue; 4261 DefIsLiveOut = true; 4262 break; 4263 } 4264 if (!DefIsLiveOut) 4265 return false; 4266 4267 // Make sure none of the uses are PHI nodes. 4268 for (User *U : Src->users()) { 4269 Instruction *UI = cast<Instruction>(U); 4270 BasicBlock *UserBB = UI->getParent(); 4271 if (UserBB == DefBB) continue; 4272 // Be conservative. We don't want this xform to end up introducing 4273 // reloads just before load / store instructions. 4274 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 4275 return false; 4276 } 4277 4278 // InsertedTruncs - Only insert one trunc in each block once. 4279 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 4280 4281 bool MadeChange = false; 4282 for (Use &U : Src->uses()) { 4283 Instruction *User = cast<Instruction>(U.getUser()); 4284 4285 // Figure out which BB this ext is used in. 4286 BasicBlock *UserBB = User->getParent(); 4287 if (UserBB == DefBB) continue; 4288 4289 // Both src and def are live in this block. Rewrite the use. 4290 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 4291 4292 if (!InsertedTrunc) { 4293 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 4294 assert(InsertPt != UserBB->end()); 4295 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 4296 InsertedInsts.insert(InsertedTrunc); 4297 } 4298 4299 // Replace a use of the {s|z}ext source with a use of the result. 4300 U = InsertedTrunc; 4301 ++NumExtUses; 4302 MadeChange = true; 4303 } 4304 4305 return MadeChange; 4306 } 4307 4308 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 4309 // just after the load if the target can fold this into one extload instruction, 4310 // with the hope of eliminating some of the other later "and" instructions using 4311 // the loaded value. 
"and"s that are made trivially redundant by the insertion 4312 // of the new "and" are removed by this function, while others (e.g. those whose 4313 // path from the load goes through a phi) are left for isel to potentially 4314 // remove. 4315 // 4316 // For example: 4317 // 4318 // b0: 4319 // x = load i32 4320 // ... 4321 // b1: 4322 // y = and x, 0xff 4323 // z = use y 4324 // 4325 // becomes: 4326 // 4327 // b0: 4328 // x = load i32 4329 // x' = and x, 0xff 4330 // ... 4331 // b1: 4332 // z = use x' 4333 // 4334 // whereas: 4335 // 4336 // b0: 4337 // x1 = load i32 4338 // ... 4339 // b1: 4340 // x2 = load i32 4341 // ... 4342 // b2: 4343 // x = phi x1, x2 4344 // y = and x, 0xff 4345 // 4346 // becomes (after a call to optimizeLoadExt for each load): 4347 // 4348 // b0: 4349 // x1 = load i32 4350 // x1' = and x1, 0xff 4351 // ... 4352 // b1: 4353 // x2 = load i32 4354 // x2' = and x2, 0xff 4355 // ... 4356 // b2: 4357 // x = phi x1', x2' 4358 // y = and x, 0xff 4359 // 4360 4361 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 4362 4363 if (!Load->isSimple() || 4364 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) 4365 return false; 4366 4367 // Skip loads we've already transformed or have no reason to transform. 4368 if (Load->hasOneUse()) { 4369 User *LoadUser = *Load->user_begin(); 4370 if (cast<Instruction>(LoadUser)->getParent() == Load->getParent() && 4371 !dyn_cast<PHINode>(LoadUser)) 4372 return false; 4373 } 4374 4375 // Look at all uses of Load, looking through phis, to determine how many bits 4376 // of the loaded value are needed. 4377 SmallVector<Instruction *, 8> WorkList; 4378 SmallPtrSet<Instruction *, 16> Visited; 4379 SmallVector<Instruction *, 8> AndsToMaybeRemove; 4380 for (auto *U : Load->users()) 4381 WorkList.push_back(cast<Instruction>(U)); 4382 4383 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 4384 unsigned BitWidth = LoadResultVT.getSizeInBits(); 4385 APInt DemandBits(BitWidth, 0); 4386 APInt WidestAndBits(BitWidth, 0); 4387 4388 while (!WorkList.empty()) { 4389 Instruction *I = WorkList.back(); 4390 WorkList.pop_back(); 4391 4392 // Break use-def graph loops. 4393 if (!Visited.insert(I).second) 4394 continue; 4395 4396 // For a PHI node, push all of its users. 4397 if (auto *Phi = dyn_cast<PHINode>(I)) { 4398 for (auto *U : Phi->users()) 4399 WorkList.push_back(cast<Instruction>(U)); 4400 continue; 4401 } 4402 4403 switch (I->getOpcode()) { 4404 case llvm::Instruction::And: { 4405 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 4406 if (!AndC) 4407 return false; 4408 APInt AndBits = AndC->getValue(); 4409 DemandBits |= AndBits; 4410 // Keep track of the widest and mask we see. 
4411 if (AndBits.ugt(WidestAndBits)) 4412 WidestAndBits = AndBits; 4413 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 4414 AndsToMaybeRemove.push_back(I); 4415 break; 4416 } 4417 4418 case llvm::Instruction::Shl: { 4419 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 4420 if (!ShlC) 4421 return false; 4422 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 4423 auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt); 4424 DemandBits |= ShlDemandBits; 4425 break; 4426 } 4427 4428 case llvm::Instruction::Trunc: { 4429 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 4430 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 4431 auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth); 4432 DemandBits |= TruncBits; 4433 break; 4434 } 4435 4436 default: 4437 return false; 4438 } 4439 } 4440 4441 uint32_t ActiveBits = DemandBits.getActiveBits(); 4442 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 4443 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 4444 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 4445 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 4446 // followed by an AND. 4447 // TODO: Look into removing this restriction by fixing backends to either 4448 // return false for isLoadExtLegal for i1 or have them select this pattern to 4449 // a single instruction. 4450 // 4451 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 4452 // mask, since these are the only ands that will be removed by isel. 4453 if (ActiveBits <= 1 || !APIntOps::isMask(ActiveBits, DemandBits) || 4454 WidestAndBits != DemandBits) 4455 return false; 4456 4457 LLVMContext &Ctx = Load->getType()->getContext(); 4458 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 4459 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 4460 4461 // Reject cases that won't be matched as extloads. 4462 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 4463 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 4464 return false; 4465 4466 IRBuilder<> Builder(Load->getNextNode()); 4467 auto *NewAnd = dyn_cast<Instruction>( 4468 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 4469 4470 // Replace all uses of load with new and (except for the use of load in the 4471 // new and itself). 4472 Load->replaceAllUsesWith(NewAnd); 4473 NewAnd->setOperand(0, Load); 4474 4475 // Remove any and instructions that are now redundant. 4476 for (auto *And : AndsToMaybeRemove) 4477 // Check that the and mask is the same as the one we decided to put on the 4478 // new and. 4479 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 4480 And->replaceAllUsesWith(NewAnd); 4481 if (&*CurInstIterator == And) 4482 CurInstIterator = std::next(And->getIterator()); 4483 And->eraseFromParent(); 4484 ++NumAndUses; 4485 } 4486 4487 ++NumAndsAdded; 4488 return true; 4489 } 4490 4491 /// Check if V (an operand of a select instruction) is an expensive instruction 4492 /// that is only used once. 4493 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 4494 auto *I = dyn_cast<Instruction>(V); 4495 // If it's safe to speculatively execute, then it should not have side 4496 // effects; therefore, it's safe to sink and possibly *not* execute. 
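// (Hedged example: on a target whose cost model marks division as
// TCC_Expensive, a one-use fdiv feeding just one arm of the select
// qualifies; a load does not, since it is not generally speculatable.)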
4497 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
4498 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive;
4499 }
4500
4501 /// Returns true if a SelectInst should be turned into an explicit branch.
4502 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
4503 SelectInst *SI) {
4504 // FIXME: This should use the same heuristics as IfConversion to determine
4505 // whether a select is better represented as a branch. This requires that
4506 // branch probability metadata is preserved for the select, which is not the
4507 // case currently.
4508
4509 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
4510
4511 // If a branch is predictable, an out-of-order CPU can avoid blocking on its
4512 // comparison condition. If the compare has more than one use, there's
4513 // probably another cmov or setcc around, so it's not worth emitting a branch.
4514 if (!Cmp || !Cmp->hasOneUse())
4515 return false;
4516
4517 // If either operand of the select is expensive and only needed on one side
4518 // of the select, we should form a branch.
4519 if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
4520 sinkSelectOperand(TTI, SI->getFalseValue()))
4521 return true;
4522
4523 return false;
4524 }
4525
4526
4527 /// If we have a SelectInst that will likely profit from branch prediction,
4528 /// turn it into a branch.
4529 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
4530 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
4531
4532 // Can we convert the 'select' into a branch?
4533 if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
4534 return false;
4535
4536 TargetLowering::SelectSupportKind SelectKind;
4537 if (VectorCond)
4538 SelectKind = TargetLowering::VectorMaskSelect;
4539 else if (SI->getType()->isVectorTy())
4540 SelectKind = TargetLowering::ScalarCondVectorVal;
4541 else
4542 SelectKind = TargetLowering::ScalarValSelect;
4543
4544 // Do we have efficient codegen support for this kind of 'select'?
4545 if (TLI->isSelectSupported(SelectKind)) {
4546 // We have efficient codegen support for the select instruction.
4547 // Check if it is profitable to keep this 'select'.
4548 if (!TLI->isPredictableSelectExpensive() ||
4549 !isFormingBranchFromSelectProfitable(TTI, SI))
4550 return false;
4551 }
4552
4553 ModifiedDT = true;
4554
4555 // Transform a sequence like this:
4556 // start:
4557 // %cmp = cmp uge i32 %a, %b
4558 // %sel = select i1 %cmp, i32 %c, i32 %d
4559 //
4560 // Into:
4561 // start:
4562 // %cmp = cmp uge i32 %a, %b
4563 // br i1 %cmp, label %select.true, label %select.false
4564 // select.true:
4565 // br label %select.end
4566 // select.false:
4567 // br label %select.end
4568 // select.end:
4569 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
4570 //
4571 // In addition, we may sink instructions that produce %c or %d from
4572 // the entry block into the destination(s) of the new branch.
4573 // If the true or false blocks do not contain a sunken instruction, that
4574 // block and its branch may be optimized away. In that case, one side of the
4575 // first branch will point directly to select.end, and the corresponding PHI
4576 // predecessor block will be the start block.
4577
4578 // First, we split the block containing the select into 2 blocks.
4579 BasicBlock *StartBlock = SI->getParent(); 4580 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI)); 4581 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 4582 4583 // Delete the unconditional branch that was just created by the split. 4584 StartBlock->getTerminator()->eraseFromParent(); 4585 4586 // These are the new basic blocks for the conditional branch. 4587 // At least one will become an actual new basic block. 4588 BasicBlock *TrueBlock = nullptr; 4589 BasicBlock *FalseBlock = nullptr; 4590 4591 // Sink expensive instructions into the conditional blocks to avoid executing 4592 // them speculatively. 4593 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 4594 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 4595 EndBlock->getParent(), EndBlock); 4596 auto *TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 4597 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 4598 TrueInst->moveBefore(TrueBranch); 4599 } 4600 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 4601 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 4602 EndBlock->getParent(), EndBlock); 4603 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 4604 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 4605 FalseInst->moveBefore(FalseBranch); 4606 } 4607 4608 // If there was nothing to sink, then arbitrarily choose the 'false' side 4609 // for a new input value to the PHI. 4610 if (TrueBlock == FalseBlock) { 4611 assert(TrueBlock == nullptr && 4612 "Unexpected basic block transform while optimizing select"); 4613 4614 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 4615 EndBlock->getParent(), EndBlock); 4616 BranchInst::Create(EndBlock, FalseBlock); 4617 } 4618 4619 // Insert the real conditional branch based on the original condition. 4620 // If we did not create a new block for one of the 'true' or 'false' paths 4621 // of the condition, it means that side of the branch goes to the end block 4622 // directly and the path originates from the start block from the point of 4623 // view of the new PHI. 4624 if (TrueBlock == nullptr) { 4625 BranchInst::Create(EndBlock, FalseBlock, SI->getCondition(), SI); 4626 TrueBlock = StartBlock; 4627 } else if (FalseBlock == nullptr) { 4628 BranchInst::Create(TrueBlock, EndBlock, SI->getCondition(), SI); 4629 FalseBlock = StartBlock; 4630 } else { 4631 BranchInst::Create(TrueBlock, FalseBlock, SI->getCondition(), SI); 4632 } 4633 4634 // The select itself is replaced with a PHI Node. 4635 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 4636 PN->takeName(SI); 4637 PN->addIncoming(SI->getTrueValue(), TrueBlock); 4638 PN->addIncoming(SI->getFalseValue(), FalseBlock); 4639 4640 SI->replaceAllUsesWith(PN); 4641 SI->eraseFromParent(); 4642 4643 // Instruct OptimizeBlock to skip to the next block. 4644 CurInstIterator = StartBlock->end(); 4645 ++NumSelectsExpanded; 4646 return true; 4647 } 4648 4649 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 4650 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 4651 int SplatElem = -1; 4652 for (unsigned i = 0; i < Mask.size(); ++i) { 4653 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 4654 return false; 4655 SplatElem = Mask[i]; 4656 } 4657 4658 return true; 4659 } 4660 4661 /// Some targets have expensive vector shifts if the lanes aren't all the same 4662 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). 
/// In these cases it's often worth sinking a shufflevector splat down to its
/// use so that codegen can spot all lanes are identical.
bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  BasicBlock *DefBB = SVI->getParent();

  // Only do this xform if variable vector shifts are particularly expensive.
  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
    return false;

  // We only expect better codegen by sinking a shuffle if we can recognise a
  // constant splat.
  if (!isBroadcastShuffle(SVI))
    return false;

  // InsertedShuffles - Only insert a shuffle in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

  bool MadeChange = false;
  for (User *U : SVI->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this shuffle is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;

    // For now only apply this when the splat is used by a shift instruction.
    if (!UI->isShift()) continue;

    // Everything checks out, sink the shuffle if the user's block doesn't
    // already have a copy.
    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];

    if (!InsertedShuffle) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedShuffle =
          new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                SVI->getOperand(2), "", &*InsertPt);
    }

    UI->replaceUsesOfWith(SVI, InsertedShuffle);
    MadeChange = true;
  }

  // If we removed all uses, nuke the shuffle.
  if (SVI->use_empty()) {
    SVI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
  if (!TLI || !DL)
    return false;

  Value *Cond = SI->getCondition();
  Type *OldType = Cond->getType();
  LLVMContext &Context = Cond->getContext();
  MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
  unsigned RegWidth = RegType.getSizeInBits();

  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
    return false;

  // If the register width is greater than the type width, expand the condition
  // of the switch instruction and each case constant to the width of the
  // register. By widening the type of the switch condition, subsequent
  // comparisons (for case comparisons) will not need to be extended to the
  // preferred register width, so we will potentially eliminate N-1 extends,
  // where N is the number of cases in the switch.
  auto *NewType = Type::getIntNTy(Context, RegWidth);

  // Zero-extend the switch condition and case constants unless the switch
  // condition is a function argument that is already being sign-extended.
  // In that case, we can avoid an unnecessary mask/extension by sign-extending
  // everything instead.
  Instruction::CastOps ExtType = Instruction::ZExt;
  if (auto *Arg = dyn_cast<Argument>(Cond))
    if (Arg->hasSExtAttr())
      ExtType = Instruction::SExt;

  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
  ExtInst->insertBefore(SI);
  SI->setCondition(ExtInst);
  for (SwitchInst::CaseIt Case : SI->cases()) {
    APInt NarrowConst = Case.getCaseValue()->getValue();
    APInt WideConst = (ExtType == Instruction::ZExt) ?
                      NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
  }

  return true;
}

namespace {
/// \brief Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;
  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;
  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;
  /// Instruction that will be combined with the transition.
  Instruction *CombineInst;

  /// \brief The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// \brief Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 0;
  }

  /// \brief Return the index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 1;
  }

  /// \brief Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// \brief Promote \p ToBePromoted by moving \p Def downward through it.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// \brief Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ?
cast<ConstantInt>(ValIdx)->getZExtValue() 4844 : -1; 4845 Type *PromotedType = getTransitionType(); 4846 4847 StoreInst *ST = cast<StoreInst>(CombineInst); 4848 unsigned AS = ST->getPointerAddressSpace(); 4849 unsigned Align = ST->getAlignment(); 4850 // Check if this store is supported. 4851 if (!TLI.allowsMisalignedMemoryAccesses( 4852 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 4853 Align)) { 4854 // If this is not supported, there is no way we can combine 4855 // the extract with the store. 4856 return false; 4857 } 4858 4859 // The scalar chain of computation has to pay for the transition 4860 // scalar to vector. 4861 // The vector chain has to account for the combining cost. 4862 uint64_t ScalarCost = 4863 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 4864 uint64_t VectorCost = StoreExtractCombineCost; 4865 for (const auto &Inst : InstsToBePromoted) { 4866 // Compute the cost. 4867 // By construction, all instructions being promoted are arithmetic ones. 4868 // Moreover, one argument is a constant that can be viewed as a splat 4869 // constant. 4870 Value *Arg0 = Inst->getOperand(0); 4871 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 4872 isa<ConstantFP>(Arg0); 4873 TargetTransformInfo::OperandValueKind Arg0OVK = 4874 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 4875 : TargetTransformInfo::OK_AnyValue; 4876 TargetTransformInfo::OperandValueKind Arg1OVK = 4877 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 4878 : TargetTransformInfo::OK_AnyValue; 4879 ScalarCost += TTI.getArithmeticInstrCost( 4880 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); 4881 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 4882 Arg0OVK, Arg1OVK); 4883 } 4884 DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 4885 << ScalarCost << "\nVector: " << VectorCost << '\n'); 4886 return ScalarCost > VectorCost; 4887 } 4888 4889 /// \brief Generate a constant vector with \p Val with the same 4890 /// number of elements as the transition. 4891 /// \p UseSplat defines whether or not \p Val should be replicated 4892 /// across the whole vector. 4893 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 4894 /// otherwise we generate a vector with as many undef as possible: 4895 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 4896 /// used at the index of the extract. 4897 Value *getConstantVector(Constant *Val, bool UseSplat) const { 4898 unsigned ExtractIdx = UINT_MAX; 4899 if (!UseSplat) { 4900 // If we cannot determine where the constant must be, we have to 4901 // use a splat constant. 4902 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 4903 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 4904 ExtractIdx = CstVal->getSExtValue(); 4905 else 4906 UseSplat = true; 4907 } 4908 4909 unsigned End = getTransitionType()->getVectorNumElements(); 4910 if (UseSplat) 4911 return ConstantVector::getSplat(End, Val); 4912 4913 SmallVector<Constant *, 4> ConstVec; 4914 UndefValue *UndefVal = UndefValue::get(Val->getType()); 4915 for (unsigned Idx = 0; Idx != End; ++Idx) { 4916 if (Idx == ExtractIdx) 4917 ConstVec.push_back(Val); 4918 else 4919 ConstVec.push_back(UndefVal); 4920 } 4921 return ConstantVector::get(ConstVec); 4922 } 4923 4924 /// \brief Check if promoting to a vector type an operand at \p OperandIdx 4925 /// in \p Use can trigger undefined behavior. 
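  /// Illustrative example (not from the original source): if \p Use is
  ///   %r = udiv i32 %x, 7
  /// then widening the constant divisor with undef in the unused lanes would
  /// let the promoted udiv divide by undef, so getConstantVector must splat
  /// the 7 across all lanes instead.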
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
    assert(Transition && "Do not know how to promote null");
  }

  /// \brief Check if we can promote \p ToBePromoted to \p Type.
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// \brief Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition down through it.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// \brief Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// \brief Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// \brief Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// \brief Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
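    // Illustrative note (not from the original source): StressStoreExtract
    // intentionally bypasses this cost model so the promotion machinery can
    // be exercised by stress tests regardless of profitability.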
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};
} // End of anonymous namespace.

void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->removeFromParent();
  Transition->insertAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}

/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = UINT_MAX;
  if (DisableStoreExtract || !TLI ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it at a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
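  // Illustrative walk (not from the original source): for
  //   %e = extractelement <2 x i32> %v, i32 0
  //   %a = add i32 %e, 7
  //   store i32 %a, i32* %p
  // the add is enqueued for promotion and the store is recorded as the
  // combining instruction, exposing a vector add followed by a
  // store(extractelement) that the target can match.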
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      DEBUG(dbgs() << "Instruction to promote is in a different block ("
                   << ToBePromoted->getParent()->getName()
                   << ") than the transition (" << Parent->getName() << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      DEBUG(dbgs() << "Assume " << *Inst << '\n'
                   << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
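    // Illustrative example (not from the original source): LSR may leave
    //   %base = ptrtoint i32* @g to i64
    // in a loop preheader; substituting the cast back into every in-loop
    // user would undo that hoisting, so constant-operand casts are left
    // alone here.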
5152 if (isa<Constant>(CI->getOperand(0))) 5153 return false; 5154 5155 if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL)) 5156 return true; 5157 5158 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { 5159 /// Sink a zext or sext into its user blocks if the target type doesn't 5160 /// fit in one register 5161 if (TLI && 5162 TLI->getTypeAction(CI->getContext(), 5163 TLI->getValueType(*DL, CI->getType())) == 5164 TargetLowering::TypeExpandInteger) { 5165 return SinkCast(CI); 5166 } else { 5167 bool MadeChange = moveExtToFormExtLoad(I); 5168 return MadeChange | optimizeExtUses(I); 5169 } 5170 } 5171 return false; 5172 } 5173 5174 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 5175 if (!TLI || !TLI->hasMultipleConditionRegisters()) 5176 return OptimizeCmpExpression(CI); 5177 5178 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 5179 stripInvariantGroupMetadata(*LI); 5180 if (TLI) { 5181 bool Modified = optimizeLoadExt(LI); 5182 unsigned AS = LI->getPointerAddressSpace(); 5183 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); 5184 return Modified; 5185 } 5186 return false; 5187 } 5188 5189 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 5190 stripInvariantGroupMetadata(*SI); 5191 if (TLI) { 5192 unsigned AS = SI->getPointerAddressSpace(); 5193 return optimizeMemoryInst(I, SI->getOperand(1), 5194 SI->getOperand(0)->getType(), AS); 5195 } 5196 return false; 5197 } 5198 5199 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); 5200 5201 if (BinOp && (BinOp->getOpcode() == Instruction::AShr || 5202 BinOp->getOpcode() == Instruction::LShr)) { 5203 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); 5204 if (TLI && CI && TLI->hasExtractBitsInsn()) 5205 return OptimizeExtractBits(BinOp, CI, *TLI, *DL); 5206 5207 return false; 5208 } 5209 5210 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 5211 if (GEPI->hasAllZeroIndices()) { 5212 /// The GEP operand must be a pointer, so must its result -> BitCast 5213 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), 5214 GEPI->getName(), GEPI); 5215 GEPI->replaceAllUsesWith(NC); 5216 GEPI->eraseFromParent(); 5217 ++NumGEPsElim; 5218 optimizeInst(NC, ModifiedDT); 5219 return true; 5220 } 5221 return false; 5222 } 5223 5224 if (CallInst *CI = dyn_cast<CallInst>(I)) 5225 return optimizeCallInst(CI, ModifiedDT); 5226 5227 if (SelectInst *SI = dyn_cast<SelectInst>(I)) 5228 return optimizeSelectInst(SI); 5229 5230 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) 5231 return optimizeShuffleVectorInst(SVI); 5232 5233 if (auto *Switch = dyn_cast<SwitchInst>(I)) 5234 return optimizeSwitchInst(Switch); 5235 5236 if (isa<ExtractElementInst>(I)) 5237 return optimizeExtractElementInst(I); 5238 5239 return false; 5240 } 5241 5242 /// Given an OR instruction, check to see if this is a bitreverse 5243 /// idiom. If so, insert the new intrinsic and return true. 
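/// Illustrative sketch (not from the original source): for i8, a manual
/// bit reversal such as
///   %hi = shl i8 %x, 4
///   %lo = lshr i8 %x, 4
///   %r0 = or i8 %hi, %lo   ; swap nibbles, then 2-bit pairs, then bits
/// (continued for the 2-bit and 1-bit groups) can be rewritten into a single
/// call to the llvm.bitreverse.i8 intrinsic when the target supports
/// ISD::BITREVERSE for that type.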
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBitReverseOrBSwapIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then ISel may not be able to
// handle it properly. ISel will drop the llvm.dbg.value if it cannot find a
// node corresponding to the value.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before:\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

// If there is a sequence that branches based on comparing a single bit
// against zero that can be combined into a single instruction, and the
// target supports folding these into a single instruction, sink the
// mask and compare into the branch uses. Do this before optimizeBlock ->
// optimizeInst -> OptimizeCmpExpression, which perturbs the pattern being
// searched for.
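// After sinking (illustrative, not from the original source), each
// conditional-branch user block ends up with its own copy:
//   %andN = and i32 %val, 4
//   %cmpN = icmp eq i32 %andN, 0
//   br i1 %cmpN, label %destA, label %destB
// which mask-and-branch folding can then select as a single instruction.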
bool CodeGenPrepare::sinkAndCmp(Function &F) {
  if (!EnableAndCmpSinking)
    return false;
  if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
    return false;
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = &*I++;

    // Does this BB end with the following?
    //   %andVal = and %val, #single-bit-set
    //   %cmpVal = icmp %andVal, 0
    //   br i1 %cmpVal, label %dest1, label %dest2
    BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
    if (!Brcc || !Brcc->isConditional())
      continue;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
    if (!Cmp || Cmp->getParent() != BB)
      continue;
    ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
    if (!Zero || !Zero->isZero())
      continue;
    Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
    if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
      continue;
    ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
    if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
      continue;
    DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());

    // Push the "and; icmp" for any users that are conditional branches.
    // Since there can only be one branch use per BB, we don't need to keep
    // track of which BBs we insert into.
    for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
         UI != E; ) {
      Use &TheUse = *UI;
      // Find brcc use.
      BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
      ++UI;
      if (!BrccUser || !BrccUser->isConditional())
        continue;
      BasicBlock *UserBB = BrccUser->getParent();
      if (UserBB == BB) continue;
      DEBUG(dbgs() << "found Brcc use\n");

      // Sink the "and; icmp" to use.
      MadeChange = true;
      BinaryOperator *NewAnd =
          BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
                                    BrccUser);
      CmpInst *NewCmp =
          CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
                          "", BrccUser);
      TheUse = NewCmp;
      ++NumAndCmpsMoved;
      DEBUG(BrccUser->getParent()->dump());
    }
  }
  return MadeChange;
}

/// \brief Retrieve the probabilities of a conditional branch. Returns true on
/// success, or returns false if no or invalid metadata was found.
static bool extractBranchMetadata(BranchInst *BI,
                                  uint64_t &ProbTrue, uint64_t &ProbFalse) {
  assert(BI->isConditional() &&
         "Looking for probabilities on unconditional branch?");
  auto *ProfileData = BI->getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3)
    return false;

  const auto *CITrue =
      mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1));
  const auto *CIFalse =
      mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2));
  if (!CITrue || !CIFalse)
    return false;

  ProbTrue = CITrue->getValue().getZExtValue();
  ProbFalse = CIFalse->getValue().getZExtValue();

  return true;
}

/// \brief Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ?
                        NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block: use the first condition directly in the
    // branch instruction and remove the no-longer-needed and/or instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch comes now from
    // the newly generated BB (NewBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions target
    // now the same successor.
    // Depending on the original branch condition (and/or) we have to swap the
    // successors (TrueDest, FalseDest), so that we perform the correct update
    // for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
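      // Worked example (illustrative, not from the original source): with
      // original weights A=3 and B=1, the rule below gives Br1 weights
      // (2A+B, B) = (7, 1) and Br2 weights (2A, B) = (6, 1), so
      //   FalseProb for BB1 + TrueProb for BB1 * FalseProb for TmpBB
      //     = 1/8 + (7/8) * (1/7) = 1/4 = B / (A+B),
      // which matches FalseProb for the original BB, as required.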
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}

void CodeGenPrepare::stripInvariantGroupMetadata(Instruction &I) {
  if (auto *InvariantMD = I.getMetadata(LLVMContext::MD_invariant_group))
    I.dropUnknownNonDebugMetadata(InvariantMD->getMetadataID());
}
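// Usage sketch (illustrative, not part of the original file): the pass is
// normally created via createCodeGenPreparePass() and scheduled early in the
// target's codegen pipeline, e.g.:
//
//   legacy::PassManager PM;
//   PM.add(createCodeGenPreparePass(TM));
//   PM.run(M);
//
// If TM is null, TLI stays null and most TLI-dependent transforms above are
// skipped.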