1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass munges the code in the input function to better prepare it for
11 // SelectionDAG-based code generation. This works around limitations in its
12 // basic-block-at-a-time approach. It should eventually be removed.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #include "llvm/CodeGen/Passes.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/Analysis/TargetLibraryInfo.h"
22 #include "llvm/Analysis/TargetTransformInfo.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/DerivedTypes.h"
28 #include "llvm/IR/Dominators.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/GetElementPtrTypeIterator.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/InlineAsm.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/MDBuilder.h"
36 #include "llvm/IR/PatternMatch.h"
37 #include "llvm/IR/Statepoint.h"
38 #include "llvm/IR/ValueHandle.h"
39 #include "llvm/IR/ValueMap.h"
40 #include "llvm/Pass.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Target/TargetLowering.h"
45 #include "llvm/Target/TargetSubtargetInfo.h"
46 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
47 #include "llvm/Transforms/Utils/BuildLibCalls.h"
48 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
49 #include "llvm/Transforms/Utils/Local.h"
50 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
51 using namespace llvm;
52 using namespace llvm::PatternMatch;
53
54 #define DEBUG_TYPE "codegenprepare"
55
56 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
57 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
58 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
59 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
60 "sunken Cmps");
61 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
62 "of sunken Casts");
63 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
64 "computations were sunk");
65 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
66 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
67 STATISTIC(NumAndsAdded,
68 "Number of and mask instructions added to form ext loads");
69 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
70 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
71 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
72 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
73 STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");
74 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
75
76 static cl::opt<bool> DisableBranchOpts(
77 "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
78 cl::desc("Disable branch optimizations in CodeGenPrepare"));
79
80 static cl::opt<bool>
81 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
82 cl::desc("Disable GC optimizations in CodeGenPrepare"));
83
84 static cl::opt<bool> DisableSelectToBranch(
85 "disable-cgp-select2branch", cl::Hidden, cl::init(false),
86 cl::desc("Disable select to branch conversion."));
87
88 static cl::opt<bool> AddrSinkUsingGEPs(
89 "addr-sink-using-gep", cl::Hidden, cl::init(false),
90 cl::desc("Address sinking in CGP using GEPs."));
91
92 static cl::opt<bool> EnableAndCmpSinking(
93 "enable-andcmp-sinking", cl::Hidden, cl::init(true),
94 cl::desc("Enable sinking and/cmp into branches."));
95
96 static cl::opt<bool> DisableStoreExtract(
97 "disable-cgp-store-extract", cl::Hidden, cl::init(false),
98 cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
99
100 static cl::opt<bool> StressStoreExtract(
101 "stress-cgp-store-extract", cl::Hidden, cl::init(false),
102 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
103
104 static cl::opt<bool> DisableExtLdPromotion(
105 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
106 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
107 "CodeGenPrepare"));
108
109 static cl::opt<bool> StressExtLdPromotion(
110 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
111 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
112 "optimization in CodeGenPrepare"));
113
114 namespace {
115 typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
116 typedef PointerIntPair<Type *, 1, bool> TypeIsSExt;
117 typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy;
118 class TypePromotionTransaction;
119
120 class CodeGenPrepare : public FunctionPass {
121 const TargetMachine *TM;
122 const TargetLowering *TLI;
123 const TargetTransformInfo *TTI;
124 const TargetLibraryInfo *TLInfo;
125
126 /// As we scan instructions optimizing them, this is the next instruction
127 /// to optimize. Transforms that can invalidate this should update it.
128 BasicBlock::iterator CurInstIterator;
129
130 /// Keeps track of non-local addresses that have been sunk into a block.
131 /// This allows us to avoid inserting duplicate code for blocks with
132 /// multiple load/stores of the same address.
133 ValueMap<Value*, Value*> SunkAddrs;
134
135 /// Keeps track of all instructions inserted for the current function.
136 SetOfInstrs InsertedInsts;
137 /// Keeps track of the types of the related instructions before their
138 /// promotion for the current function.
139 InstrToOrigTy PromotedInsts;
140
141 /// True if CFG is modified in any way.
142 bool ModifiedDT;
143
144 /// True if optimizing for size.
145 bool OptSize;
146
147 /// DataLayout for the Function being processed.
148 const DataLayout *DL; 149 150 public: 151 static char ID; // Pass identification, replacement for typeid 152 explicit CodeGenPrepare(const TargetMachine *TM = nullptr) 153 : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) { 154 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); 155 } 156 bool runOnFunction(Function &F) override; 157 158 const char *getPassName() const override { return "CodeGen Prepare"; } 159 160 void getAnalysisUsage(AnalysisUsage &AU) const override { 161 AU.addPreserved<DominatorTreeWrapperPass>(); 162 AU.addRequired<TargetLibraryInfoWrapperPass>(); 163 AU.addRequired<TargetTransformInfoWrapperPass>(); 164 } 165 166 private: 167 bool eliminateFallThrough(Function &F); 168 bool eliminateMostlyEmptyBlocks(Function &F); 169 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; 170 void eliminateMostlyEmptyBlock(BasicBlock *BB); 171 bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT); 172 bool optimizeInst(Instruction *I, bool& ModifiedDT); 173 bool optimizeMemoryInst(Instruction *I, Value *Addr, 174 Type *AccessTy, unsigned AS); 175 bool optimizeInlineAsmInst(CallInst *CS); 176 bool optimizeCallInst(CallInst *CI, bool& ModifiedDT); 177 bool moveExtToFormExtLoad(Instruction *&I); 178 bool optimizeExtUses(Instruction *I); 179 bool optimizeLoadExt(LoadInst *I); 180 bool optimizeSelectInst(SelectInst *SI); 181 bool optimizeShuffleVectorInst(ShuffleVectorInst *SI); 182 bool optimizeSwitchInst(SwitchInst *CI); 183 bool optimizeExtractElementInst(Instruction *Inst); 184 bool dupRetToEnableTailCallOpts(BasicBlock *BB); 185 bool placeDbgValues(Function &F); 186 bool sinkAndCmp(Function &F); 187 bool extLdPromotion(TypePromotionTransaction &TPT, LoadInst *&LI, 188 Instruction *&Inst, 189 const SmallVectorImpl<Instruction *> &Exts, 190 unsigned CreatedInstCost); 191 bool splitBranchCondition(Function &F); 192 bool simplifyOffsetableRelocate(Instruction &I); 193 void stripInvariantGroupMetadata(Instruction &I); 194 }; 195 } 196 197 char CodeGenPrepare::ID = 0; 198 INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare", 199 "Optimize for code generation", false, false) 200 201 FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) { 202 return new CodeGenPrepare(TM); 203 } 204 205 bool CodeGenPrepare::runOnFunction(Function &F) { 206 if (skipOptnoneFunction(F)) 207 return false; 208 209 DL = &F.getParent()->getDataLayout(); 210 211 bool EverMadeChange = false; 212 // Clear per function information. 213 InsertedInsts.clear(); 214 PromotedInsts.clear(); 215 216 ModifiedDT = false; 217 if (TM) 218 TLI = TM->getSubtargetImpl(F)->getTargetLowering(); 219 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 220 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 221 OptSize = F.optForSize(); 222 223 /// This optimization identifies DIV instructions that can be 224 /// profitably bypassed and carried out with a shorter, faster divide. 225 if (!OptSize && TLI && TLI->isSlowDivBypassed()) { 226 const DenseMap<unsigned int, unsigned int> &BypassWidths = 227 TLI->getBypassSlowDivWidths(); 228 BasicBlock* BB = &*F.begin(); 229 while (BB != nullptr) { 230 // bypassSlowDivision may create new BBs, but we don't want to reapply the 231 // optimization to those blocks. 232 BasicBlock* Next = BB->getNextNode(); 233 EverMadeChange |= bypassSlowDivision(BB, BypassWidths); 234 BB = Next; 235 } 236 } 237 238 // Eliminate blocks that contain only PHI nodes and an 239 // unconditional branch. 
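// A rough sketch (hypothetical IR) of the kind of block this removes:
//   bb:                                ; preds = %a, %b
//     %p = phi i32 [ 0, %a ], [ 1, %b ]
//     br label %dest
// When canMergeBlocks allows it, %p is folded into the PHIs of %dest and
// bb itself goes away.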
240 EverMadeChange |= eliminateMostlyEmptyBlocks(F); 241 242 // llvm.dbg.value is far away from the value then iSel may not be able 243 // handle it properly. iSel will drop llvm.dbg.value if it can not 244 // find a node corresponding to the value. 245 EverMadeChange |= placeDbgValues(F); 246 247 // If there is a mask, compare against zero, and branch that can be combined 248 // into a single target instruction, push the mask and compare into branch 249 // users. Do this before OptimizeBlock -> OptimizeInst -> 250 // OptimizeCmpExpression, which perturbs the pattern being searched for. 251 if (!DisableBranchOpts) { 252 EverMadeChange |= sinkAndCmp(F); 253 EverMadeChange |= splitBranchCondition(F); 254 } 255 256 bool MadeChange = true; 257 while (MadeChange) { 258 MadeChange = false; 259 for (Function::iterator I = F.begin(); I != F.end(); ) { 260 BasicBlock *BB = &*I++; 261 bool ModifiedDTOnIteration = false; 262 MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration); 263 264 // Restart BB iteration if the dominator tree of the Function was changed 265 if (ModifiedDTOnIteration) 266 break; 267 } 268 EverMadeChange |= MadeChange; 269 } 270 271 SunkAddrs.clear(); 272 273 if (!DisableBranchOpts) { 274 MadeChange = false; 275 SmallPtrSet<BasicBlock*, 8> WorkList; 276 for (BasicBlock &BB : F) { 277 SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB)); 278 MadeChange |= ConstantFoldTerminator(&BB, true); 279 if (!MadeChange) continue; 280 281 for (SmallVectorImpl<BasicBlock*>::iterator 282 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 283 if (pred_begin(*II) == pred_end(*II)) 284 WorkList.insert(*II); 285 } 286 287 // Delete the dead blocks and any of their dead successors. 288 MadeChange |= !WorkList.empty(); 289 while (!WorkList.empty()) { 290 BasicBlock *BB = *WorkList.begin(); 291 WorkList.erase(BB); 292 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); 293 294 DeleteDeadBlock(BB); 295 296 for (SmallVectorImpl<BasicBlock*>::iterator 297 II = Successors.begin(), IE = Successors.end(); II != IE; ++II) 298 if (pred_begin(*II) == pred_end(*II)) 299 WorkList.insert(*II); 300 } 301 302 // Merge pairs of basic blocks with unconditional branches, connected by 303 // a single edge. 304 if (EverMadeChange || MadeChange) 305 MadeChange |= eliminateFallThrough(F); 306 307 EverMadeChange |= MadeChange; 308 } 309 310 if (!DisableGCOpts) { 311 SmallVector<Instruction *, 2> Statepoints; 312 for (BasicBlock &BB : F) 313 for (Instruction &I : BB) 314 if (isStatepoint(I)) 315 Statepoints.push_back(&I); 316 for (auto &I : Statepoints) 317 EverMadeChange |= simplifyOffsetableRelocate(*I); 318 } 319 320 return EverMadeChange; 321 } 322 323 /// Merge basic blocks which are connected by a single edge, where one of the 324 /// basic blocks has a single successor pointing to the other basic block, 325 /// which has a single predecessor. 326 bool CodeGenPrepare::eliminateFallThrough(Function &F) { 327 bool Changed = false; 328 // Scan all of the blocks in the function, except for the entry block. 329 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { 330 BasicBlock *BB = &*I++; 331 // If the destination block has a single pred, then this is a trivial 332 // edge, just collapse it. 333 BasicBlock *SinglePred = BB->getSinglePredecessor(); 334 335 // Don't merge if BB's address is taken. 
336 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; 337 338 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); 339 if (Term && !Term->isConditional()) { 340 Changed = true; 341 DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n"); 342 // Remember if SinglePred was the entry block of the function. 343 // If so, we will need to move BB back to the entry position. 344 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 345 MergeBasicBlockIntoOnlyPred(BB, nullptr); 346 347 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 348 BB->moveBefore(&BB->getParent()->getEntryBlock()); 349 350 // We have erased a block. Update the iterator. 351 I = BB->getIterator(); 352 } 353 } 354 return Changed; 355 } 356 357 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an 358 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split 359 /// edges in ways that are non-optimal for isel. Start by eliminating these 360 /// blocks so we can split them the way we want them. 361 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) { 362 bool MadeChange = false; 363 // Note that this intentionally skips the entry block. 364 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { 365 BasicBlock *BB = &*I++; 366 367 // If this block doesn't end with an uncond branch, ignore it. 368 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); 369 if (!BI || !BI->isUnconditional()) 370 continue; 371 372 // If the instruction before the branch (skipping debug info) isn't a phi 373 // node, then other stuff is happening here. 374 BasicBlock::iterator BBI = BI->getIterator(); 375 if (BBI != BB->begin()) { 376 --BBI; 377 while (isa<DbgInfoIntrinsic>(BBI)) { 378 if (BBI == BB->begin()) 379 break; 380 --BBI; 381 } 382 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) 383 continue; 384 } 385 386 // Do not break infinite loops. 387 BasicBlock *DestBB = BI->getSuccessor(0); 388 if (DestBB == BB) 389 continue; 390 391 if (!canMergeBlocks(BB, DestBB)) 392 continue; 393 394 eliminateMostlyEmptyBlock(BB); 395 MadeChange = true; 396 } 397 return MadeChange; 398 } 399 400 /// Return true if we can merge BB into DestBB if there is a single 401 /// unconditional branch between them, and BB contains no other non-phi 402 /// instructions. 403 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB, 404 const BasicBlock *DestBB) const { 405 // We only want to eliminate blocks whose phi nodes are used by phi nodes in 406 // the successor. If there are more complex condition (e.g. preheaders), 407 // don't mess around with them. 408 BasicBlock::const_iterator BBI = BB->begin(); 409 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { 410 for (const User *U : PN->users()) { 411 const Instruction *UI = cast<Instruction>(U); 412 if (UI->getParent() != DestBB || !isa<PHINode>(UI)) 413 return false; 414 // If User is inside DestBB block and it is a PHINode then check 415 // incoming value. If incoming value is not from BB then this is 416 // a complex condition (e.g. preheaders) we want to avoid here. 
417 if (UI->getParent() == DestBB) { 418 if (const PHINode *UPN = dyn_cast<PHINode>(UI)) 419 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) { 420 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I)); 421 if (Insn && Insn->getParent() == BB && 422 Insn->getParent() != UPN->getIncomingBlock(I)) 423 return false; 424 } 425 } 426 } 427 } 428 429 // If BB and DestBB contain any common predecessors, then the phi nodes in BB 430 // and DestBB may have conflicting incoming values for the block. If so, we 431 // can't merge the block. 432 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin()); 433 if (!DestBBPN) return true; // no conflict. 434 435 // Collect the preds of BB. 436 SmallPtrSet<const BasicBlock*, 16> BBPreds; 437 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 438 // It is faster to get preds from a PHI than with pred_iterator. 439 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 440 BBPreds.insert(BBPN->getIncomingBlock(i)); 441 } else { 442 BBPreds.insert(pred_begin(BB), pred_end(BB)); 443 } 444 445 // Walk the preds of DestBB. 446 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { 447 BasicBlock *Pred = DestBBPN->getIncomingBlock(i); 448 if (BBPreds.count(Pred)) { // Common predecessor? 449 BBI = DestBB->begin(); 450 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { 451 const Value *V1 = PN->getIncomingValueForBlock(Pred); 452 const Value *V2 = PN->getIncomingValueForBlock(BB); 453 454 // If V2 is a phi node in BB, look up what the mapped value will be. 455 if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) 456 if (V2PN->getParent() == BB) 457 V2 = V2PN->getIncomingValueForBlock(Pred); 458 459 // If there is a conflict, bail out. 460 if (V1 != V2) return false; 461 } 462 } 463 } 464 465 return true; 466 } 467 468 469 /// Eliminate a basic block that has only phi's and an unconditional branch in 470 /// it. 471 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) { 472 BranchInst *BI = cast<BranchInst>(BB->getTerminator()); 473 BasicBlock *DestBB = BI->getSuccessor(0); 474 475 DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB); 476 477 // If the destination block has a single pred, then this is a trivial edge, 478 // just collapse it. 479 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { 480 if (SinglePred != DestBB) { 481 // Remember if SinglePred was the entry block of the function. If so, we 482 // will need to move BB back to the entry position. 483 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); 484 MergeBasicBlockIntoOnlyPred(DestBB, nullptr); 485 486 if (isEntry && BB != &BB->getParent()->getEntryBlock()) 487 BB->moveBefore(&BB->getParent()->getEntryBlock()); 488 489 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 490 return; 491 } 492 } 493 494 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB 495 // to handle the new incoming edges it is about to have. 496 PHINode *PN; 497 for (BasicBlock::iterator BBI = DestBB->begin(); 498 (PN = dyn_cast<PHINode>(BBI)); ++BBI) { 499 // Remove the incoming value for BB, and remember it. 500 Value *InVal = PN->removeIncomingValue(BB, false); 501 502 // Two options: either the InVal is a phi node defined in BB or it is some 503 // value that dominates BB. 504 PHINode *InValPhi = dyn_cast<PHINode>(InVal); 505 if (InValPhi && InValPhi->getParent() == BB) { 506 // Add all of the input values of the input PHI as inputs of this phi. 
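// E.g. (sketch): if InVal is "%v = phi i32 [ %x, %p1 ], [ %y, %p2 ]" defined
// in BB, then PN directly gains the incoming pairs (%x, %p1) and (%y, %p2).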
507 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) 508 PN->addIncoming(InValPhi->getIncomingValue(i), 509 InValPhi->getIncomingBlock(i)); 510 } else { 511 // Otherwise, add one instance of the dominating value for each edge that 512 // we will be adding. 513 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { 514 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) 515 PN->addIncoming(InVal, BBPN->getIncomingBlock(i)); 516 } else { 517 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) 518 PN->addIncoming(InVal, *PI); 519 } 520 } 521 } 522 523 // The PHIs are now updated, change everything that refers to BB to use 524 // DestBB and remove BB. 525 BB->replaceAllUsesWith(DestBB); 526 BB->eraseFromParent(); 527 ++NumBlocksElim; 528 529 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); 530 } 531 532 // Computes a map of base pointer relocation instructions to corresponding 533 // derived pointer relocation instructions given a vector of all relocate calls 534 static void computeBaseDerivedRelocateMap( 535 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, 536 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> 537 &RelocateInstMap) { 538 // Collect information in two maps: one primarily for locating the base object 539 // while filling the second map; the second map is the final structure holding 540 // a mapping between Base and corresponding Derived relocate calls 541 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; 542 for (auto *ThisRelocate : AllRelocateCalls) { 543 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), 544 ThisRelocate->getDerivedPtrIndex()); 545 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); 546 } 547 for (auto &Item : RelocateIdxMap) { 548 std::pair<unsigned, unsigned> Key = Item.first; 549 if (Key.first == Key.second) 550 // Base relocation: nothing to insert 551 continue; 552 553 GCRelocateInst *I = Item.second; 554 auto BaseKey = std::make_pair(Key.first, Key.first); 555 556 // We're iterating over RelocateIdxMap so we cannot modify it. 557 auto MaybeBase = RelocateIdxMap.find(BaseKey); 558 if (MaybeBase == RelocateIdxMap.end()) 559 // TODO: We might want to insert a new base object relocate and gep off 560 // that, if there are enough derived object relocates. 561 continue; 562 563 RelocateInstMap[MaybeBase->second].push_back(I); 564 } 565 } 566 567 // Accepts a GEP and extracts the operands into a vector provided they're all 568 // small integer constants 569 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, 570 SmallVectorImpl<Value *> &OffsetV) { 571 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 572 // Only accept small constant integer operands 573 auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); 574 if (!Op || Op->getZExtValue() > 20) 575 return false; 576 } 577 578 for (unsigned i = 1; i < GEP->getNumOperands(); i++) 579 OffsetV.push_back(GEP->getOperand(i)); 580 return true; 581 } 582 583 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to 584 // replace, computes a replacement, and affects it. 
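// For example (a sketch, mirroring the IR shown before
// simplifyOffsetableRelocate below): a derived relocate whose derived pointer
// is "gep %base, <small constant offsets>" is replaced by the same gep applied
// to the relocated base, with bitcasts inserted where the relocate types
// differ.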
585 static bool
586 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
587 const SmallVectorImpl<GCRelocateInst *> &Targets) {
588 bool MadeChange = false;
589 for (GCRelocateInst *ToReplace : Targets) {
590 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
591 "Not relocating a derived object of the original base object");
592 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
593 // A duplicate relocate call. TODO: coalesce duplicates.
594 continue;
595 }
596
597 if (RelocatedBase->getParent() != ToReplace->getParent()) {
598 // Base and derived relocates are in different basic blocks.
599 // In this case the transform is only valid when the base dominates the
600 // derived relocate. However, it would be too expensive to check dominance
601 // for each such relocate, so we skip the whole transformation.
602 continue;
603 }
604
605 Value *Base = ToReplace->getBasePtr();
606 auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
607 if (!Derived || Derived->getPointerOperand() != Base)
608 continue;
609
610 SmallVector<Value *, 2> OffsetV;
611 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
612 continue;
613
614 // Create a Builder and replace the target callsite with a gep
615 assert(RelocatedBase->getNextNode() &&
616 "Should always have one since it's not a terminator");
617
618 // Insert after RelocatedBase
619 IRBuilder<> Builder(RelocatedBase->getNextNode());
620 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
621
622 // If gc_relocate does not match the actual type, cast it to the right type.
623 // In theory, there must be a bitcast after gc_relocate if the type does not
624 // match, and we should reuse it to get the derived pointer. But there could be
625 // cases like this:
626 // bb1:
627 // ...
628 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
629 // br label %merge
630 //
631 // bb2:
632 // ...
633 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
634 // br label %merge
635 //
636 // merge:
637 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
638 // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
639 //
640 // In this case, we cannot find the bitcast anymore, so we insert a new bitcast
641 // whether or not one already exists. In this way, we can handle all cases, and
642 // the extra bitcast should be optimized away in later passes.
643 Value *ActualRelocatedBase = RelocatedBase;
644 if (RelocatedBase->getType() != Base->getType()) {
645 ActualRelocatedBase =
646 Builder.CreateBitCast(RelocatedBase, Base->getType());
647 }
648 Value *Replacement = Builder.CreateGEP(
649 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
650 Replacement->takeName(ToReplace);
651 // If the newly generated derived pointer's type does not match the original derived
652 // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
653 Value *ActualReplacement = Replacement;
654 if (Replacement->getType() != ToReplace->getType()) {
655 ActualReplacement =
656 Builder.CreateBitCast(Replacement, ToReplace->getType());
657 }
658 ToReplace->replaceAllUsesWith(ActualReplacement);
659 ToReplace->eraseFromParent();
660
661 MadeChange = true;
662 }
663 return MadeChange;
664 }
665
666 // Turns this:
667 //
668 // %base = ...
669 // %ptr = gep %base + 15 670 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 671 // %base' = relocate(%tok, i32 4, i32 4) 672 // %ptr' = relocate(%tok, i32 4, i32 5) 673 // %val = load %ptr' 674 // 675 // into this: 676 // 677 // %base = ... 678 // %ptr = gep %base + 15 679 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) 680 // %base' = gc.relocate(%tok, i32 4, i32 4) 681 // %ptr' = gep %base' + 15 682 // %val = load %ptr' 683 bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) { 684 bool MadeChange = false; 685 SmallVector<GCRelocateInst *, 2> AllRelocateCalls; 686 687 for (auto *U : I.users()) 688 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) 689 // Collect all the relocate calls associated with a statepoint 690 AllRelocateCalls.push_back(Relocate); 691 692 // We need atleast one base pointer relocation + one derived pointer 693 // relocation to mangle 694 if (AllRelocateCalls.size() < 2) 695 return false; 696 697 // RelocateInstMap is a mapping from the base relocate instruction to the 698 // corresponding derived relocate instructions 699 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; 700 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); 701 if (RelocateInstMap.empty()) 702 return false; 703 704 for (auto &Item : RelocateInstMap) 705 // Item.first is the RelocatedBase to offset against 706 // Item.second is the vector of Targets to replace 707 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second); 708 return MadeChange; 709 } 710 711 /// SinkCast - Sink the specified cast instruction into its user blocks 712 static bool SinkCast(CastInst *CI) { 713 BasicBlock *DefBB = CI->getParent(); 714 715 /// InsertedCasts - Only insert a cast in each block once. 716 DenseMap<BasicBlock*, CastInst*> InsertedCasts; 717 718 bool MadeChange = false; 719 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 720 UI != E; ) { 721 Use &TheUse = UI.getUse(); 722 Instruction *User = cast<Instruction>(*UI); 723 724 // Figure out which BB this cast is used in. For PHI's this is the 725 // appropriate predecessor block. 726 BasicBlock *UserBB = User->getParent(); 727 if (PHINode *PN = dyn_cast<PHINode>(User)) { 728 UserBB = PN->getIncomingBlock(TheUse); 729 } 730 731 // Preincrement use iterator so we don't invalidate it. 732 ++UI; 733 734 // If the block selected to receive the cast is an EH pad that does not 735 // allow non-PHI instructions before the terminator, we can't sink the 736 // cast. 737 if (UserBB->getTerminator()->isEHPad()) 738 continue; 739 740 // If this user is in the same block as the cast, don't change the cast. 741 if (UserBB == DefBB) continue; 742 743 // If we have already inserted a cast into this block, use it. 744 CastInst *&InsertedCast = InsertedCasts[UserBB]; 745 746 if (!InsertedCast) { 747 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 748 assert(InsertPt != UserBB->end()); 749 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), 750 CI->getType(), "", &*InsertPt); 751 } 752 753 // Replace a use of the cast with a use of the new cast. 754 TheUse = InsertedCast; 755 MadeChange = true; 756 ++NumCastUses; 757 } 758 759 // If we removed all uses, nuke the cast. 760 if (CI->use_empty()) { 761 CI->eraseFromParent(); 762 MadeChange = true; 763 } 764 765 return MadeChange; 766 } 767 768 /// If the specified cast instruction is a noop copy (e.g. 
it's casting from 769 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to 770 /// reduce the number of virtual registers that must be created and coalesced. 771 /// 772 /// Return true if any changes are made. 773 /// 774 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, 775 const DataLayout &DL) { 776 // If this is a noop copy, 777 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); 778 EVT DstVT = TLI.getValueType(DL, CI->getType()); 779 780 // This is an fp<->int conversion? 781 if (SrcVT.isInteger() != DstVT.isInteger()) 782 return false; 783 784 // If this is an extension, it will be a zero or sign extension, which 785 // isn't a noop. 786 if (SrcVT.bitsLT(DstVT)) return false; 787 788 // If these values will be promoted, find out what they will be promoted 789 // to. This helps us consider truncates on PPC as noop copies when they 790 // are. 791 if (TLI.getTypeAction(CI->getContext(), SrcVT) == 792 TargetLowering::TypePromoteInteger) 793 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); 794 if (TLI.getTypeAction(CI->getContext(), DstVT) == 795 TargetLowering::TypePromoteInteger) 796 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); 797 798 // If, after promotion, these are the same types, this is a noop copy. 799 if (SrcVT != DstVT) 800 return false; 801 802 return SinkCast(CI); 803 } 804 805 /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if 806 /// possible. 807 /// 808 /// Return true if any changes were made. 809 static bool CombineUAddWithOverflow(CmpInst *CI) { 810 Value *A, *B; 811 Instruction *AddI; 812 if (!match(CI, 813 m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI)))) 814 return false; 815 816 Type *Ty = AddI->getType(); 817 if (!isa<IntegerType>(Ty)) 818 return false; 819 820 // We don't want to move around uses of condition values this late, so we we 821 // check if it is legal to create the call to the intrinsic in the basic 822 // block containing the icmp: 823 824 if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse()) 825 return false; 826 827 #ifndef NDEBUG 828 // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption 829 // for now: 830 if (AddI->hasOneUse()) 831 assert(*AddI->user_begin() == CI && "expected!"); 832 #endif 833 834 Module *M = CI->getModule(); 835 Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty); 836 837 auto *InsertPt = AddI->hasOneUse() ? CI : AddI; 838 839 auto *UAddWithOverflow = 840 CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt); 841 auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt); 842 auto *Overflow = 843 ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt); 844 845 CI->replaceAllUsesWith(Overflow); 846 AddI->replaceAllUsesWith(UAdd); 847 CI->eraseFromParent(); 848 AddI->eraseFromParent(); 849 return true; 850 } 851 852 /// Sink the given CmpInst into user blocks to reduce the number of virtual 853 /// registers that must be created and coalesced. This is a clear win except on 854 /// targets with multiple condition code registers (PowerPC), where it might 855 /// lose; some adjustment may be wanted there. 856 /// 857 /// Return true if any changes are made. 858 static bool SinkCmpExpression(CmpInst *CI) { 859 BasicBlock *DefBB = CI->getParent(); 860 861 /// Only insert a cmp in each block once. 
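// Sketch of the rewrite (hypothetical IR): a cmp defined in one block but
// used in another gets a copy inserted into the user's block, so the i1
// result need not be kept live across the block boundary.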
862 DenseMap<BasicBlock*, CmpInst*> InsertedCmps; 863 864 bool MadeChange = false; 865 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); 866 UI != E; ) { 867 Use &TheUse = UI.getUse(); 868 Instruction *User = cast<Instruction>(*UI); 869 870 // Preincrement use iterator so we don't invalidate it. 871 ++UI; 872 873 // Don't bother for PHI nodes. 874 if (isa<PHINode>(User)) 875 continue; 876 877 // Figure out which BB this cmp is used in. 878 BasicBlock *UserBB = User->getParent(); 879 880 // If this user is in the same block as the cmp, don't change the cmp. 881 if (UserBB == DefBB) continue; 882 883 // If we have already inserted a cmp into this block, use it. 884 CmpInst *&InsertedCmp = InsertedCmps[UserBB]; 885 886 if (!InsertedCmp) { 887 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 888 assert(InsertPt != UserBB->end()); 889 InsertedCmp = 890 CmpInst::Create(CI->getOpcode(), CI->getPredicate(), 891 CI->getOperand(0), CI->getOperand(1), "", &*InsertPt); 892 } 893 894 // Replace a use of the cmp with a use of the new cmp. 895 TheUse = InsertedCmp; 896 MadeChange = true; 897 ++NumCmpUses; 898 } 899 900 // If we removed all uses, nuke the cmp. 901 if (CI->use_empty()) { 902 CI->eraseFromParent(); 903 MadeChange = true; 904 } 905 906 return MadeChange; 907 } 908 909 static bool OptimizeCmpExpression(CmpInst *CI) { 910 if (SinkCmpExpression(CI)) 911 return true; 912 913 if (CombineUAddWithOverflow(CI)) 914 return true; 915 916 return false; 917 } 918 919 /// Check if the candidates could be combined with a shift instruction, which 920 /// includes: 921 /// 1. Truncate instruction 922 /// 2. And instruction and the imm is a mask of the low bits: 923 /// imm & (imm+1) == 0 924 static bool isExtractBitsCandidateUse(Instruction *User) { 925 if (!isa<TruncInst>(User)) { 926 if (User->getOpcode() != Instruction::And || 927 !isa<ConstantInt>(User->getOperand(1))) 928 return false; 929 930 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); 931 932 if ((Cimm & (Cimm + 1)).getBoolValue()) 933 return false; 934 } 935 return true; 936 } 937 938 /// Sink both shift and truncate instruction to the use of truncate's BB. 939 static bool 940 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, 941 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, 942 const TargetLowering &TLI, const DataLayout &DL) { 943 BasicBlock *UserBB = User->getParent(); 944 DenseMap<BasicBlock *, CastInst *> InsertedTruncs; 945 TruncInst *TruncI = dyn_cast<TruncInst>(User); 946 bool MadeChange = false; 947 948 for (Value::user_iterator TruncUI = TruncI->user_begin(), 949 TruncE = TruncI->user_end(); 950 TruncUI != TruncE;) { 951 952 Use &TruncTheUse = TruncUI.getUse(); 953 Instruction *TruncUser = cast<Instruction>(*TruncUI); 954 // Preincrement use iterator so we don't invalidate it. 955 956 ++TruncUI; 957 958 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); 959 if (!ISDOpcode) 960 continue; 961 962 // If the use is actually a legal node, there will not be an 963 // implicit truncate. 964 // FIXME: always querying the result type is just an 965 // approximation; some nodes' legality is determined by the 966 // operand or other means. There's no good way to find out though. 967 if (TLI.isOperationLegalOrCustom( 968 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) 969 continue; 970 971 // Don't bother for PHI nodes. 
972 if (isa<PHINode>(TruncUser))
973 continue;
974
975 BasicBlock *TruncUserBB = TruncUser->getParent();
976
977 if (UserBB == TruncUserBB)
978 continue;
979
980 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
981 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
982
983 if (!InsertedShift && !InsertedTrunc) {
984 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
985 assert(InsertPt != TruncUserBB->end());
986 // Sink the shift
987 if (ShiftI->getOpcode() == Instruction::AShr)
988 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
989 "", &*InsertPt);
990 else
991 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
992 "", &*InsertPt);
993
994 // Sink the trunc
995 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
996 TruncInsertPt++;
997 assert(TruncInsertPt != TruncUserBB->end());
998
999 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1000 TruncI->getType(), "", &*TruncInsertPt);
1001
1002 MadeChange = true;
1003
1004 TruncTheUse = InsertedTrunc;
1005 }
1006 }
1007 return MadeChange;
1008 }
1009
1010 /// Sink the shift *right* instruction into user blocks if the uses could
1011 /// potentially be combined with this shift instruction and generate a
1012 /// BitExtract instruction. It will only be applied if the architecture
1013 /// supports the BitExtract instruction. Here is an example:
1014 /// BB1:
1015 /// %x.extract.shift = lshr i64 %arg1, 32
1016 /// BB2:
1017 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16
1018 /// ==>
1019 ///
1020 /// BB2:
1021 /// %x.extract.shift.1 = lshr i64 %arg1, 32
1022 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
1023 ///
1024 /// CodeGen will recognize the pattern in BB2 and generate a BitExtract
1025 /// instruction.
1026 /// Return true if any changes are made.
1027 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1028 const TargetLowering &TLI,
1029 const DataLayout &DL) {
1030 BasicBlock *DefBB = ShiftI->getParent();
1031
1032 /// Only insert instructions in each block once.
1033 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1034
1035 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1036
1037 bool MadeChange = false;
1038 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1039 UI != E;) {
1040 Use &TheUse = UI.getUse();
1041 Instruction *User = cast<Instruction>(*UI);
1042 // Preincrement use iterator so we don't invalidate it.
1043 ++UI;
1044
1045 // Don't bother for PHI nodes.
1046 if (isa<PHINode>(User))
1047 continue;
1048
1049 if (!isExtractBitsCandidateUse(User))
1050 continue;
1051
1052 BasicBlock *UserBB = User->getParent();
1053
1054 if (UserBB == DefBB) {
1055 // If the shift and truncate instructions are in the same BB, the use of
1056 // the truncate (TruncUse) may still introduce another truncate if it is
1057 // not legal. In this case, we would like to sink both the shift and the
1058 // truncate instruction to the BB of TruncUse.
1059 // For example:
1060 // BB1:
1061 // i64 shift.result = lshr i64 opnd, imm
1062 // trunc.result = trunc shift.result to i16
1063 //
1064 // BB2:
1065 // ----> We will have an implicit truncate here if the architecture does
1066 // not have i16 compare.
1067 // cmp i16 trunc.result, opnd2
1068 //
1069 if (isa<TruncInst>(User) && shiftIsLegal
1070 // If the type of the truncate is legal, no truncate will be
1071 // introduced in other basic blocks.
1072 && 1073 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) 1074 MadeChange = 1075 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); 1076 1077 continue; 1078 } 1079 // If we have already inserted a shift into this block, use it. 1080 BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; 1081 1082 if (!InsertedShift) { 1083 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 1084 assert(InsertPt != UserBB->end()); 1085 1086 if (ShiftI->getOpcode() == Instruction::AShr) 1087 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, 1088 "", &*InsertPt); 1089 else 1090 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, 1091 "", &*InsertPt); 1092 1093 MadeChange = true; 1094 } 1095 1096 // Replace a use of the shift with a use of the new shift. 1097 TheUse = InsertedShift; 1098 } 1099 1100 // If we removed all uses, nuke the shift. 1101 if (ShiftI->use_empty()) 1102 ShiftI->eraseFromParent(); 1103 1104 return MadeChange; 1105 } 1106 1107 // Translate a masked load intrinsic like 1108 // <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align, 1109 // <16 x i1> %mask, <16 x i32> %passthru) 1110 // to a chain of basic blocks, with loading element one-by-one if 1111 // the appropriate mask bit is set 1112 // 1113 // %1 = bitcast i8* %addr to i32* 1114 // %2 = extractelement <16 x i1> %mask, i32 0 1115 // %3 = icmp eq i1 %2, true 1116 // br i1 %3, label %cond.load, label %else 1117 // 1118 //cond.load: ; preds = %0 1119 // %4 = getelementptr i32* %1, i32 0 1120 // %5 = load i32* %4 1121 // %6 = insertelement <16 x i32> undef, i32 %5, i32 0 1122 // br label %else 1123 // 1124 //else: ; preds = %0, %cond.load 1125 // %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ] 1126 // %7 = extractelement <16 x i1> %mask, i32 1 1127 // %8 = icmp eq i1 %7, true 1128 // br i1 %8, label %cond.load1, label %else2 1129 // 1130 //cond.load1: ; preds = %else 1131 // %9 = getelementptr i32* %1, i32 1 1132 // %10 = load i32* %9 1133 // %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1 1134 // br label %else2 1135 // 1136 //else2: ; preds = %else, %cond.load1 1137 // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ] 1138 // %12 = extractelement <16 x i1> %mask, i32 2 1139 // %13 = icmp eq i1 %12, true 1140 // br i1 %13, label %cond.load4, label %else5 1141 // 1142 static void scalarizeMaskedLoad(CallInst *CI) { 1143 Value *Ptr = CI->getArgOperand(0); 1144 Value *Alignment = CI->getArgOperand(1); 1145 Value *Mask = CI->getArgOperand(2); 1146 Value *Src0 = CI->getArgOperand(3); 1147 1148 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1149 VectorType *VecType = dyn_cast<VectorType>(CI->getType()); 1150 assert(VecType && "Unexpected return type of masked load intrinsic"); 1151 1152 Type *EltTy = CI->getType()->getVectorElementType(); 1153 1154 IRBuilder<> Builder(CI->getContext()); 1155 Instruction *InsertPt = CI; 1156 BasicBlock *IfBlock = CI->getParent(); 1157 BasicBlock *CondBlock = nullptr; 1158 BasicBlock *PrevIfBlock = CI->getParent(); 1159 1160 Builder.SetInsertPoint(InsertPt); 1161 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1162 1163 // Short-cut if the mask is all-true. 
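// E.g. (sketch): @llvm.masked.load(%addr, i32 4, <16 x i1> <all ones>, %pass)
// simply becomes an ordinary aligned load from %addr.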
1164 bool IsAllOnesMask = isa<Constant>(Mask) && 1165 cast<Constant>(Mask)->isAllOnesValue(); 1166 1167 if (IsAllOnesMask) { 1168 Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal); 1169 CI->replaceAllUsesWith(NewI); 1170 CI->eraseFromParent(); 1171 return; 1172 } 1173 1174 // Adjust alignment for the scalar instruction. 1175 AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8); 1176 // Bitcast %addr fron i8* to EltTy* 1177 Type *NewPtrType = 1178 EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace()); 1179 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType); 1180 unsigned VectorWidth = VecType->getNumElements(); 1181 1182 Value *UndefVal = UndefValue::get(VecType); 1183 1184 // The result vector 1185 Value *VResult = UndefVal; 1186 1187 if (isa<ConstantVector>(Mask)) { 1188 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1189 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 1190 continue; 1191 Value *Gep = 1192 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx)); 1193 LoadInst* Load = Builder.CreateAlignedLoad(Gep, AlignVal); 1194 VResult = Builder.CreateInsertElement(VResult, Load, 1195 Builder.getInt32(Idx)); 1196 } 1197 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0); 1198 CI->replaceAllUsesWith(NewI); 1199 CI->eraseFromParent(); 1200 return; 1201 } 1202 1203 PHINode *Phi = nullptr; 1204 Value *PrevPhi = UndefVal; 1205 1206 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1207 1208 // Fill the "else" block, created in the previous iteration 1209 // 1210 // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ] 1211 // %mask_1 = extractelement <16 x i1> %mask, i32 Idx 1212 // %to_load = icmp eq i1 %mask_1, true 1213 // br i1 %to_load, label %cond.load, label %else 1214 // 1215 if (Idx > 0) { 1216 Phi = Builder.CreatePHI(VecType, 2, "res.phi.else"); 1217 Phi->addIncoming(VResult, CondBlock); 1218 Phi->addIncoming(PrevPhi, PrevIfBlock); 1219 PrevPhi = Phi; 1220 VResult = Phi; 1221 } 1222 1223 Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx)); 1224 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 1225 ConstantInt::get(Predicate->getType(), 1)); 1226 1227 // Create "cond" block 1228 // 1229 // %EltAddr = getelementptr i32* %1, i32 0 1230 // %Elt = load i32* %EltAddr 1231 // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx 1232 // 1233 CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load"); 1234 Builder.SetInsertPoint(InsertPt); 1235 1236 Value *Gep = 1237 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx)); 1238 LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal); 1239 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx)); 1240 1241 // Create "else" block, fill it in the next iteration 1242 BasicBlock *NewIfBlock = 1243 CondBlock->splitBasicBlock(InsertPt->getIterator(), "else"); 1244 Builder.SetInsertPoint(InsertPt); 1245 Instruction *OldBr = IfBlock->getTerminator(); 1246 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 1247 OldBr->eraseFromParent(); 1248 PrevIfBlock = IfBlock; 1249 IfBlock = NewIfBlock; 1250 } 1251 1252 Phi = Builder.CreatePHI(VecType, 2, "res.phi.select"); 1253 Phi->addIncoming(VResult, CondBlock); 1254 Phi->addIncoming(PrevPhi, PrevIfBlock); 1255 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0); 1256 CI->replaceAllUsesWith(NewI); 1257 CI->eraseFromParent(); 1258 } 1259 1260 // Translate a masked store intrinsic, like 1261 // void 
@llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align, 1262 // <16 x i1> %mask) 1263 // to a chain of basic blocks, that stores element one-by-one if 1264 // the appropriate mask bit is set 1265 // 1266 // %1 = bitcast i8* %addr to i32* 1267 // %2 = extractelement <16 x i1> %mask, i32 0 1268 // %3 = icmp eq i1 %2, true 1269 // br i1 %3, label %cond.store, label %else 1270 // 1271 // cond.store: ; preds = %0 1272 // %4 = extractelement <16 x i32> %val, i32 0 1273 // %5 = getelementptr i32* %1, i32 0 1274 // store i32 %4, i32* %5 1275 // br label %else 1276 // 1277 // else: ; preds = %0, %cond.store 1278 // %6 = extractelement <16 x i1> %mask, i32 1 1279 // %7 = icmp eq i1 %6, true 1280 // br i1 %7, label %cond.store1, label %else2 1281 // 1282 // cond.store1: ; preds = %else 1283 // %8 = extractelement <16 x i32> %val, i32 1 1284 // %9 = getelementptr i32* %1, i32 1 1285 // store i32 %8, i32* %9 1286 // br label %else2 1287 // . . . 1288 static void scalarizeMaskedStore(CallInst *CI) { 1289 Value *Src = CI->getArgOperand(0); 1290 Value *Ptr = CI->getArgOperand(1); 1291 Value *Alignment = CI->getArgOperand(2); 1292 Value *Mask = CI->getArgOperand(3); 1293 1294 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1295 VectorType *VecType = dyn_cast<VectorType>(Src->getType()); 1296 assert(VecType && "Unexpected data type in masked store intrinsic"); 1297 1298 Type *EltTy = VecType->getElementType(); 1299 1300 IRBuilder<> Builder(CI->getContext()); 1301 Instruction *InsertPt = CI; 1302 BasicBlock *IfBlock = CI->getParent(); 1303 Builder.SetInsertPoint(InsertPt); 1304 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1305 1306 // Short-cut if the mask is all-true. 1307 bool IsAllOnesMask = isa<Constant>(Mask) && 1308 cast<Constant>(Mask)->isAllOnesValue(); 1309 1310 if (IsAllOnesMask) { 1311 Builder.CreateAlignedStore(Src, Ptr, AlignVal); 1312 CI->eraseFromParent(); 1313 return; 1314 } 1315 1316 // Adjust alignment for the scalar instruction. 
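// (A per-element access can only be assumed to be aligned to the smaller of
// the original alignment and the element size, hence the clamp below.)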
1317 AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
1318 // Bitcast %addr from i8* to EltTy*
1319 Type *NewPtrType =
1320 EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
1321 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
1322 unsigned VectorWidth = VecType->getNumElements();
1323
1324 if (isa<ConstantVector>(Mask)) {
1325 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
1326 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
1327 continue;
1328 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
1329 Value *Gep =
1330 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
1331 Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
1332 }
1333 CI->eraseFromParent();
1334 return;
1335 }
1336
1337 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
1338
1339 // Fill the "else" block, created in the previous iteration
1340 //
1341 // %mask_1 = extractelement <16 x i1> %mask, i32 Idx
1342 // %to_store = icmp eq i1 %mask_1, true
1343 // br i1 %to_store, label %cond.store, label %else
1344 //
1345 Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
1346 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
1347 ConstantInt::get(Predicate->getType(), 1));
1348
1349 // Create "cond" block
1350 //
1351 // %OneElt = extractelement <16 x i32> %Src, i32 Idx
1352 // %EltAddr = getelementptr i32* %1, i32 0
1353 // %store i32 %OneElt, i32* %EltAddr
1354 //
1355 BasicBlock *CondBlock =
1356 IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
1357 Builder.SetInsertPoint(InsertPt);
1358
1359 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
1360 Value *Gep =
1361 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
1362 Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
1363
1364 // Create "else" block, fill it in the next iteration
1365 BasicBlock *NewIfBlock =
1366 CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
1367 Builder.SetInsertPoint(InsertPt);
1368 Instruction *OldBr = IfBlock->getTerminator();
1369 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
1370 OldBr->eraseFromParent();
1371 IfBlock = NewIfBlock;
1372 }
1373 CI->eraseFromParent();
1374 }
1375
1376 // Translate a masked gather intrinsic like
1377 // <16 x i32 > @llvm.masked.gather.v16i32( <16 x i32*> %Ptrs, i32 4,
1378 // <16 x i1> %Mask, <16 x i32> %Src)
1379 // to a chain of basic blocks, with loading element one-by-one if
1380 // the appropriate mask bit is set
1381 //
1382 // % Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
1383 // % Mask0 = extractelement <16 x i1> %Mask, i32 0
1384 // % ToLoad0 = icmp eq i1 % Mask0, true
1385 // br i1 % ToLoad0, label %cond.load, label %else
1386 //
1387 // cond.load:
1388 // % Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
1389 // % Load0 = load i32, i32* % Ptr0, align 4
1390 // % Res0 = insertelement <16 x i32> undef, i32 % Load0, i32 0
1391 // br label %else
1392 //
1393 // else:
1394 // %res.phi.else = phi <16 x i32>[% Res0, %cond.load], [undef, % 0]
1395 // % Mask1 = extractelement <16 x i1> %Mask, i32 1
1396 // % ToLoad1 = icmp eq i1 % Mask1, true
1397 // br i1 % ToLoad1, label %cond.load1, label %else2
1398 //
1399 // cond.load1:
1400 // % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
1401 // % Load1 = load i32, i32* % Ptr1, align 4
1402 // % Res1 = insertelement <16 x i32> %res.phi.else, i32 % Load1, i32 1
1403 // br label %else2
1404 // . . .
1405 // % Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src 1406 // ret <16 x i32> %Result 1407 static void scalarizeMaskedGather(CallInst *CI) { 1408 Value *Ptrs = CI->getArgOperand(0); 1409 Value *Alignment = CI->getArgOperand(1); 1410 Value *Mask = CI->getArgOperand(2); 1411 Value *Src0 = CI->getArgOperand(3); 1412 1413 VectorType *VecType = dyn_cast<VectorType>(CI->getType()); 1414 1415 assert(VecType && "Unexpected return type of masked load intrinsic"); 1416 1417 IRBuilder<> Builder(CI->getContext()); 1418 Instruction *InsertPt = CI; 1419 BasicBlock *IfBlock = CI->getParent(); 1420 BasicBlock *CondBlock = nullptr; 1421 BasicBlock *PrevIfBlock = CI->getParent(); 1422 Builder.SetInsertPoint(InsertPt); 1423 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1424 1425 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1426 1427 Value *UndefVal = UndefValue::get(VecType); 1428 1429 // The result vector 1430 Value *VResult = UndefVal; 1431 unsigned VectorWidth = VecType->getNumElements(); 1432 1433 // Shorten the way if the mask is a vector of constants. 1434 bool IsConstMask = isa<ConstantVector>(Mask); 1435 1436 if (IsConstMask) { 1437 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1438 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 1439 continue; 1440 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1441 "Ptr" + Twine(Idx)); 1442 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal, 1443 "Load" + Twine(Idx)); 1444 VResult = Builder.CreateInsertElement(VResult, Load, 1445 Builder.getInt32(Idx), 1446 "Res" + Twine(Idx)); 1447 } 1448 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0); 1449 CI->replaceAllUsesWith(NewI); 1450 CI->eraseFromParent(); 1451 return; 1452 } 1453 1454 PHINode *Phi = nullptr; 1455 Value *PrevPhi = UndefVal; 1456 1457 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1458 1459 // Fill the "else" block, created in the previous iteration 1460 // 1461 // %Mask1 = extractelement <16 x i1> %Mask, i32 1 1462 // %ToLoad1 = icmp eq i1 %Mask1, true 1463 // br i1 %ToLoad1, label %cond.load, label %else 1464 // 1465 if (Idx > 0) { 1466 Phi = Builder.CreatePHI(VecType, 2, "res.phi.else"); 1467 Phi->addIncoming(VResult, CondBlock); 1468 Phi->addIncoming(PrevPhi, PrevIfBlock); 1469 PrevPhi = Phi; 1470 VResult = Phi; 1471 } 1472 1473 Value *Predicate = Builder.CreateExtractElement(Mask, 1474 Builder.getInt32(Idx), 1475 "Mask" + Twine(Idx)); 1476 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 1477 ConstantInt::get(Predicate->getType(), 1), 1478 "ToLoad" + Twine(Idx)); 1479 1480 // Create "cond" block 1481 // 1482 // %EltAddr = getelementptr i32* %1, i32 0 1483 // %Elt = load i32* %EltAddr 1484 // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx 1485 // 1486 CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load"); 1487 Builder.SetInsertPoint(InsertPt); 1488 1489 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1490 "Ptr" + Twine(Idx)); 1491 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal, 1492 "Load" + Twine(Idx)); 1493 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx), 1494 "Res" + Twine(Idx)); 1495 1496 // Create "else" block, fill it in the next iteration 1497 BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else"); 1498 Builder.SetInsertPoint(InsertPt); 1499 Instruction *OldBr = IfBlock->getTerminator(); 1500 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 1501 
OldBr->eraseFromParent(); 1502 PrevIfBlock = IfBlock; 1503 IfBlock = NewIfBlock; 1504 } 1505 1506 Phi = Builder.CreatePHI(VecType, 2, "res.phi.select"); 1507 Phi->addIncoming(VResult, CondBlock); 1508 Phi->addIncoming(PrevPhi, PrevIfBlock); 1509 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0); 1510 CI->replaceAllUsesWith(NewI); 1511 CI->eraseFromParent(); 1512 } 1513 1514 // Translate a masked scatter intrinsic, like 1515 // void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*>* %Ptrs, i32 4, 1516 // <16 x i1> %Mask) 1517 // to a chain of basic blocks, that stores element one-by-one if 1518 // the appropriate mask bit is set. 1519 // 1520 // % Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind 1521 // % Mask0 = extractelement <16 x i1> % Mask, i32 0 1522 // % ToStore0 = icmp eq i1 % Mask0, true 1523 // br i1 %ToStore0, label %cond.store, label %else 1524 // 1525 // cond.store: 1526 // % Elt0 = extractelement <16 x i32> %Src, i32 0 1527 // % Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0 1528 // store i32 %Elt0, i32* % Ptr0, align 4 1529 // br label %else 1530 // 1531 // else: 1532 // % Mask1 = extractelement <16 x i1> % Mask, i32 1 1533 // % ToStore1 = icmp eq i1 % Mask1, true 1534 // br i1 % ToStore1, label %cond.store1, label %else2 1535 // 1536 // cond.store1: 1537 // % Elt1 = extractelement <16 x i32> %Src, i32 1 1538 // % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1 1539 // store i32 % Elt1, i32* % Ptr1, align 4 1540 // br label %else2 1541 // . . . 1542 static void scalarizeMaskedScatter(CallInst *CI) { 1543 Value *Src = CI->getArgOperand(0); 1544 Value *Ptrs = CI->getArgOperand(1); 1545 Value *Alignment = CI->getArgOperand(2); 1546 Value *Mask = CI->getArgOperand(3); 1547 1548 assert(isa<VectorType>(Src->getType()) && 1549 "Unexpected data type in masked scatter intrinsic"); 1550 assert(isa<VectorType>(Ptrs->getType()) && 1551 isa<PointerType>(Ptrs->getType()->getVectorElementType()) && 1552 "Vector of pointers is expected in masked scatter intrinsic"); 1553 1554 IRBuilder<> Builder(CI->getContext()); 1555 Instruction *InsertPt = CI; 1556 BasicBlock *IfBlock = CI->getParent(); 1557 Builder.SetInsertPoint(InsertPt); 1558 Builder.SetCurrentDebugLocation(CI->getDebugLoc()); 1559 1560 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); 1561 unsigned VectorWidth = Src->getType()->getVectorNumElements(); 1562 1563 // Shorten the way if the mask is a vector of constants. 
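// E.g. (sketch): with a constant mask <i1 1, i1 0, i1 1, i1 0>, only the
// stores for lanes 0 and 2 are emitted and no branches are created.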
1564 bool IsConstMask = isa<ConstantVector>(Mask); 1565 1566 if (IsConstMask) { 1567 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1568 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue()) 1569 continue; 1570 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx), 1571 "Elt" + Twine(Idx)); 1572 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1573 "Ptr" + Twine(Idx)); 1574 Builder.CreateAlignedStore(OneElt, Ptr, AlignVal); 1575 } 1576 CI->eraseFromParent(); 1577 return; 1578 } 1579 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { 1580 // Fill the "else" block, created in the previous iteration 1581 // 1582 // % Mask1 = extractelement <16 x i1> % Mask, i32 Idx 1583 // % ToStore = icmp eq i1 % Mask1, true 1584 // br i1 % ToStore, label %cond.store, label %else 1585 // 1586 Value *Predicate = Builder.CreateExtractElement(Mask, 1587 Builder.getInt32(Idx), 1588 "Mask" + Twine(Idx)); 1589 Value *Cmp = 1590 Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, 1591 ConstantInt::get(Predicate->getType(), 1), 1592 "ToStore" + Twine(Idx)); 1593 1594 // Create "cond" block 1595 // 1596 // % Elt1 = extractelement <16 x i32> %Src, i32 1 1597 // % Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1 1598 // %store i32 % Elt1, i32* % Ptr1 1599 // 1600 BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store"); 1601 Builder.SetInsertPoint(InsertPt); 1602 1603 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx), 1604 "Elt" + Twine(Idx)); 1605 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx), 1606 "Ptr" + Twine(Idx)); 1607 Builder.CreateAlignedStore(OneElt, Ptr, AlignVal); 1608 1609 // Create "else" block, fill it in the next iteration 1610 BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else"); 1611 Builder.SetInsertPoint(InsertPt); 1612 Instruction *OldBr = IfBlock->getTerminator(); 1613 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); 1614 OldBr->eraseFromParent(); 1615 IfBlock = NewIfBlock; 1616 } 1617 CI->eraseFromParent(); 1618 } 1619 1620 /// If counting leading or trailing zeros is an expensive operation and a zero 1621 /// input is defined, add a check for zero to avoid calling the intrinsic. 1622 /// 1623 /// We want to transform: 1624 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) 1625 /// 1626 /// into: 1627 /// entry: 1628 /// %cmpz = icmp eq i64 %A, 0 1629 /// br i1 %cmpz, label %cond.end, label %cond.false 1630 /// cond.false: 1631 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) 1632 /// br label %cond.end 1633 /// cond.end: 1634 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] 1635 /// 1636 /// If the transform is performed, return true and set ModifiedDT to true. 1637 static bool despeculateCountZeros(IntrinsicInst *CountZeros, 1638 const TargetLowering *TLI, 1639 const DataLayout *DL, 1640 bool &ModifiedDT) { 1641 if (!TLI || !DL) 1642 return false; 1643 1644 // If a zero input is undefined, it doesn't make sense to despeculate that. 1645 if (match(CountZeros->getOperand(1), m_One())) 1646 return false; 1647 1648 // If it's cheap to speculate, there's nothing to do. 1649 auto IntrinsicID = CountZeros->getIntrinsicID(); 1650 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || 1651 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) 1652 return false; 1653 1654 // Only handle legal scalar cases. Anything else requires too much work. 
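  // E.g. (illustrative): a cttz/ctlz on i128 is left alone on a target whose
  // largest legal integer type is i64, and vector counts are never handled
  // here.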
1655 Type *Ty = CountZeros->getType(); 1656 unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); 1657 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSize()) 1658 return false; 1659 1660 // The intrinsic will be sunk behind a compare against zero and branch. 1661 BasicBlock *StartBlock = CountZeros->getParent(); 1662 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); 1663 1664 // Create another block after the count zero intrinsic. A PHI will be added 1665 // in this block to select the result of the intrinsic or the bit-width 1666 // constant if the input to the intrinsic is zero. 1667 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); 1668 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); 1669 1670 // Set up a builder to create a compare, conditional branch, and PHI. 1671 IRBuilder<> Builder(CountZeros->getContext()); 1672 Builder.SetInsertPoint(StartBlock->getTerminator()); 1673 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); 1674 1675 // Replace the unconditional branch that was created by the first split with 1676 // a compare against zero and a conditional branch. 1677 Value *Zero = Constant::getNullValue(Ty); 1678 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); 1679 Builder.CreateCondBr(Cmp, EndBlock, CallBlock); 1680 StartBlock->getTerminator()->eraseFromParent(); 1681 1682 // Create a PHI in the end block to select either the output of the intrinsic 1683 // or the bit width of the operand. 1684 Builder.SetInsertPoint(&EndBlock->front()); 1685 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); 1686 CountZeros->replaceAllUsesWith(PN); 1687 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); 1688 PN->addIncoming(BitWidth, StartBlock); 1689 PN->addIncoming(CountZeros, CallBlock); 1690 1691 // We are explicitly handling the zero case, so we can set the intrinsic's 1692 // undefined zero argument to 'true'. This will also prevent reprocessing the 1693 // intrinsic; we only despeculate when a zero input is defined. 1694 CountZeros->setArgOperand(1, Builder.getTrue()); 1695 ModifiedDT = true; 1696 return true; 1697 } 1698 1699 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) { 1700 BasicBlock *BB = CI->getParent(); 1701 1702 // Lower inline assembly if we can. 1703 // If we found an inline asm expession, and if the target knows how to 1704 // lower it to normal LLVM code, do so now. 1705 if (TLI && isa<InlineAsm>(CI->getCalledValue())) { 1706 if (TLI->ExpandInlineAsm(CI)) { 1707 // Avoid invalidating the iterator. 1708 CurInstIterator = BB->begin(); 1709 // Avoid processing instructions out of order, which could cause 1710 // reuse before a value is defined. 1711 SunkAddrs.clear(); 1712 return true; 1713 } 1714 // Sink address computing for memory operands into the block. 1715 if (optimizeInlineAsmInst(CI)) 1716 return true; 1717 } 1718 1719 // Align the pointer arguments to this call if the target thinks it's a good 1720 // idea 1721 unsigned MinSize, PrefAlign; 1722 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { 1723 for (auto &Arg : CI->arg_operands()) { 1724 // We want to align both objects whose address is used directly and 1725 // objects whose address is used in casts and GEPs, though it only makes 1726 // sense for GEPs if the offset is a multiple of the desired alignment and 1727 // if size - offset meets the size threshold. 
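      // For instance (illustrative): with PrefAlign = 16 and MinSize = 32,
      // passing a pointer 16 bytes into an 'alloca [64 x i8], align 4' lets us
      // raise the alloca's alignment to 16 (64 >= 32 + 16), whereas an offset
      // of 8 is skipped because it is not a multiple of 16.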
1728 if (!Arg->getType()->isPointerTy()) 1729 continue; 1730 APInt Offset(DL->getPointerSizeInBits( 1731 cast<PointerType>(Arg->getType())->getAddressSpace()), 1732 0); 1733 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); 1734 uint64_t Offset2 = Offset.getLimitedValue(); 1735 if ((Offset2 & (PrefAlign-1)) != 0) 1736 continue; 1737 AllocaInst *AI; 1738 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && 1739 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) 1740 AI->setAlignment(PrefAlign); 1741 // Global variables can only be aligned if they are defined in this 1742 // object (i.e. they are uniquely initialized in this object), and 1743 // over-aligning global variables that have an explicit section is 1744 // forbidden. 1745 GlobalVariable *GV; 1746 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && 1747 GV->getAlignment() < PrefAlign && 1748 DL->getTypeAllocSize(GV->getValueType()) >= 1749 MinSize + Offset2) 1750 GV->setAlignment(PrefAlign); 1751 } 1752 // If this is a memcpy (or similar) then we may be able to improve the 1753 // alignment 1754 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { 1755 unsigned Align = getKnownAlignment(MI->getDest(), *DL); 1756 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) 1757 Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL)); 1758 if (Align > MI->getAlignment()) 1759 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align)); 1760 } 1761 } 1762 1763 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); 1764 if (II) { 1765 switch (II->getIntrinsicID()) { 1766 default: break; 1767 case Intrinsic::objectsize: { 1768 // Lower all uses of llvm.objectsize.* 1769 bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1); 1770 Type *ReturnTy = CI->getType(); 1771 Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL); 1772 1773 // Substituting this can cause recursive simplifications, which can 1774 // invalidate our iterator. Use a WeakVH to hold onto it in case this 1775 // happens. 1776 Value *CurValue = &*CurInstIterator; 1777 WeakVH IterHandle(CurValue); 1778 1779 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); 1780 1781 // If the iterator instruction was recursively deleted, start over at the 1782 // start of the block. 
1783 if (IterHandle != CurValue) { 1784 CurInstIterator = BB->begin(); 1785 SunkAddrs.clear(); 1786 } 1787 return true; 1788 } 1789 case Intrinsic::masked_load: { 1790 // Scalarize unsupported vector masked load 1791 if (!TTI->isLegalMaskedLoad(CI->getType())) { 1792 scalarizeMaskedLoad(CI); 1793 ModifiedDT = true; 1794 return true; 1795 } 1796 return false; 1797 } 1798 case Intrinsic::masked_store: { 1799 if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType())) { 1800 scalarizeMaskedStore(CI); 1801 ModifiedDT = true; 1802 return true; 1803 } 1804 return false; 1805 } 1806 case Intrinsic::masked_gather: { 1807 if (!TTI->isLegalMaskedGather(CI->getType())) { 1808 scalarizeMaskedGather(CI); 1809 ModifiedDT = true; 1810 return true; 1811 } 1812 return false; 1813 } 1814 case Intrinsic::masked_scatter: { 1815 if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) { 1816 scalarizeMaskedScatter(CI); 1817 ModifiedDT = true; 1818 return true; 1819 } 1820 return false; 1821 } 1822 case Intrinsic::aarch64_stlxr: 1823 case Intrinsic::aarch64_stxr: { 1824 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); 1825 if (!ExtVal || !ExtVal->hasOneUse() || 1826 ExtVal->getParent() == CI->getParent()) 1827 return false; 1828 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. 1829 ExtVal->moveBefore(CI); 1830 // Mark this instruction as "inserted by CGP", so that other 1831 // optimizations don't touch it. 1832 InsertedInsts.insert(ExtVal); 1833 return true; 1834 } 1835 case Intrinsic::invariant_group_barrier: 1836 II->replaceAllUsesWith(II->getArgOperand(0)); 1837 II->eraseFromParent(); 1838 return true; 1839 1840 case Intrinsic::cttz: 1841 case Intrinsic::ctlz: 1842 // If counting zeros is expensive, try to avoid it. 1843 return despeculateCountZeros(II, TLI, DL, ModifiedDT); 1844 } 1845 1846 if (TLI) { 1847 // Unknown address space. 1848 // TODO: Target hook to pick which address space the intrinsic cares 1849 // about? 1850 unsigned AddrSpace = ~0u; 1851 SmallVector<Value*, 2> PtrOps; 1852 Type *AccessTy; 1853 if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy, AddrSpace)) 1854 while (!PtrOps.empty()) 1855 if (optimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy, AddrSpace)) 1856 return true; 1857 } 1858 } 1859 1860 // From here on out we're working with named functions. 1861 if (!CI->getCalledFunction()) return false; 1862 1863 // Lower all default uses of _chk calls. This is very similar 1864 // to what InstCombineCalls does, but here we are only lowering calls 1865 // to fortified library functions (e.g. __memcpy_chk) that have the default 1866 // "don't know" as the objectsize. Anything else should be left alone. 1867 FortifiedLibCallSimplifier Simplifier(TLInfo, true); 1868 if (Value *V = Simplifier.optimizeCall(CI)) { 1869 CI->replaceAllUsesWith(V); 1870 CI->eraseFromParent(); 1871 return true; 1872 } 1873 return false; 1874 } 1875 1876 /// Look for opportunities to duplicate return instructions to the predecessor 1877 /// to enable tail call optimizations. 
The case it is currently looking for is: 1878 /// @code 1879 /// bb0: 1880 /// %tmp0 = tail call i32 @f0() 1881 /// br label %return 1882 /// bb1: 1883 /// %tmp1 = tail call i32 @f1() 1884 /// br label %return 1885 /// bb2: 1886 /// %tmp2 = tail call i32 @f2() 1887 /// br label %return 1888 /// return: 1889 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] 1890 /// ret i32 %retval 1891 /// @endcode 1892 /// 1893 /// => 1894 /// 1895 /// @code 1896 /// bb0: 1897 /// %tmp0 = tail call i32 @f0() 1898 /// ret i32 %tmp0 1899 /// bb1: 1900 /// %tmp1 = tail call i32 @f1() 1901 /// ret i32 %tmp1 1902 /// bb2: 1903 /// %tmp2 = tail call i32 @f2() 1904 /// ret i32 %tmp2 1905 /// @endcode 1906 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { 1907 if (!TLI) 1908 return false; 1909 1910 ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()); 1911 if (!RI) 1912 return false; 1913 1914 PHINode *PN = nullptr; 1915 BitCastInst *BCI = nullptr; 1916 Value *V = RI->getReturnValue(); 1917 if (V) { 1918 BCI = dyn_cast<BitCastInst>(V); 1919 if (BCI) 1920 V = BCI->getOperand(0); 1921 1922 PN = dyn_cast<PHINode>(V); 1923 if (!PN) 1924 return false; 1925 } 1926 1927 if (PN && PN->getParent() != BB) 1928 return false; 1929 1930 // It's not safe to eliminate the sign / zero extension of the return value. 1931 // See llvm::isInTailCallPosition(). 1932 const Function *F = BB->getParent(); 1933 AttributeSet CallerAttrs = F->getAttributes(); 1934 if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) || 1935 CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt)) 1936 return false; 1937 1938 // Make sure there are no instructions between the PHI and return, or that the 1939 // return is the first instruction in the block. 1940 if (PN) { 1941 BasicBlock::iterator BI = BB->begin(); 1942 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); 1943 if (&*BI == BCI) 1944 // Also skip over the bitcast. 1945 ++BI; 1946 if (&*BI != RI) 1947 return false; 1948 } else { 1949 BasicBlock::iterator BI = BB->begin(); 1950 while (isa<DbgInfoIntrinsic>(BI)) ++BI; 1951 if (&*BI != RI) 1952 return false; 1953 } 1954 1955 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail 1956 /// call. 1957 SmallVector<CallInst*, 4> TailCalls; 1958 if (PN) { 1959 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { 1960 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); 1961 // Make sure the phi value is indeed produced by the tail call. 
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI).second)
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "")
       << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ?
" + " : "") 2066 << "Base:"; 2067 BaseReg->printAsOperand(OS, /*PrintType=*/false); 2068 NeedPlus = true; 2069 } 2070 if (Scale) { 2071 OS << (NeedPlus ? " + " : "") 2072 << Scale << "*"; 2073 ScaledReg->printAsOperand(OS, /*PrintType=*/false); 2074 } 2075 2076 OS << ']'; 2077 } 2078 2079 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2080 LLVM_DUMP_METHOD void ExtAddrMode::dump() const { 2081 print(dbgs()); 2082 dbgs() << '\n'; 2083 } 2084 #endif 2085 2086 /// \brief This class provides transaction based operation on the IR. 2087 /// Every change made through this class is recorded in the internal state and 2088 /// can be undone (rollback) until commit is called. 2089 class TypePromotionTransaction { 2090 2091 /// \brief This represents the common interface of the individual transaction. 2092 /// Each class implements the logic for doing one specific modification on 2093 /// the IR via the TypePromotionTransaction. 2094 class TypePromotionAction { 2095 protected: 2096 /// The Instruction modified. 2097 Instruction *Inst; 2098 2099 public: 2100 /// \brief Constructor of the action. 2101 /// The constructor performs the related action on the IR. 2102 TypePromotionAction(Instruction *Inst) : Inst(Inst) {} 2103 2104 virtual ~TypePromotionAction() {} 2105 2106 /// \brief Undo the modification done by this action. 2107 /// When this method is called, the IR must be in the same state as it was 2108 /// before this action was applied. 2109 /// \pre Undoing the action works if and only if the IR is in the exact same 2110 /// state as it was directly after this action was applied. 2111 virtual void undo() = 0; 2112 2113 /// \brief Advocate every change made by this action. 2114 /// When the results on the IR of the action are to be kept, it is important 2115 /// to call this function, otherwise hidden information may be kept forever. 2116 virtual void commit() { 2117 // Nothing to be done, this action is not doing anything. 2118 } 2119 }; 2120 2121 /// \brief Utility to remember the position of an instruction. 2122 class InsertionHandler { 2123 /// Position of an instruction. 2124 /// Either an instruction: 2125 /// - Is the first in a basic block: BB is used. 2126 /// - Has a previous instructon: PrevInst is used. 2127 union { 2128 Instruction *PrevInst; 2129 BasicBlock *BB; 2130 } Point; 2131 /// Remember whether or not the instruction had a previous instruction. 2132 bool HasPrevInstruction; 2133 2134 public: 2135 /// \brief Record the position of \p Inst. 2136 InsertionHandler(Instruction *Inst) { 2137 BasicBlock::iterator It = Inst->getIterator(); 2138 HasPrevInstruction = (It != (Inst->getParent()->begin())); 2139 if (HasPrevInstruction) 2140 Point.PrevInst = &*--It; 2141 else 2142 Point.BB = Inst->getParent(); 2143 } 2144 2145 /// \brief Insert \p Inst at the recorded position. 2146 void insert(Instruction *Inst) { 2147 if (HasPrevInstruction) { 2148 if (Inst->getParent()) 2149 Inst->removeFromParent(); 2150 Inst->insertAfter(Point.PrevInst); 2151 } else { 2152 Instruction *Position = &*Point.BB->getFirstInsertionPt(); 2153 if (Inst->getParent()) 2154 Inst->moveBefore(Position); 2155 else 2156 Inst->insertBefore(Position); 2157 } 2158 } 2159 }; 2160 2161 /// \brief Move an instruction before another. 2162 class InstructionMoveBefore : public TypePromotionAction { 2163 /// Original position of the instruction. 2164 InsertionHandler Position; 2165 2166 public: 2167 /// \brief Move \p Inst before \p Before. 
2168 InstructionMoveBefore(Instruction *Inst, Instruction *Before) 2169 : TypePromotionAction(Inst), Position(Inst) { 2170 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); 2171 Inst->moveBefore(Before); 2172 } 2173 2174 /// \brief Move the instruction back to its original position. 2175 void undo() override { 2176 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); 2177 Position.insert(Inst); 2178 } 2179 }; 2180 2181 /// \brief Set the operand of an instruction with a new value. 2182 class OperandSetter : public TypePromotionAction { 2183 /// Original operand of the instruction. 2184 Value *Origin; 2185 /// Index of the modified instruction. 2186 unsigned Idx; 2187 2188 public: 2189 /// \brief Set \p Idx operand of \p Inst with \p NewVal. 2190 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) 2191 : TypePromotionAction(Inst), Idx(Idx) { 2192 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" 2193 << "for:" << *Inst << "\n" 2194 << "with:" << *NewVal << "\n"); 2195 Origin = Inst->getOperand(Idx); 2196 Inst->setOperand(Idx, NewVal); 2197 } 2198 2199 /// \brief Restore the original value of the instruction. 2200 void undo() override { 2201 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" 2202 << "for: " << *Inst << "\n" 2203 << "with: " << *Origin << "\n"); 2204 Inst->setOperand(Idx, Origin); 2205 } 2206 }; 2207 2208 /// \brief Hide the operands of an instruction. 2209 /// Do as if this instruction was not using any of its operands. 2210 class OperandsHider : public TypePromotionAction { 2211 /// The list of original operands. 2212 SmallVector<Value *, 4> OriginalValues; 2213 2214 public: 2215 /// \brief Remove \p Inst from the uses of the operands of \p Inst. 2216 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { 2217 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); 2218 unsigned NumOpnds = Inst->getNumOperands(); 2219 OriginalValues.reserve(NumOpnds); 2220 for (unsigned It = 0; It < NumOpnds; ++It) { 2221 // Save the current operand. 2222 Value *Val = Inst->getOperand(It); 2223 OriginalValues.push_back(Val); 2224 // Set a dummy one. 2225 // We could use OperandSetter here, but that would imply an overhead 2226 // that we are not willing to pay. 2227 Inst->setOperand(It, UndefValue::get(Val->getType())); 2228 } 2229 } 2230 2231 /// \brief Restore the original list of uses. 2232 void undo() override { 2233 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); 2234 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) 2235 Inst->setOperand(It, OriginalValues[It]); 2236 } 2237 }; 2238 2239 /// \brief Build a truncate instruction. 2240 class TruncBuilder : public TypePromotionAction { 2241 Value *Val; 2242 public: 2243 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty 2244 /// result. 2245 /// trunc Opnd to Ty. 2246 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { 2247 IRBuilder<> Builder(Opnd); 2248 Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); 2249 DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); 2250 } 2251 2252 /// \brief Get the built value. 2253 Value *getBuiltValue() { return Val; } 2254 2255 /// \brief Remove the built instruction. 2256 void undo() override { 2257 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); 2258 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2259 IVal->eraseFromParent(); 2260 } 2261 }; 2262 2263 /// \brief Build a sign extension instruction. 
2264 class SExtBuilder : public TypePromotionAction { 2265 Value *Val; 2266 public: 2267 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty 2268 /// result. 2269 /// sext Opnd to Ty. 2270 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2271 : TypePromotionAction(InsertPt) { 2272 IRBuilder<> Builder(InsertPt); 2273 Val = Builder.CreateSExt(Opnd, Ty, "promoted"); 2274 DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); 2275 } 2276 2277 /// \brief Get the built value. 2278 Value *getBuiltValue() { return Val; } 2279 2280 /// \brief Remove the built instruction. 2281 void undo() override { 2282 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); 2283 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2284 IVal->eraseFromParent(); 2285 } 2286 }; 2287 2288 /// \brief Build a zero extension instruction. 2289 class ZExtBuilder : public TypePromotionAction { 2290 Value *Val; 2291 public: 2292 /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty 2293 /// result. 2294 /// zext Opnd to Ty. 2295 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) 2296 : TypePromotionAction(InsertPt) { 2297 IRBuilder<> Builder(InsertPt); 2298 Val = Builder.CreateZExt(Opnd, Ty, "promoted"); 2299 DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); 2300 } 2301 2302 /// \brief Get the built value. 2303 Value *getBuiltValue() { return Val; } 2304 2305 /// \brief Remove the built instruction. 2306 void undo() override { 2307 DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); 2308 if (Instruction *IVal = dyn_cast<Instruction>(Val)) 2309 IVal->eraseFromParent(); 2310 } 2311 }; 2312 2313 /// \brief Mutate an instruction to another type. 2314 class TypeMutator : public TypePromotionAction { 2315 /// Record the original type. 2316 Type *OrigTy; 2317 2318 public: 2319 /// \brief Mutate the type of \p Inst into \p NewTy. 2320 TypeMutator(Instruction *Inst, Type *NewTy) 2321 : TypePromotionAction(Inst), OrigTy(Inst->getType()) { 2322 DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy 2323 << "\n"); 2324 Inst->mutateType(NewTy); 2325 } 2326 2327 /// \brief Mutate the instruction back to its original type. 2328 void undo() override { 2329 DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy 2330 << "\n"); 2331 Inst->mutateType(OrigTy); 2332 } 2333 }; 2334 2335 /// \brief Replace the uses of an instruction by another instruction. 2336 class UsesReplacer : public TypePromotionAction { 2337 /// Helper structure to keep track of the replaced uses. 2338 struct InstructionAndIdx { 2339 /// The instruction using the instruction. 2340 Instruction *Inst; 2341 /// The index where this instruction is used for Inst. 2342 unsigned Idx; 2343 InstructionAndIdx(Instruction *Inst, unsigned Idx) 2344 : Inst(Inst), Idx(Idx) {} 2345 }; 2346 2347 /// Keep track of the original uses (pair Instruction, Index). 2348 SmallVector<InstructionAndIdx, 4> OriginalUses; 2349 typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator; 2350 2351 public: 2352 /// \brief Replace all the use of \p Inst by \p New. 2353 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { 2354 DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New 2355 << "\n"); 2356 // Record the original uses. 2357 for (Use &U : Inst->uses()) { 2358 Instruction *UserI = cast<Instruction>(U.getUser()); 2359 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); 2360 } 2361 // Now, we can replace the uses. 
2362 Inst->replaceAllUsesWith(New); 2363 } 2364 2365 /// \brief Reassign the original uses of Inst to Inst. 2366 void undo() override { 2367 DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); 2368 for (use_iterator UseIt = OriginalUses.begin(), 2369 EndIt = OriginalUses.end(); 2370 UseIt != EndIt; ++UseIt) { 2371 UseIt->Inst->setOperand(UseIt->Idx, Inst); 2372 } 2373 } 2374 }; 2375 2376 /// \brief Remove an instruction from the IR. 2377 class InstructionRemover : public TypePromotionAction { 2378 /// Original position of the instruction. 2379 InsertionHandler Inserter; 2380 /// Helper structure to hide all the link to the instruction. In other 2381 /// words, this helps to do as if the instruction was removed. 2382 OperandsHider Hider; 2383 /// Keep track of the uses replaced, if any. 2384 UsesReplacer *Replacer; 2385 2386 public: 2387 /// \brief Remove all reference of \p Inst and optinally replace all its 2388 /// uses with New. 2389 /// \pre If !Inst->use_empty(), then New != nullptr 2390 InstructionRemover(Instruction *Inst, Value *New = nullptr) 2391 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), 2392 Replacer(nullptr) { 2393 if (New) 2394 Replacer = new UsesReplacer(Inst, New); 2395 DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); 2396 Inst->removeFromParent(); 2397 } 2398 2399 ~InstructionRemover() override { delete Replacer; } 2400 2401 /// \brief Really remove the instruction. 2402 void commit() override { delete Inst; } 2403 2404 /// \brief Resurrect the instruction and reassign it to the proper uses if 2405 /// new value was provided when build this action. 2406 void undo() override { 2407 DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); 2408 Inserter.insert(Inst); 2409 if (Replacer) 2410 Replacer->undo(); 2411 Hider.undo(); 2412 } 2413 }; 2414 2415 public: 2416 /// Restoration point. 2417 /// The restoration point is a pointer to an action instead of an iterator 2418 /// because the iterator may be invalidated but not the pointer. 2419 typedef const TypePromotionAction *ConstRestorationPt; 2420 /// Advocate every changes made in that transaction. 2421 void commit(); 2422 /// Undo all the changes made after the given point. 2423 void rollback(ConstRestorationPt Point); 2424 /// Get the current restoration point. 2425 ConstRestorationPt getRestorationPoint() const; 2426 2427 /// \name API for IR modification with state keeping to support rollback. 2428 /// @{ 2429 /// Same as Instruction::setOperand. 2430 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); 2431 /// Same as Instruction::eraseFromParent. 2432 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); 2433 /// Same as Value::replaceAllUsesWith. 2434 void replaceAllUsesWith(Instruction *Inst, Value *New); 2435 /// Same as Value::mutateType. 2436 void mutateType(Instruction *Inst, Type *NewTy); 2437 /// Same as IRBuilder::createTrunc. 2438 Value *createTrunc(Instruction *Opnd, Type *Ty); 2439 /// Same as IRBuilder::createSExt. 2440 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); 2441 /// Same as IRBuilder::createZExt. 2442 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); 2443 /// Same as Instruction::moveBefore. 2444 void moveBefore(Instruction *Inst, Instruction *Before); 2445 /// @} 2446 2447 private: 2448 /// The ordered list of actions made so far. 
2449 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; 2450 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt; 2451 }; 2452 2453 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, 2454 Value *NewVal) { 2455 Actions.push_back( 2456 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal)); 2457 } 2458 2459 void TypePromotionTransaction::eraseInstruction(Instruction *Inst, 2460 Value *NewVal) { 2461 Actions.push_back( 2462 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal)); 2463 } 2464 2465 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, 2466 Value *New) { 2467 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); 2468 } 2469 2470 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { 2471 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); 2472 } 2473 2474 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, 2475 Type *Ty) { 2476 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); 2477 Value *Val = Ptr->getBuiltValue(); 2478 Actions.push_back(std::move(Ptr)); 2479 return Val; 2480 } 2481 2482 Value *TypePromotionTransaction::createSExt(Instruction *Inst, 2483 Value *Opnd, Type *Ty) { 2484 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); 2485 Value *Val = Ptr->getBuiltValue(); 2486 Actions.push_back(std::move(Ptr)); 2487 return Val; 2488 } 2489 2490 Value *TypePromotionTransaction::createZExt(Instruction *Inst, 2491 Value *Opnd, Type *Ty) { 2492 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); 2493 Value *Val = Ptr->getBuiltValue(); 2494 Actions.push_back(std::move(Ptr)); 2495 return Val; 2496 } 2497 2498 void TypePromotionTransaction::moveBefore(Instruction *Inst, 2499 Instruction *Before) { 2500 Actions.push_back( 2501 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before)); 2502 } 2503 2504 TypePromotionTransaction::ConstRestorationPt 2505 TypePromotionTransaction::getRestorationPoint() const { 2506 return !Actions.empty() ? Actions.back().get() : nullptr; 2507 } 2508 2509 void TypePromotionTransaction::commit() { 2510 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; 2511 ++It) 2512 (*It)->commit(); 2513 Actions.clear(); 2514 } 2515 2516 void TypePromotionTransaction::rollback( 2517 TypePromotionTransaction::ConstRestorationPt Point) { 2518 while (!Actions.empty() && Point != Actions.back().get()) { 2519 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); 2520 Curr->undo(); 2521 } 2522 } 2523 2524 /// \brief A helper class for matching addressing modes. 2525 /// 2526 /// This encapsulates the logic for matching the target-legal addressing modes. 2527 class AddressingModeMatcher { 2528 SmallVectorImpl<Instruction*> &AddrModeInsts; 2529 const TargetMachine &TM; 2530 const TargetLowering &TLI; 2531 const DataLayout &DL; 2532 2533 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and 2534 /// the memory instruction that we're computing this address for. 2535 Type *AccessTy; 2536 unsigned AddrSpace; 2537 Instruction *MemoryInst; 2538 2539 /// This is the addressing mode that we're building up. This is 2540 /// part of the return value of this addressing mode matching stuff. 2541 ExtAddrMode &AddrMode; 2542 2543 /// The instructions inserted by other CodeGenPrepare optimizations. 
2544 const SetOfInstrs &InsertedInsts; 2545 /// A map from the instructions to their type before promotion. 2546 InstrToOrigTy &PromotedInsts; 2547 /// The ongoing transaction where every action should be registered. 2548 TypePromotionTransaction &TPT; 2549 2550 /// This is set to true when we should not do profitability checks. 2551 /// When true, IsProfitableToFoldIntoAddressingMode always returns true. 2552 bool IgnoreProfitability; 2553 2554 AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI, 2555 const TargetMachine &TM, Type *AT, unsigned AS, 2556 Instruction *MI, ExtAddrMode &AM, 2557 const SetOfInstrs &InsertedInsts, 2558 InstrToOrigTy &PromotedInsts, 2559 TypePromotionTransaction &TPT) 2560 : AddrModeInsts(AMI), TM(TM), 2561 TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent()) 2562 ->getTargetLowering()), 2563 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), 2564 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), 2565 PromotedInsts(PromotedInsts), TPT(TPT) { 2566 IgnoreProfitability = false; 2567 } 2568 public: 2569 2570 /// Find the maximal addressing mode that a load/store of V can fold, 2571 /// give an access type of AccessTy. This returns a list of involved 2572 /// instructions in AddrModeInsts. 2573 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare 2574 /// optimizations. 2575 /// \p PromotedInsts maps the instructions to their type before promotion. 2576 /// \p The ongoing transaction where every action should be registered. 2577 static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS, 2578 Instruction *MemoryInst, 2579 SmallVectorImpl<Instruction*> &AddrModeInsts, 2580 const TargetMachine &TM, 2581 const SetOfInstrs &InsertedInsts, 2582 InstrToOrigTy &PromotedInsts, 2583 TypePromotionTransaction &TPT) { 2584 ExtAddrMode Result; 2585 2586 bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS, 2587 MemoryInst, Result, InsertedInsts, 2588 PromotedInsts, TPT).matchAddr(V, 0); 2589 (void)Success; assert(Success && "Couldn't select *anything*?"); 2590 return Result; 2591 } 2592 private: 2593 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); 2594 bool matchAddr(Value *V, unsigned Depth); 2595 bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, 2596 bool *MovedAway = nullptr); 2597 bool isProfitableToFoldIntoAddressingMode(Instruction *I, 2598 ExtAddrMode &AMBefore, 2599 ExtAddrMode &AMAfter); 2600 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); 2601 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, 2602 Value *PromotedOperand) const; 2603 }; 2604 2605 /// Try adding ScaleReg*Scale to the current addressing mode. 2606 /// Return true and update AddrMode if this addr mode is legal for the target, 2607 /// false if not. 2608 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, 2609 unsigned Depth) { 2610 // If Scale is 1, then this is the same as adding ScaleReg to the addressing 2611 // mode. Just process that directly. 2612 if (Scale == 1) 2613 return matchAddr(ScaleReg, Depth); 2614 2615 // If the scale is 0, it takes nothing to add this. 2616 if (Scale == 0) 2617 return true; 2618 2619 // If we already have a scale of this value, we can add to it, otherwise, we 2620 // need an available scale field. 2621 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) 2622 return false; 2623 2624 ExtAddrMode TestAddrMode = AddrMode; 2625 2626 // Add scale to turn X*4+X*3 -> X*7. 
This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}

/// \brief Helper class to perform type promotion.
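/// For instance (illustrative), the helper can turn
///   %add = add nuw i32 %a, %b
///   %ext = zext i32 %add to i64
/// into
///   %a.promoted = zext i32 %a to i64
///   %b.promoted = zext i32 %b to i64
///   %add = add nuw i64 %a.promoted, %b.promoted
/// so that the remaining extension can be folded away or absorbed into an
/// addressing mode.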
2710 class TypePromotionHelper { 2711 /// \brief Utility function to check whether or not a sign or zero extension 2712 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by 2713 /// either using the operands of \p Inst or promoting \p Inst. 2714 /// The type of the extension is defined by \p IsSExt. 2715 /// In other words, check if: 2716 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. 2717 /// #1 Promotion applies: 2718 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). 2719 /// #2 Operand reuses: 2720 /// ext opnd1 to ConsideredExtType. 2721 /// \p PromotedInsts maps the instructions to their type before promotion. 2722 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, 2723 const InstrToOrigTy &PromotedInsts, bool IsSExt); 2724 2725 /// \brief Utility function to determine if \p OpIdx should be promoted when 2726 /// promoting \p Inst. 2727 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { 2728 return !(isa<SelectInst>(Inst) && OpIdx == 0); 2729 } 2730 2731 /// \brief Utility function to promote the operand of \p Ext when this 2732 /// operand is a promotable trunc or sext or zext. 2733 /// \p PromotedInsts maps the instructions to their type before promotion. 2734 /// \p CreatedInstsCost[out] contains the cost of all instructions 2735 /// created to promote the operand of Ext. 2736 /// Newly added extensions are inserted in \p Exts. 2737 /// Newly added truncates are inserted in \p Truncs. 2738 /// Should never be called directly. 2739 /// \return The promoted value which is used instead of Ext. 2740 static Value *promoteOperandForTruncAndAnyExt( 2741 Instruction *Ext, TypePromotionTransaction &TPT, 2742 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2743 SmallVectorImpl<Instruction *> *Exts, 2744 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); 2745 2746 /// \brief Utility function to promote the operand of \p Ext when this 2747 /// operand is promotable and is not a supported trunc or sext. 2748 /// \p PromotedInsts maps the instructions to their type before promotion. 2749 /// \p CreatedInstsCost[out] contains the cost of all the instructions 2750 /// created to promote the operand of Ext. 2751 /// Newly added extensions are inserted in \p Exts. 2752 /// Newly added truncates are inserted in \p Truncs. 2753 /// Should never be called directly. 2754 /// \return The promoted value which is used instead of Ext. 2755 static Value *promoteOperandForOther(Instruction *Ext, 2756 TypePromotionTransaction &TPT, 2757 InstrToOrigTy &PromotedInsts, 2758 unsigned &CreatedInstsCost, 2759 SmallVectorImpl<Instruction *> *Exts, 2760 SmallVectorImpl<Instruction *> *Truncs, 2761 const TargetLowering &TLI, bool IsSExt); 2762 2763 /// \see promoteOperandForOther. 2764 static Value *signExtendOperandForOther( 2765 Instruction *Ext, TypePromotionTransaction &TPT, 2766 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2767 SmallVectorImpl<Instruction *> *Exts, 2768 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 2769 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, 2770 Exts, Truncs, TLI, true); 2771 } 2772 2773 /// \see promoteOperandForOther. 
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
                           InstrToOrigTy &PromotedInsts,
                           unsigned &CreatedInstsCost,
                           SmallVectorImpl<Instruction *> *Exts,
                           SmallVectorImpl<Instruction *> *Truncs,
                           const TargetLowering &TLI);
  /// \brief Given a sign/zero extend instruction \p Ext, return the
  /// appropriate action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later. Thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator, if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
       (IsSExt && BinOp->hasNoSignedWrap())))
    return true;

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants, but it is not worth the extra logic.)
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that the trunc just drops extended bits of the same kind as
  // the extension.
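  // For example (illustrative): given '%t = trunc i64 %x to i32' feeding
  // 'sext i32 %t to i64', we may only rewrite this as an extension of %x when
  // %x itself comes from a sign extension whose original operand fits in i32,
  // i.e. when the trunc only dropped sign bits.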
2854 // #1 get the type of the operand and check the kind of the extended bits. 2855 const Type *OpndType; 2856 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); 2857 if (It != PromotedInsts.end() && It->second.getInt() == IsSExt) 2858 OpndType = It->second.getPointer(); 2859 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) 2860 OpndType = Opnd->getOperand(0)->getType(); 2861 else 2862 return false; 2863 2864 // #2 check that the truncate just drops extended bits. 2865 return Inst->getType()->getIntegerBitWidth() >= 2866 OpndType->getIntegerBitWidth(); 2867 } 2868 2869 TypePromotionHelper::Action TypePromotionHelper::getAction( 2870 Instruction *Ext, const SetOfInstrs &InsertedInsts, 2871 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { 2872 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 2873 "Unexpected instruction type"); 2874 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); 2875 Type *ExtTy = Ext->getType(); 2876 bool IsSExt = isa<SExtInst>(Ext); 2877 // If the operand of the extension is not an instruction, we cannot 2878 // get through. 2879 // If it, check we can get through. 2880 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) 2881 return nullptr; 2882 2883 // Do not promote if the operand has been added by codegenprepare. 2884 // Otherwise, it means we are undoing an optimization that is likely to be 2885 // redone, thus causing potential infinite loop. 2886 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) 2887 return nullptr; 2888 2889 // SExt or Trunc instructions. 2890 // Return the related handler. 2891 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || 2892 isa<ZExtInst>(ExtOpnd)) 2893 return promoteOperandForTruncAndAnyExt; 2894 2895 // Regular instruction. 2896 // Abort early if we will have to insert non-free instructions. 2897 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) 2898 return nullptr; 2899 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; 2900 } 2901 2902 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( 2903 llvm::Instruction *SExt, TypePromotionTransaction &TPT, 2904 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2905 SmallVectorImpl<Instruction *> *Exts, 2906 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { 2907 // By construction, the operand of SExt is an instruction. Otherwise we cannot 2908 // get through it and this method should not be called. 2909 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); 2910 Value *ExtVal = SExt; 2911 bool HasMergedNonFreeExt = false; 2912 if (isa<ZExtInst>(SExtOpnd)) { 2913 // Replace s|zext(zext(opnd)) 2914 // => zext(opnd). 2915 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); 2916 Value *ZExt = 2917 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); 2918 TPT.replaceAllUsesWith(SExt, ZExt); 2919 TPT.eraseInstruction(SExt); 2920 ExtVal = ZExt; 2921 } else { 2922 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) 2923 // => z|sext(opnd). 2924 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); 2925 } 2926 CreatedInstsCost = 0; 2927 2928 // Remove dead code. 2929 if (SExtOpnd->use_empty()) 2930 TPT.eraseInstruction(SExtOpnd); 2931 2932 // Check if the extension is still needed. 
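  // After the folds above, the remaining extension may now extend a value to
  // its own type (an illustrative degenerate 'sext i32 %x to i32'); such a
  // no-op is removed below and its operand is used directly.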
2933 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); 2934 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { 2935 if (ExtInst) { 2936 if (Exts) 2937 Exts->push_back(ExtInst); 2938 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; 2939 } 2940 return ExtVal; 2941 } 2942 2943 // At this point we have: ext ty opnd to ty. 2944 // Reassign the uses of ExtInst to the opnd and remove ExtInst. 2945 Value *NextVal = ExtInst->getOperand(0); 2946 TPT.eraseInstruction(ExtInst, NextVal); 2947 return NextVal; 2948 } 2949 2950 Value *TypePromotionHelper::promoteOperandForOther( 2951 Instruction *Ext, TypePromotionTransaction &TPT, 2952 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, 2953 SmallVectorImpl<Instruction *> *Exts, 2954 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, 2955 bool IsSExt) { 2956 // By construction, the operand of Ext is an instruction. Otherwise we cannot 2957 // get through it and this method should not be called. 2958 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); 2959 CreatedInstsCost = 0; 2960 if (!ExtOpnd->hasOneUse()) { 2961 // ExtOpnd will be promoted. 2962 // All its uses, but Ext, will need to use a truncated value of the 2963 // promoted version. 2964 // Create the truncate now. 2965 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); 2966 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { 2967 ITrunc->removeFromParent(); 2968 // Insert it just after the definition. 2969 ITrunc->insertAfter(ExtOpnd); 2970 if (Truncs) 2971 Truncs->push_back(ITrunc); 2972 } 2973 2974 TPT.replaceAllUsesWith(ExtOpnd, Trunc); 2975 // Restore the operand of Ext (which has been replaced by the previous call 2976 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. 2977 TPT.setOperand(Ext, 0, ExtOpnd); 2978 } 2979 2980 // Get through the Instruction: 2981 // 1. Update its type. 2982 // 2. Replace the uses of Ext by Inst. 2983 // 3. Extend each operand that needs to be extended. 2984 2985 // Remember the original type of the instruction before promotion. 2986 // This is useful to know that the high bits are sign extended bits. 2987 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>( 2988 ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt))); 2989 // Step #1. 2990 TPT.mutateType(ExtOpnd, Ext->getType()); 2991 // Step #2. 2992 TPT.replaceAllUsesWith(Ext, ExtOpnd); 2993 // Step #3. 2994 Instruction *ExtForOpnd = Ext; 2995 2996 DEBUG(dbgs() << "Propagate Ext to operands\n"); 2997 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; 2998 ++OpIdx) { 2999 DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); 3000 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || 3001 !shouldExtOperand(ExtOpnd, OpIdx)) { 3002 DEBUG(dbgs() << "No need to propagate\n"); 3003 continue; 3004 } 3005 // Check if we can statically extend the operand. 3006 Value *Opnd = ExtOpnd->getOperand(OpIdx); 3007 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { 3008 DEBUG(dbgs() << "Statically extend\n"); 3009 unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); 3010 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) 3011 : Cst->getValue().zext(BitWidth); 3012 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); 3013 continue; 3014 } 3015 // UndefValue are typed, so we have to statically sign extend them. 
    if (isa<UndefValue>(Opnd)) {
      DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    // Check whether Ext has already been reused to extend an operand.
    if (!ExtForOpnd) {
      // If so, create a new extension.
      DEBUG(dbgs() << "More operands to ext\n");
      Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
                                    : TPT.createZExt(Ext, Opnd, Ext->getType());
      if (!isa<Instruction>(ValForExtOpnd)) {
        TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
        continue;
      }
      ExtForOpnd = cast<Instruction>(ValForExtOpnd);
    }
    if (Exts)
      Exts->push_back(ExtForOpnd);
    TPT.setOperand(ExtForOpnd, 0, Opnd);

    // Move the sign extension before the insertion point.
    TPT.moveBefore(ExtForOpnd, ExtOpnd);
    TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
    CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more extensions are required, new instructions will have to be
    // created.
    ExtForOpnd = nullptr;
  }
  if (ExtForOpnd == Ext) {
    DEBUG(dbgs() << "Extension is useless now\n");
    TPT.eraseInstruction(Ext);
  }
  return ExtOpnd;
}

/// Check whether or not promoting an instruction to a wider type is profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode thanks to the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}

/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
3092 if (Depth >= 5) return false; 3093 3094 // By default, all matched instructions stay in place. 3095 if (MovedAway) 3096 *MovedAway = false; 3097 3098 switch (Opcode) { 3099 case Instruction::PtrToInt: 3100 // PtrToInt is always a noop, as we know that the int type is pointer sized. 3101 return matchAddr(AddrInst->getOperand(0), Depth); 3102 case Instruction::IntToPtr: { 3103 auto AS = AddrInst->getType()->getPointerAddressSpace(); 3104 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 3105 // This inttoptr is a no-op if the integer type is pointer sized. 3106 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) 3107 return matchAddr(AddrInst->getOperand(0), Depth); 3108 return false; 3109 } 3110 case Instruction::BitCast: 3111 // BitCast is always a noop, and we can handle it as long as it is 3112 // int->int or pointer->pointer (we don't want int<->fp or something). 3113 if ((AddrInst->getOperand(0)->getType()->isPointerTy() || 3114 AddrInst->getOperand(0)->getType()->isIntegerTy()) && 3115 // Don't touch identity bitcasts. These were probably put here by LSR, 3116 // and we don't want to mess around with them. Assume it knows what it 3117 // is doing. 3118 AddrInst->getOperand(0)->getType() != AddrInst->getType()) 3119 return matchAddr(AddrInst->getOperand(0), Depth); 3120 return false; 3121 case Instruction::AddrSpaceCast: { 3122 unsigned SrcAS 3123 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); 3124 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); 3125 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 3126 return matchAddr(AddrInst->getOperand(0), Depth); 3127 return false; 3128 } 3129 case Instruction::Add: { 3130 // Check to see if we can merge in the RHS then the LHS. If so, we win. 3131 ExtAddrMode BackupAddrMode = AddrMode; 3132 unsigned OldSize = AddrModeInsts.size(); 3133 // Start a transaction at this point. 3134 // The LHS may match but not the RHS. 3135 // Therefore, we need a higher level restoration point to undo partially 3136 // matched operation. 3137 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3138 TPT.getRestorationPoint(); 3139 3140 if (matchAddr(AddrInst->getOperand(1), Depth+1) && 3141 matchAddr(AddrInst->getOperand(0), Depth+1)) 3142 return true; 3143 3144 // Restore the old addr mode info. 3145 AddrMode = BackupAddrMode; 3146 AddrModeInsts.resize(OldSize); 3147 TPT.rollback(LastKnownGood); 3148 3149 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. 3150 if (matchAddr(AddrInst->getOperand(0), Depth+1) && 3151 matchAddr(AddrInst->getOperand(1), Depth+1)) 3152 return true; 3153 3154 // Otherwise we definitely can't merge the ADD in. 3155 AddrMode = BackupAddrMode; 3156 AddrModeInsts.resize(OldSize); 3157 TPT.rollback(LastKnownGood); 3158 break; 3159 } 3160 //case Instruction::Or: 3161 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 3162 //break; 3163 case Instruction::Mul: 3164 case Instruction::Shl: { 3165 // Can only handle X*C and X << C. 3166 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); 3167 if (!RHS) 3168 return false; 3169 int64_t Scale = RHS->getSExtValue(); 3170 if (Opcode == Instruction::Shl) 3171 Scale = 1LL << Scale; 3172 3173 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); 3174 } 3175 case Instruction::GetElementPtr: { 3176 // Scan the GEP. We check it if it contains constant offsets and at most 3177 // one variable offset. 
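// Illustrative example (hypothetical IR, assuming i32 is 4 bytes and the
// struct has no padding):
//   %p = getelementptr {i32, [8 x i32]}* %base, i64 0, i32 1, i64 %idx
// contributes a constant offset of 4 for the struct field plus a single
// variable operand %idx with scale 4 (the array element size), i.e. the
// candidate addressing mode is roughly [%base + 4 + 4*%idx].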
3178 int VariableOperand = -1; 3179 unsigned VariableScale = 0; 3180 3181 int64_t ConstantOffset = 0; 3182 gep_type_iterator GTI = gep_type_begin(AddrInst); 3183 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { 3184 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 3185 const StructLayout *SL = DL.getStructLayout(STy); 3186 unsigned Idx = 3187 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); 3188 ConstantOffset += SL->getElementOffset(Idx); 3189 } else { 3190 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); 3191 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { 3192 ConstantOffset += CI->getSExtValue()*TypeSize; 3193 } else if (TypeSize) { // Scales of zero don't do anything. 3194 // We only allow one variable index at the moment. 3195 if (VariableOperand != -1) 3196 return false; 3197 3198 // Remember the variable index. 3199 VariableOperand = i; 3200 VariableScale = TypeSize; 3201 } 3202 } 3203 } 3204 3205 // A common case is for the GEP to only do a constant offset. In this case, 3206 // just add it to the disp field and check validity. 3207 if (VariableOperand == -1) { 3208 AddrMode.BaseOffs += ConstantOffset; 3209 if (ConstantOffset == 0 || 3210 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { 3211 // Check to see if we can fold the base pointer in too. 3212 if (matchAddr(AddrInst->getOperand(0), Depth+1)) 3213 return true; 3214 } 3215 AddrMode.BaseOffs -= ConstantOffset; 3216 return false; 3217 } 3218 3219 // Save the valid addressing mode in case we can't match. 3220 ExtAddrMode BackupAddrMode = AddrMode; 3221 unsigned OldSize = AddrModeInsts.size(); 3222 3223 // See if the scale and offset amount is valid for this target. 3224 AddrMode.BaseOffs += ConstantOffset; 3225 3226 // Match the base operand of the GEP. 3227 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { 3228 // If it couldn't be matched, just stuff the value in a register. 3229 if (AddrMode.HasBaseReg) { 3230 AddrMode = BackupAddrMode; 3231 AddrModeInsts.resize(OldSize); 3232 return false; 3233 } 3234 AddrMode.HasBaseReg = true; 3235 AddrMode.BaseReg = AddrInst->getOperand(0); 3236 } 3237 3238 // Match the remaining variable portion of the GEP. 3239 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, 3240 Depth)) { 3241 // If it couldn't be matched, try stuffing the base into a register 3242 // instead of matching it, and retrying the match of the scale. 3243 AddrMode = BackupAddrMode; 3244 AddrModeInsts.resize(OldSize); 3245 if (AddrMode.HasBaseReg) 3246 return false; 3247 AddrMode.HasBaseReg = true; 3248 AddrMode.BaseReg = AddrInst->getOperand(0); 3249 AddrMode.BaseOffs += ConstantOffset; 3250 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), 3251 VariableScale, Depth)) { 3252 // If even that didn't work, bail. 3253 AddrMode = BackupAddrMode; 3254 AddrModeInsts.resize(OldSize); 3255 return false; 3256 } 3257 } 3258 3259 return true; 3260 } 3261 case Instruction::SExt: 3262 case Instruction::ZExt: { 3263 Instruction *Ext = dyn_cast<Instruction>(AddrInst); 3264 if (!Ext) 3265 return false; 3266 3267 // Try to move this ext out of the way of the addressing mode. 3268 // Ask for a method for doing so. 
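// (The returned action, if any, is a callable that performs the rewrite
// through the transaction TPT; a null action means the ext cannot be moved
// out of the way.)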
3269 TypePromotionHelper::Action TPH = 3270 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); 3271 if (!TPH) 3272 return false; 3273 3274 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3275 TPT.getRestorationPoint(); 3276 unsigned CreatedInstsCost = 0; 3277 unsigned ExtCost = !TLI.isExtFree(Ext); 3278 Value *PromotedOperand = 3279 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); 3280 // SExt has been moved away. 3281 // Thus either it will be rematched later in the recursive calls or it is 3282 // gone. Anyway, we must not fold it into the addressing mode at this point. 3283 // E.g., 3284 // op = add opnd, 1 3285 // idx = ext op 3286 // addr = gep base, idx 3287 // is now: 3288 // promotedOpnd = ext opnd <- no match here 3289 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) 3290 // addr = gep base, op <- match 3291 if (MovedAway) 3292 *MovedAway = true; 3293 3294 assert(PromotedOperand && 3295 "TypePromotionHelper should have filtered out those cases"); 3296 3297 ExtAddrMode BackupAddrMode = AddrMode; 3298 unsigned OldSize = AddrModeInsts.size(); 3299 3300 if (!matchAddr(PromotedOperand, Depth) || 3301 // The total of the new cost is equal to the cost of the created 3302 // instructions. 3303 // The total of the old cost is equal to the cost of the extension plus 3304 // what we have saved in the addressing mode. 3305 !isPromotionProfitable(CreatedInstsCost, 3306 ExtCost + (AddrModeInsts.size() - OldSize), 3307 PromotedOperand)) { 3308 AddrMode = BackupAddrMode; 3309 AddrModeInsts.resize(OldSize); 3310 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); 3311 TPT.rollback(LastKnownGood); 3312 return false; 3313 } 3314 return true; 3315 } 3316 } 3317 return false; 3318 } 3319 3320 /// If we can, try to add the value of 'Addr' into the current addressing mode. 3321 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode 3322 /// unmodified. This assumes that Addr is either a pointer type or intptr_t 3323 /// for the target. 3324 /// 3325 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { 3326 // Start a transaction at this point that we will rollback if the matching 3327 // fails. 3328 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3329 TPT.getRestorationPoint(); 3330 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { 3331 // Fold in immediates if legal for the target. 3332 AddrMode.BaseOffs += CI->getSExtValue(); 3333 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3334 return true; 3335 AddrMode.BaseOffs -= CI->getSExtValue(); 3336 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { 3337 // If this is a global variable, try to fold it into the addressing mode. 3338 if (!AddrMode.BaseGV) { 3339 AddrMode.BaseGV = GV; 3340 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3341 return true; 3342 AddrMode.BaseGV = nullptr; 3343 } 3344 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { 3345 ExtAddrMode BackupAddrMode = AddrMode; 3346 unsigned OldSize = AddrModeInsts.size(); 3347 3348 // Check to see if it is possible to fold this operation. 3349 bool MovedAway = false; 3350 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { 3351 // This instruction may have been moved away. If so, there is nothing 3352 // to check here. 3353 if (MovedAway) 3354 return true; 3355 // Okay, it's possible to fold this. Check to see if it is actually 3356 // *profitable* to do so. 
We use a simple cost model to avoid increasing 3357 // register pressure too much. 3358 if (I->hasOneUse() || 3359 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { 3360 AddrModeInsts.push_back(I); 3361 return true; 3362 } 3363 3364 // It isn't profitable to do this, roll back. 3365 //cerr << "NOT FOLDING: " << *I; 3366 AddrMode = BackupAddrMode; 3367 AddrModeInsts.resize(OldSize); 3368 TPT.rollback(LastKnownGood); 3369 } 3370 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { 3371 if (matchOperationAddr(CE, CE->getOpcode(), Depth)) 3372 return true; 3373 TPT.rollback(LastKnownGood); 3374 } else if (isa<ConstantPointerNull>(Addr)) { 3375 // Null pointer gets folded without affecting the addressing mode. 3376 return true; 3377 } 3378 3379 // Worse case, the target should support [reg] addressing modes. :) 3380 if (!AddrMode.HasBaseReg) { 3381 AddrMode.HasBaseReg = true; 3382 AddrMode.BaseReg = Addr; 3383 // Still check for legality in case the target supports [imm] but not [i+r]. 3384 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3385 return true; 3386 AddrMode.HasBaseReg = false; 3387 AddrMode.BaseReg = nullptr; 3388 } 3389 3390 // If the base register is already taken, see if we can do [r+r]. 3391 if (AddrMode.Scale == 0) { 3392 AddrMode.Scale = 1; 3393 AddrMode.ScaledReg = Addr; 3394 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) 3395 return true; 3396 AddrMode.Scale = 0; 3397 AddrMode.ScaledReg = nullptr; 3398 } 3399 // Couldn't match. 3400 TPT.rollback(LastKnownGood); 3401 return false; 3402 } 3403 3404 /// Check to see if all uses of OpVal by the specified inline asm call are due 3405 /// to memory operands. If so, return true, otherwise return false. 3406 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, 3407 const TargetMachine &TM) { 3408 const Function *F = CI->getParent()->getParent(); 3409 const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering(); 3410 const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo(); 3411 TargetLowering::AsmOperandInfoVector TargetConstraints = 3412 TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI, 3413 ImmutableCallSite(CI)); 3414 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 3415 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 3416 3417 // Compute the constraint code and ConstraintType to use. 3418 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 3419 3420 // If this asm operand is our Value*, and if it isn't an indirect memory 3421 // operand, we can't fold it! 3422 if (OpInfo.CallOperandVal == OpVal && 3423 (OpInfo.ConstraintType != TargetLowering::C_Memory || 3424 !OpInfo.isIndirect)) 3425 return false; 3426 } 3427 3428 return true; 3429 } 3430 3431 /// Recursively walk all the uses of I until we find a memory use. 3432 /// If we find an obviously non-foldable instruction, return true. 3433 /// Add the ultimately found memory instructions to MemoryUses. 3434 static bool FindAllMemoryUses( 3435 Instruction *I, 3436 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, 3437 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetMachine &TM) { 3438 // If we already considered this instruction, we're done. 3439 if (!ConsideredInsts.insert(I).second) 3440 return false; 3441 3442 // If this is an obviously unfoldable instruction, bail out. 3443 if (!MightBeFoldableInst(I)) 3444 return true; 3445 3446 // Loop over all the uses, recursively processing them. 
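// Illustrative example (hypothetical IR): if I is an 'add' whose only users
// are a load and the address operand of a store, both end up in MemoryUses;
// a user such as a call to anything other than inline asm makes us return
// true (non-foldable).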
3447 for (Use &U : I->uses()) { 3448 Instruction *UserI = cast<Instruction>(U.getUser()); 3449 3450 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { 3451 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); 3452 continue; 3453 } 3454 3455 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { 3456 unsigned opNo = U.getOperandNo(); 3457 if (opNo == 0) return true; // Storing addr, not into addr. 3458 MemoryUses.push_back(std::make_pair(SI, opNo)); 3459 continue; 3460 } 3461 3462 if (CallInst *CI = dyn_cast<CallInst>(UserI)) { 3463 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); 3464 if (!IA) return true; 3465 3466 // If this is a memory operand, we're cool, otherwise bail out. 3467 if (!IsOperandAMemoryOperand(CI, IA, I, TM)) 3468 return true; 3469 continue; 3470 } 3471 3472 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM)) 3473 return true; 3474 } 3475 3476 return false; 3477 } 3478 3479 /// Return true if Val is already known to be live at the use site that we're 3480 /// folding it into. If so, there is no cost to include it in the addressing 3481 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the 3482 /// instruction already. 3483 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, 3484 Value *KnownLive2) { 3485 // If Val is either of the known-live values, we know it is live! 3486 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) 3487 return true; 3488 3489 // All values other than instructions and arguments (e.g. constants) are live. 3490 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; 3491 3492 // If Val is a constant sized alloca in the entry block, it is live, this is 3493 // true because it is just a reference to the stack/frame pointer, which is 3494 // live for the whole function. 3495 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) 3496 if (AI->isStaticAlloca()) 3497 return true; 3498 3499 // Check to see if this value is already used in the memory instruction's 3500 // block. If so, it's already live into the block at the very least, so we 3501 // can reasonably fold it. 3502 return Val->isUsedInBasicBlock(MemoryInst->getParent()); 3503 } 3504 3505 /// It is possible for the addressing mode of the machine to fold the specified 3506 /// instruction into a load or store that ultimately uses it. 3507 /// However, the specified instruction has multiple uses. 3508 /// Given this, it may actually increase register pressure to fold it 3509 /// into the load. For example, consider this code: 3510 /// 3511 /// X = ... 3512 /// Y = X+1 3513 /// use(Y) -> nonload/store 3514 /// Z = Y+1 3515 /// load Z 3516 /// 3517 /// In this case, Y has multiple uses, and can be folded into the load of Z 3518 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to 3519 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one 3520 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the 3521 /// number of computations either. 3522 /// 3523 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If 3524 /// X was live across 'load Z' for other reasons, we actually *would* want to 3525 /// fold the addressing mode in the Z case. This would make Y die earlier. 
3526 bool AddressingModeMatcher:: 3527 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, 3528 ExtAddrMode &AMAfter) { 3529 if (IgnoreProfitability) return true; 3530 3531 // AMBefore is the addressing mode before this instruction was folded into it, 3532 // and AMAfter is the addressing mode after the instruction was folded. Get 3533 // the set of registers referenced by AMAfter and subtract out those 3534 // referenced by AMBefore: this is the set of values which folding in this 3535 // address extends the lifetime of. 3536 // 3537 // Note that there are only two potential values being referenced here, 3538 // BaseReg and ScaleReg (global addresses are always available, as are any 3539 // folded immediates). 3540 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; 3541 3542 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their 3543 // lifetime wasn't extended by adding this instruction. 3544 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 3545 BaseReg = nullptr; 3546 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) 3547 ScaledReg = nullptr; 3548 3549 // If folding this instruction (and it's subexprs) didn't extend any live 3550 // ranges, we're ok with it. 3551 if (!BaseReg && !ScaledReg) 3552 return true; 3553 3554 // If all uses of this instruction are ultimately load/store/inlineasm's, 3555 // check to see if their addressing modes will include this instruction. If 3556 // so, we can fold it into all uses, so it doesn't matter if it has multiple 3557 // uses. 3558 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; 3559 SmallPtrSet<Instruction*, 16> ConsideredInsts; 3560 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM)) 3561 return false; // Has a non-memory, non-foldable use! 3562 3563 // Now that we know that all uses of this instruction are part of a chain of 3564 // computation involving only operations that could theoretically be folded 3565 // into a memory use, loop over each of these uses and see if they could 3566 // *actually* fold the instruction. 3567 SmallVector<Instruction*, 32> MatchedAddrModeInsts; 3568 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { 3569 Instruction *User = MemoryUses[i].first; 3570 unsigned OpNo = MemoryUses[i].second; 3571 3572 // Get the access type of this use. If the use isn't a pointer, we don't 3573 // know what it accesses. 3574 Value *Address = User->getOperand(OpNo); 3575 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); 3576 if (!AddrTy) 3577 return false; 3578 Type *AddressAccessTy = AddrTy->getElementType(); 3579 unsigned AS = AddrTy->getAddressSpace(); 3580 3581 // Do a match against the root of this address, ignoring profitability. This 3582 // will tell us if the addressing mode for the memory operation will 3583 // *actually* cover the shared instruction. 3584 ExtAddrMode Result; 3585 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3586 TPT.getRestorationPoint(); 3587 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS, 3588 MemoryInst, Result, InsertedInsts, 3589 PromotedInsts, TPT); 3590 Matcher.IgnoreProfitability = true; 3591 bool Success = Matcher.matchAddr(Address, 0); 3592 (void)Success; assert(Success && "Couldn't select *anything*?"); 3593 3594 // The match was to check the profitability, the changes made are not 3595 // part of the original matcher. 
Therefore, they should be dropped 3596 // otherwise the original matcher will not present the right state. 3597 TPT.rollback(LastKnownGood); 3598 3599 // If the match didn't cover I, then it won't be shared by it. 3600 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(), 3601 I) == MatchedAddrModeInsts.end()) 3602 return false; 3603 3604 MatchedAddrModeInsts.clear(); 3605 } 3606 3607 return true; 3608 } 3609 3610 } // end anonymous namespace 3611 3612 /// Return true if the specified values are defined in a 3613 /// different basic block than BB. 3614 static bool IsNonLocalValue(Value *V, BasicBlock *BB) { 3615 if (Instruction *I = dyn_cast<Instruction>(V)) 3616 return I->getParent() != BB; 3617 return false; 3618 } 3619 3620 /// Load and Store Instructions often have addressing modes that can do 3621 /// significant amounts of computation. As such, instruction selection will try 3622 /// to get the load or store to do as much computation as possible for the 3623 /// program. The problem is that isel can only see within a single block. As 3624 /// such, we sink as much legal addressing mode work into the block as possible. 3625 /// 3626 /// This method is used to optimize both load/store and inline asms with memory 3627 /// operands. 3628 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, 3629 Type *AccessTy, unsigned AddrSpace) { 3630 Value *Repl = Addr; 3631 3632 // Try to collapse single-value PHI nodes. This is necessary to undo 3633 // unprofitable PRE transformations. 3634 SmallVector<Value*, 8> worklist; 3635 SmallPtrSet<Value*, 16> Visited; 3636 worklist.push_back(Addr); 3637 3638 // Use a worklist to iteratively look through PHI nodes, and ensure that 3639 // the addressing mode obtained from the non-PHI roots of the graph 3640 // are equivalent. 3641 Value *Consensus = nullptr; 3642 unsigned NumUsesConsensus = 0; 3643 bool IsNumUsesConsensusValid = false; 3644 SmallVector<Instruction*, 16> AddrModeInsts; 3645 ExtAddrMode AddrMode; 3646 TypePromotionTransaction TPT; 3647 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 3648 TPT.getRestorationPoint(); 3649 while (!worklist.empty()) { 3650 Value *V = worklist.back(); 3651 worklist.pop_back(); 3652 3653 // Break use-def graph loops. 3654 if (!Visited.insert(V).second) { 3655 Consensus = nullptr; 3656 break; 3657 } 3658 3659 // For a PHI node, push all of its incoming values. 3660 if (PHINode *P = dyn_cast<PHINode>(V)) { 3661 for (Value *IncValue : P->incoming_values()) 3662 worklist.push_back(IncValue); 3663 continue; 3664 } 3665 3666 // For non-PHIs, determine the addressing mode being computed. 3667 SmallVector<Instruction*, 16> NewAddrModeInsts; 3668 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( 3669 V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM, 3670 InsertedInsts, PromotedInsts, TPT); 3671 3672 // This check is broken into two cases with very similar code to avoid using 3673 // getNumUses() as much as possible. Some values have a lot of uses, so 3674 // calling getNumUses() unconditionally caused a significant compile-time 3675 // regression. 
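// In both cases the goal is the same: keep a single consensus addressing
// mode across every non-PHI root. For example (illustrative only), if Addr
// is 'phi [%a, %bb1], [%b, %bb2]' and both %a and %b match [%base + 16], we
// keep going with that mode; if the roots disagree, Consensus is cleared
// below and we give up on this memory instruction.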
3676 if (!Consensus) {
3677 Consensus = V;
3678 AddrMode = NewAddrMode;
3679 AddrModeInsts = NewAddrModeInsts;
3680 continue;
3681 } else if (NewAddrMode == AddrMode) {
3682 if (!IsNumUsesConsensusValid) {
3683 NumUsesConsensus = Consensus->getNumUses();
3684 IsNumUsesConsensusValid = true;
3685 }
3686
3687 // Ensure that the obtained addressing mode is equivalent to that obtained
3688 // for all other roots of the PHI traversal. Also, when choosing one
3689 // such root as representative, select the one with the most uses in order
3690 // to keep the cost modeling heuristics in AddressingModeMatcher
3691 // applicable.
3692 unsigned NumUses = V->getNumUses();
3693 if (NumUses > NumUsesConsensus) {
3694 Consensus = V;
3695 NumUsesConsensus = NumUses;
3696 AddrModeInsts = NewAddrModeInsts;
3697 }
3698 continue;
3699 }
3700
3701 Consensus = nullptr;
3702 break;
3703 }
3704
3705 // If the addressing mode couldn't be determined, or if multiple different
3706 // ones were determined, bail out now.
3707 if (!Consensus) {
3708 TPT.rollback(LastKnownGood);
3709 return false;
3710 }
3711 TPT.commit();
3712
3713 // Check to see if any of the instructions subsumed by this addr mode are
3714 // non-local to I's BB.
3715 bool AnyNonLocal = false;
3716 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
3717 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
3718 AnyNonLocal = true;
3719 break;
3720 }
3721 }
3722
3723 // If all the instructions matched are already in this BB, don't do anything.
3724 if (!AnyNonLocal) {
3725 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
3726 return false;
3727 }
3728
3729 // Insert this computation right after this user. Since our caller is
3730 // scanning from the top of the BB to the bottom, any reuse of the expr is
3731 // guaranteed to happen later.
3732 IRBuilder<> Builder(MemoryInst);
3733
3734 // Now that we've determined the addressing expression that we want to use
3735 // and know that we have to sink it into this block, check to see if we have
3736 // already done this for some other load/store instr in this block. If so,
3737 // reuse the computation.
3738 Value *&SunkAddr = SunkAddrs[Addr];
3739 if (SunkAddr) {
3740 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
3741 << *MemoryInst << "\n");
3742 if (SunkAddr->getType() != Addr->getType())
3743 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
3744 } else if (AddrSinkUsingGEPs ||
3745 (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
3746 TM->getSubtargetImpl(*MemoryInst->getParent()->getParent())
3747 ->useAA())) {
3748 // By default, we use the GEP-based method when AA is used later. This
3749 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
3750 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
3751 << *MemoryInst << "\n");
3752 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
3753 Value *ResultPtr = nullptr, *ResultIndex = nullptr;
3754
3755 // First, find the pointer.
3756 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
3757 ResultPtr = AddrMode.BaseReg;
3758 AddrMode.BaseReg = nullptr;
3759 }
3760
3761 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
3762 // We can't add more than one pointer together, nor can we scale a
3763 // pointer (both of which seem meaningless).
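// If that happens (e.g. both the base and the scaled value turned out to be
// pointers, or a pointer would need scaling), we simply give up on sinking
// this address.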
3764 if (ResultPtr || AddrMode.Scale != 1) 3765 return false; 3766 3767 ResultPtr = AddrMode.ScaledReg; 3768 AddrMode.Scale = 0; 3769 } 3770 3771 if (AddrMode.BaseGV) { 3772 if (ResultPtr) 3773 return false; 3774 3775 ResultPtr = AddrMode.BaseGV; 3776 } 3777 3778 // If the real base value actually came from an inttoptr, then the matcher 3779 // will look through it and provide only the integer value. In that case, 3780 // use it here. 3781 if (!ResultPtr && AddrMode.BaseReg) { 3782 ResultPtr = 3783 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); 3784 AddrMode.BaseReg = nullptr; 3785 } else if (!ResultPtr && AddrMode.Scale == 1) { 3786 ResultPtr = 3787 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); 3788 AddrMode.Scale = 0; 3789 } 3790 3791 if (!ResultPtr && 3792 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { 3793 SunkAddr = Constant::getNullValue(Addr->getType()); 3794 } else if (!ResultPtr) { 3795 return false; 3796 } else { 3797 Type *I8PtrTy = 3798 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); 3799 Type *I8Ty = Builder.getInt8Ty(); 3800 3801 // Start with the base register. Do this first so that subsequent address 3802 // matching finds it last, which will prevent it from trying to match it 3803 // as the scaled value in case it happens to be a mul. That would be 3804 // problematic if we've sunk a different mul for the scale, because then 3805 // we'd end up sinking both muls. 3806 if (AddrMode.BaseReg) { 3807 Value *V = AddrMode.BaseReg; 3808 if (V->getType() != IntPtrTy) 3809 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 3810 3811 ResultIndex = V; 3812 } 3813 3814 // Add the scale value. 3815 if (AddrMode.Scale) { 3816 Value *V = AddrMode.ScaledReg; 3817 if (V->getType() == IntPtrTy) { 3818 // done. 3819 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 3820 cast<IntegerType>(V->getType())->getBitWidth()) { 3821 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 3822 } else { 3823 // It is only safe to sign extend the BaseReg if we know that the math 3824 // required to create it did not overflow before we extend it. Since 3825 // the original IR value was tossed in favor of a constant back when 3826 // the AddrMode was created we need to bail out gracefully if widths 3827 // do not match instead of extending it. 3828 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); 3829 if (I && (ResultIndex != AddrMode.BaseReg)) 3830 I->eraseFromParent(); 3831 return false; 3832 } 3833 3834 if (AddrMode.Scale != 1) 3835 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 3836 "sunkaddr"); 3837 if (ResultIndex) 3838 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); 3839 else 3840 ResultIndex = V; 3841 } 3842 3843 // Add in the Base Offset if present. 3844 if (AddrMode.BaseOffs) { 3845 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 3846 if (ResultIndex) { 3847 // We need to add this separately from the scale above to help with 3848 // SDAG consecutive load/store merging. 
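// Illustrative shape of the result (hypothetical values): for a mode like
// [%base + 4*%idx + 8] this path emits roughly
//   %sunkaddr0 = getelementptr i8* %base.i8, <4 * %idx>
//   %sunkaddr  = getelementptr i8* %sunkaddr0, i64 8
// so the constant offset stays in its own GEP.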
3849 if (ResultPtr->getType() != I8PtrTy) 3850 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 3851 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 3852 } 3853 3854 ResultIndex = V; 3855 } 3856 3857 if (!ResultIndex) { 3858 SunkAddr = ResultPtr; 3859 } else { 3860 if (ResultPtr->getType() != I8PtrTy) 3861 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); 3862 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); 3863 } 3864 3865 if (SunkAddr->getType() != Addr->getType()) 3866 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); 3867 } 3868 } else { 3869 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " 3870 << *MemoryInst << "\n"); 3871 Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); 3872 Value *Result = nullptr; 3873 3874 // Start with the base register. Do this first so that subsequent address 3875 // matching finds it last, which will prevent it from trying to match it 3876 // as the scaled value in case it happens to be a mul. That would be 3877 // problematic if we've sunk a different mul for the scale, because then 3878 // we'd end up sinking both muls. 3879 if (AddrMode.BaseReg) { 3880 Value *V = AddrMode.BaseReg; 3881 if (V->getType()->isPointerTy()) 3882 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 3883 if (V->getType() != IntPtrTy) 3884 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); 3885 Result = V; 3886 } 3887 3888 // Add the scale value. 3889 if (AddrMode.Scale) { 3890 Value *V = AddrMode.ScaledReg; 3891 if (V->getType() == IntPtrTy) { 3892 // done. 3893 } else if (V->getType()->isPointerTy()) { 3894 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); 3895 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < 3896 cast<IntegerType>(V->getType())->getBitWidth()) { 3897 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); 3898 } else { 3899 // It is only safe to sign extend the BaseReg if we know that the math 3900 // required to create it did not overflow before we extend it. Since 3901 // the original IR value was tossed in favor of a constant back when 3902 // the AddrMode was created we need to bail out gracefully if widths 3903 // do not match instead of extending it. 3904 Instruction *I = dyn_cast_or_null<Instruction>(Result); 3905 if (I && (Result != AddrMode.BaseReg)) 3906 I->eraseFromParent(); 3907 return false; 3908 } 3909 if (AddrMode.Scale != 1) 3910 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), 3911 "sunkaddr"); 3912 if (Result) 3913 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3914 else 3915 Result = V; 3916 } 3917 3918 // Add in the BaseGV if present. 3919 if (AddrMode.BaseGV) { 3920 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); 3921 if (Result) 3922 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3923 else 3924 Result = V; 3925 } 3926 3927 // Add in the Base Offset if present. 3928 if (AddrMode.BaseOffs) { 3929 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); 3930 if (Result) 3931 Result = Builder.CreateAdd(Result, V, "sunkaddr"); 3932 else 3933 Result = V; 3934 } 3935 3936 if (!Result) 3937 SunkAddr = Constant::getNullValue(Addr->getType()); 3938 else 3939 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); 3940 } 3941 3942 MemoryInst->replaceUsesOfWith(Repl, SunkAddr); 3943 3944 // If we have no uses, recursively delete the value and all dead instructions 3945 // using it. 
3946 if (Repl->use_empty()) { 3947 // This can cause recursive deletion, which can invalidate our iterator. 3948 // Use a WeakVH to hold onto it in case this happens. 3949 Value *CurValue = &*CurInstIterator; 3950 WeakVH IterHandle(CurValue); 3951 BasicBlock *BB = CurInstIterator->getParent(); 3952 3953 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); 3954 3955 if (IterHandle != CurValue) { 3956 // If the iterator instruction was recursively deleted, start over at the 3957 // start of the block. 3958 CurInstIterator = BB->begin(); 3959 SunkAddrs.clear(); 3960 } 3961 } 3962 ++NumMemoryInsts; 3963 return true; 3964 } 3965 3966 /// If there are any memory operands, use OptimizeMemoryInst to sink their 3967 /// address computing into the block when possible / profitable. 3968 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { 3969 bool MadeChange = false; 3970 3971 const TargetRegisterInfo *TRI = 3972 TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo(); 3973 TargetLowering::AsmOperandInfoVector TargetConstraints = 3974 TLI->ParseConstraints(*DL, TRI, CS); 3975 unsigned ArgNo = 0; 3976 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 3977 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 3978 3979 // Compute the constraint code and ConstraintType to use. 3980 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 3981 3982 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 3983 OpInfo.isIndirect) { 3984 Value *OpVal = CS->getArgOperand(ArgNo++); 3985 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); 3986 } else if (OpInfo.Type == InlineAsm::isInput) 3987 ArgNo++; 3988 } 3989 3990 return MadeChange; 3991 } 3992 3993 /// \brief Check if all the uses of \p Inst are equivalent (or free) zero or 3994 /// sign extensions. 3995 static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) { 3996 assert(!Inst->use_empty() && "Input must have at least one use"); 3997 const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin()); 3998 bool IsSExt = isa<SExtInst>(FirstUser); 3999 Type *ExtTy = FirstUser->getType(); 4000 for (const User *U : Inst->users()) { 4001 const Instruction *UI = cast<Instruction>(U); 4002 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) 4003 return false; 4004 Type *CurTy = UI->getType(); 4005 // Same input and output types: Same instruction after CSE. 4006 if (CurTy == ExtTy) 4007 continue; 4008 4009 // If IsSExt is true, we are in this situation: 4010 // a = Inst 4011 // b = sext ty1 a to ty2 4012 // c = sext ty1 a to ty3 4013 // Assuming ty2 is shorter than ty3, this could be turned into: 4014 // a = Inst 4015 // b = sext ty1 a to ty2 4016 // c = sext ty2 b to ty3 4017 // However, the last sext is not free. 4018 if (IsSExt) 4019 return false; 4020 4021 // This is a ZExt, maybe this is free to extend from one type to another. 4022 // In that case, we would not account for a different use. 4023 Type *NarrowTy; 4024 Type *LargeTy; 4025 if (ExtTy->getScalarType()->getIntegerBitWidth() > 4026 CurTy->getScalarType()->getIntegerBitWidth()) { 4027 NarrowTy = CurTy; 4028 LargeTy = ExtTy; 4029 } else { 4030 NarrowTy = ExtTy; 4031 LargeTy = CurTy; 4032 } 4033 4034 if (!TLI.isZExtFree(NarrowTy, LargeTy)) 4035 return false; 4036 } 4037 // All uses are the same or can be derived from one another for free. 4038 return true; 4039 } 4040 4041 /// \brief Try to form ExtLd by promoting \p Exts until they reach a 4042 /// load instruction. 
4043 /// If an ext(load) can be formed, it is returned via \p LI for the load
4044 /// and \p Inst for the extension.
4045 /// Otherwise LI == nullptr and Inst == nullptr.
4046 /// When some promotion happened, \p TPT contains the proper state to
4047 /// revert it.
4048 ///
4049 /// \return true when promoting was necessary to expose the ext(load)
4050 /// opportunity, false otherwise.
4051 ///
4052 /// Example:
4053 /// \code
4054 /// %ld = load i32* %addr
4055 /// %add = add nuw i32 %ld, 4
4056 /// %zext = zext i32 %add to i64
4057 /// \endcode
4058 /// =>
4059 /// \code
4060 /// %ld = load i32* %addr
4061 /// %zext = zext i32 %ld to i64
4062 /// %add = add nuw i64 %zext, 4
4063 /// \endcode
4064 /// Thanks to the promotion, we can match zext(load i32*) to i64.
4065 bool CodeGenPrepare::extLdPromotion(TypePromotionTransaction &TPT,
4066 LoadInst *&LI, Instruction *&Inst,
4067 const SmallVectorImpl<Instruction *> &Exts,
4068 unsigned CreatedInstsCost = 0) {
4069 // Iterate over all the extensions to see if one forms an ext(load).
4070 for (auto I : Exts) {
4071 // Check if we directly have ext(load).
4072 if ((LI = dyn_cast<LoadInst>(I->getOperand(0)))) {
4073 Inst = I;
4074 // No promotion happened here.
4075 return false;
4076 }
4077 // Check whether or not we want to do any promotion.
4078 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
4079 continue;
4080 // Get the action to perform the promotion.
4081 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
4082 I, InsertedInsts, *TLI, PromotedInsts);
4083 // Check if we can promote.
4084 if (!TPH)
4085 continue;
4086 // Save the current state.
4087 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4088 TPT.getRestorationPoint();
4089 SmallVector<Instruction *, 4> NewExts;
4090 unsigned NewCreatedInstsCost = 0;
4091 unsigned ExtCost = !TLI->isExtFree(I);
4092 // Promote.
4093 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
4094 &NewExts, nullptr, *TLI);
4095 assert(PromotedVal &&
4096 "TypePromotionHelper should have filtered out those cases");
4097
4098 // Only one extension can be merged into a load.
4099 // Therefore, if we have more than one new extension we heuristically
4100 // cut this search path, because it means we degrade the code quality.
4101 // With exactly 2, the transformation is neutral, because we will merge
4102 // one extension but leave one. However, we optimistically keep going,
4103 // because the new extension may be removed too.
4104 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
4105 TotalCreatedInstsCost -= ExtCost;
4106 if (!StressExtLdPromotion &&
4107 (TotalCreatedInstsCost > 1 ||
4108 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
4109 // The promotion is not profitable; roll back to the previous state.
4110 TPT.rollback(LastKnownGood);
4111 continue;
4112 }
4113 // The promotion is profitable.
4114 // Check if it exposes an ext(load).
4115 (void)extLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInstsCost);
4116 if (LI && (StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
4117 // If we have created a new extension, i.e., we now have two
4118 // extensions, we must make sure one of them is merged with
4119 // the load; otherwise we may degrade the code quality.
4120 (LI->hasOneUse() || hasSameExtUse(LI, *TLI))))
4121 // Promotion happened.
4122 return true;
4123 // If this does not help to expose an ext(load), roll back.
4124 TPT.rollback(LastKnownGood); 4125 } 4126 // None of the extension can form an ext(load). 4127 LI = nullptr; 4128 Inst = nullptr; 4129 return false; 4130 } 4131 4132 /// Move a zext or sext fed by a load into the same basic block as the load, 4133 /// unless conditions are unfavorable. This allows SelectionDAG to fold the 4134 /// extend into the load. 4135 /// \p I[in/out] the extension may be modified during the process if some 4136 /// promotions apply. 4137 /// 4138 bool CodeGenPrepare::moveExtToFormExtLoad(Instruction *&I) { 4139 // Try to promote a chain of computation if it allows to form 4140 // an extended load. 4141 TypePromotionTransaction TPT; 4142 TypePromotionTransaction::ConstRestorationPt LastKnownGood = 4143 TPT.getRestorationPoint(); 4144 SmallVector<Instruction *, 1> Exts; 4145 Exts.push_back(I); 4146 // Look for a load being extended. 4147 LoadInst *LI = nullptr; 4148 Instruction *OldExt = I; 4149 bool HasPromoted = extLdPromotion(TPT, LI, I, Exts); 4150 if (!LI || !I) { 4151 assert(!HasPromoted && !LI && "If we did not match any load instruction " 4152 "the code must remain the same"); 4153 I = OldExt; 4154 return false; 4155 } 4156 4157 // If they're already in the same block, there's nothing to do. 4158 // Make the cheap checks first if we did not promote. 4159 // If we promoted, we need to check if it is indeed profitable. 4160 if (!HasPromoted && LI->getParent() == I->getParent()) 4161 return false; 4162 4163 EVT VT = TLI->getValueType(*DL, I->getType()); 4164 EVT LoadVT = TLI->getValueType(*DL, LI->getType()); 4165 4166 // If the load has other users and the truncate is not free, this probably 4167 // isn't worthwhile. 4168 if (!LI->hasOneUse() && TLI && 4169 (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) && 4170 !TLI->isTruncateFree(I->getType(), LI->getType())) { 4171 I = OldExt; 4172 TPT.rollback(LastKnownGood); 4173 return false; 4174 } 4175 4176 // Check whether the target supports casts folded into loads. 4177 unsigned LType; 4178 if (isa<ZExtInst>(I)) 4179 LType = ISD::ZEXTLOAD; 4180 else { 4181 assert(isa<SExtInst>(I) && "Unexpected ext type!"); 4182 LType = ISD::SEXTLOAD; 4183 } 4184 if (TLI && !TLI->isLoadExtLegal(LType, VT, LoadVT)) { 4185 I = OldExt; 4186 TPT.rollback(LastKnownGood); 4187 return false; 4188 } 4189 4190 // Move the extend into the same block as the load, so that SelectionDAG 4191 // can fold it. 4192 TPT.commit(); 4193 I->removeFromParent(); 4194 I->insertAfter(LI); 4195 ++NumExtsMoved; 4196 return true; 4197 } 4198 4199 bool CodeGenPrepare::optimizeExtUses(Instruction *I) { 4200 BasicBlock *DefBB = I->getParent(); 4201 4202 // If the result of a {s|z}ext and its source are both live out, rewrite all 4203 // other uses of the source with result of extension. 4204 Value *Src = I->getOperand(0); 4205 if (Src->hasOneUse()) 4206 return false; 4207 4208 // Only do this xform if truncating is free. 4209 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) 4210 return false; 4211 4212 // Only safe to perform the optimization if the source is also defined in 4213 // this block. 4214 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) 4215 return false; 4216 4217 bool DefIsLiveOut = false; 4218 for (User *U : I->users()) { 4219 Instruction *UI = cast<Instruction>(U); 4220 4221 // Figure out which BB this ext is used in. 
4222 BasicBlock *UserBB = UI->getParent(); 4223 if (UserBB == DefBB) continue; 4224 DefIsLiveOut = true; 4225 break; 4226 } 4227 if (!DefIsLiveOut) 4228 return false; 4229 4230 // Make sure none of the uses are PHI nodes. 4231 for (User *U : Src->users()) { 4232 Instruction *UI = cast<Instruction>(U); 4233 BasicBlock *UserBB = UI->getParent(); 4234 if (UserBB == DefBB) continue; 4235 // Be conservative. We don't want this xform to end up introducing 4236 // reloads just before load / store instructions. 4237 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) 4238 return false; 4239 } 4240 4241 // InsertedTruncs - Only insert one trunc in each block once. 4242 DenseMap<BasicBlock*, Instruction*> InsertedTruncs; 4243 4244 bool MadeChange = false; 4245 for (Use &U : Src->uses()) { 4246 Instruction *User = cast<Instruction>(U.getUser()); 4247 4248 // Figure out which BB this ext is used in. 4249 BasicBlock *UserBB = User->getParent(); 4250 if (UserBB == DefBB) continue; 4251 4252 // Both src and def are live in this block. Rewrite the use. 4253 Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; 4254 4255 if (!InsertedTrunc) { 4256 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 4257 assert(InsertPt != UserBB->end()); 4258 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); 4259 InsertedInsts.insert(InsertedTrunc); 4260 } 4261 4262 // Replace a use of the {s|z}ext source with a use of the result. 4263 U = InsertedTrunc; 4264 ++NumExtUses; 4265 MadeChange = true; 4266 } 4267 4268 return MadeChange; 4269 } 4270 4271 // Find loads whose uses only use some of the loaded value's bits. Add an "and" 4272 // just after the load if the target can fold this into one extload instruction, 4273 // with the hope of eliminating some of the other later "and" instructions using 4274 // the loaded value. "and"s that are made trivially redundant by the insertion 4275 // of the new "and" are removed by this function, while others (e.g. those whose 4276 // path from the load goes through a phi) are left for isel to potentially 4277 // remove. 4278 // 4279 // For example: 4280 // 4281 // b0: 4282 // x = load i32 4283 // ... 4284 // b1: 4285 // y = and x, 0xff 4286 // z = use y 4287 // 4288 // becomes: 4289 // 4290 // b0: 4291 // x = load i32 4292 // x' = and x, 0xff 4293 // ... 4294 // b1: 4295 // z = use x' 4296 // 4297 // whereas: 4298 // 4299 // b0: 4300 // x1 = load i32 4301 // ... 4302 // b1: 4303 // x2 = load i32 4304 // ... 4305 // b2: 4306 // x = phi x1, x2 4307 // y = and x, 0xff 4308 // 4309 // becomes (after a call to optimizeLoadExt for each load): 4310 // 4311 // b0: 4312 // x1 = load i32 4313 // x1' = and x1, 0xff 4314 // ... 4315 // b1: 4316 // x2 = load i32 4317 // x2' = and x2, 0xff 4318 // ... 4319 // b2: 4320 // x = phi x1', x2' 4321 // y = and x, 0xff 4322 // 4323 4324 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { 4325 4326 if (!Load->isSimple() || 4327 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) 4328 return false; 4329 4330 // Skip loads we've already transformed or have no reason to transform. 4331 if (Load->hasOneUse()) { 4332 User *LoadUser = *Load->user_begin(); 4333 if (cast<Instruction>(LoadUser)->getParent() == Load->getParent() && 4334 !dyn_cast<PHINode>(LoadUser)) 4335 return false; 4336 } 4337 4338 // Look at all uses of Load, looking through phis, to determine how many bits 4339 // of the loaded value are needed. 
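// Illustrative example (hypothetical IR): if the only transitive users of the
// loaded value %x are
//   %m = and i32 %x, 255
//   %t = trunc i32 %x to i8
// then DemandBits and WidestAndBits both end up as 0xff, so (provided the
// target supports the corresponding extending load) we can insert
// 'and i32 %x, 255' right after the load and drop the original 'and'.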
4340 SmallVector<Instruction *, 8> WorkList; 4341 SmallPtrSet<Instruction *, 16> Visited; 4342 SmallVector<Instruction *, 8> AndsToMaybeRemove; 4343 for (auto *U : Load->users()) 4344 WorkList.push_back(cast<Instruction>(U)); 4345 4346 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); 4347 unsigned BitWidth = LoadResultVT.getSizeInBits(); 4348 APInt DemandBits(BitWidth, 0); 4349 APInt WidestAndBits(BitWidth, 0); 4350 4351 while (!WorkList.empty()) { 4352 Instruction *I = WorkList.back(); 4353 WorkList.pop_back(); 4354 4355 // Break use-def graph loops. 4356 if (!Visited.insert(I).second) 4357 continue; 4358 4359 // For a PHI node, push all of its users. 4360 if (auto *Phi = dyn_cast<PHINode>(I)) { 4361 for (auto *U : Phi->users()) 4362 WorkList.push_back(cast<Instruction>(U)); 4363 continue; 4364 } 4365 4366 switch (I->getOpcode()) { 4367 case llvm::Instruction::And: { 4368 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); 4369 if (!AndC) 4370 return false; 4371 APInt AndBits = AndC->getValue(); 4372 DemandBits |= AndBits; 4373 // Keep track of the widest and mask we see. 4374 if (AndBits.ugt(WidestAndBits)) 4375 WidestAndBits = AndBits; 4376 if (AndBits == WidestAndBits && I->getOperand(0) == Load) 4377 AndsToMaybeRemove.push_back(I); 4378 break; 4379 } 4380 4381 case llvm::Instruction::Shl: { 4382 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); 4383 if (!ShlC) 4384 return false; 4385 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); 4386 auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt); 4387 DemandBits |= ShlDemandBits; 4388 break; 4389 } 4390 4391 case llvm::Instruction::Trunc: { 4392 EVT TruncVT = TLI->getValueType(*DL, I->getType()); 4393 unsigned TruncBitWidth = TruncVT.getSizeInBits(); 4394 auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth); 4395 DemandBits |= TruncBits; 4396 break; 4397 } 4398 4399 default: 4400 return false; 4401 } 4402 } 4403 4404 uint32_t ActiveBits = DemandBits.getActiveBits(); 4405 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the 4406 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, 4407 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but 4408 // (and (load x) 1) is not matched as a single instruction, rather as a LDR 4409 // followed by an AND. 4410 // TODO: Look into removing this restriction by fixing backends to either 4411 // return false for isLoadExtLegal for i1 or have them select this pattern to 4412 // a single instruction. 4413 // 4414 // Also avoid hoisting if we didn't see any ands with the exact DemandBits 4415 // mask, since these are the only ands that will be removed by isel. 4416 if (ActiveBits <= 1 || !APIntOps::isMask(ActiveBits, DemandBits) || 4417 WidestAndBits != DemandBits) 4418 return false; 4419 4420 LLVMContext &Ctx = Load->getType()->getContext(); 4421 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); 4422 EVT TruncVT = TLI->getValueType(*DL, TruncTy); 4423 4424 // Reject cases that won't be matched as extloads. 4425 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || 4426 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) 4427 return false; 4428 4429 IRBuilder<> Builder(Load->getNextNode()); 4430 auto *NewAnd = dyn_cast<Instruction>( 4431 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); 4432 4433 // Replace all uses of load with new and (except for the use of load in the 4434 // new and itself). 
4435 Load->replaceAllUsesWith(NewAnd); 4436 NewAnd->setOperand(0, Load); 4437 4438 // Remove any and instructions that are now redundant. 4439 for (auto *And : AndsToMaybeRemove) 4440 // Check that the and mask is the same as the one we decided to put on the 4441 // new and. 4442 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { 4443 And->replaceAllUsesWith(NewAnd); 4444 if (&*CurInstIterator == And) 4445 CurInstIterator = std::next(And->getIterator()); 4446 And->eraseFromParent(); 4447 ++NumAndUses; 4448 } 4449 4450 ++NumAndsAdded; 4451 return true; 4452 } 4453 4454 /// Check if V (an operand of a select instruction) is an expensive instruction 4455 /// that is only used once. 4456 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { 4457 auto *I = dyn_cast<Instruction>(V); 4458 // If it's safe to speculatively execute, then it should not have side 4459 // effects; therefore, it's safe to sink and possibly *not* execute. 4460 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && 4461 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; 4462 } 4463 4464 /// Returns true if a SelectInst should be turned into an explicit branch. 4465 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, 4466 SelectInst *SI) { 4467 // FIXME: This should use the same heuristics as IfConversion to determine 4468 // whether a select is better represented as a branch. This requires that 4469 // branch probability metadata is preserved for the select, which is not the 4470 // case currently. 4471 4472 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 4473 4474 // If a branch is predictable, an out-of-order CPU can avoid blocking on its 4475 // comparison condition. If the compare has more than one use, there's 4476 // probably another cmov or setcc around, so it's not worth emitting a branch. 4477 if (!Cmp || !Cmp->hasOneUse()) 4478 return false; 4479 4480 // If either operand of the select is expensive and only needed on one side 4481 // of the select, we should form a branch. 4482 if (sinkSelectOperand(TTI, SI->getTrueValue()) || 4483 sinkSelectOperand(TTI, SI->getFalseValue())) 4484 return true; 4485 4486 return false; 4487 } 4488 4489 4490 /// If we have a SelectInst that will likely profit from branch prediction, 4491 /// turn it into a branch. 4492 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { 4493 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); 4494 4495 // Can we convert the 'select' to CF ? 4496 if (DisableSelectToBranch || OptSize || !TLI || VectorCond) 4497 return false; 4498 4499 TargetLowering::SelectSupportKind SelectKind; 4500 if (VectorCond) 4501 SelectKind = TargetLowering::VectorMaskSelect; 4502 else if (SI->getType()->isVectorTy()) 4503 SelectKind = TargetLowering::ScalarCondVectorVal; 4504 else 4505 SelectKind = TargetLowering::ScalarValSelect; 4506 4507 // Do we have efficient codegen support for this kind of 'selects' ? 4508 if (TLI->isSelectSupported(SelectKind)) { 4509 // We have efficient codegen support for the select instruction. 4510 // Check if it is profitable to keep this 'select'. 
4511 if (!TLI->isPredictableSelectExpensive() || 4512 !isFormingBranchFromSelectProfitable(TTI, SI)) 4513 return false; 4514 } 4515 4516 ModifiedDT = true; 4517 4518 // Transform a sequence like this: 4519 // start: 4520 // %cmp = cmp uge i32 %a, %b 4521 // %sel = select i1 %cmp, i32 %c, i32 %d 4522 // 4523 // Into: 4524 // start: 4525 // %cmp = cmp uge i32 %a, %b 4526 // br i1 %cmp, label %select.true, label %select.false 4527 // select.true: 4528 // br label %select.end 4529 // select.false: 4530 // br label %select.end 4531 // select.end: 4532 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] 4533 // 4534 // In addition, we may sink instructions that produce %c or %d from 4535 // the entry block into the destination(s) of the new branch. 4536 // If the true or false blocks do not contain a sunken instruction, that 4537 // block and its branch may be optimized away. In that case, one side of the 4538 // first branch will point directly to select.end, and the corresponding PHI 4539 // predecessor block will be the start block. 4540 4541 // First, we split the block containing the select into 2 blocks. 4542 BasicBlock *StartBlock = SI->getParent(); 4543 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI)); 4544 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); 4545 4546 // Delete the unconditional branch that was just created by the split. 4547 StartBlock->getTerminator()->eraseFromParent(); 4548 4549 // These are the new basic blocks for the conditional branch. 4550 // At least one will become an actual new basic block. 4551 BasicBlock *TrueBlock = nullptr; 4552 BasicBlock *FalseBlock = nullptr; 4553 4554 // Sink expensive instructions into the conditional blocks to avoid executing 4555 // them speculatively. 4556 if (sinkSelectOperand(TTI, SI->getTrueValue())) { 4557 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", 4558 EndBlock->getParent(), EndBlock); 4559 auto *TrueBranch = BranchInst::Create(EndBlock, TrueBlock); 4560 auto *TrueInst = cast<Instruction>(SI->getTrueValue()); 4561 TrueInst->moveBefore(TrueBranch); 4562 } 4563 if (sinkSelectOperand(TTI, SI->getFalseValue())) { 4564 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", 4565 EndBlock->getParent(), EndBlock); 4566 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); 4567 auto *FalseInst = cast<Instruction>(SI->getFalseValue()); 4568 FalseInst->moveBefore(FalseBranch); 4569 } 4570 4571 // If there was nothing to sink, then arbitrarily choose the 'false' side 4572 // for a new input value to the PHI. 4573 if (TrueBlock == FalseBlock) { 4574 assert(TrueBlock == nullptr && 4575 "Unexpected basic block transform while optimizing select"); 4576 4577 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", 4578 EndBlock->getParent(), EndBlock); 4579 BranchInst::Create(EndBlock, FalseBlock); 4580 } 4581 4582 // Insert the real conditional branch based on the original condition. 4583 // If we did not create a new block for one of the 'true' or 'false' paths 4584 // of the condition, it means that side of the branch goes to the end block 4585 // directly and the path originates from the start block from the point of 4586 // view of the new PHI. 
4587 if (TrueBlock == nullptr) { 4588 BranchInst::Create(EndBlock, FalseBlock, SI->getCondition(), SI); 4589 TrueBlock = StartBlock; 4590 } else if (FalseBlock == nullptr) { 4591 BranchInst::Create(TrueBlock, EndBlock, SI->getCondition(), SI); 4592 FalseBlock = StartBlock; 4593 } else { 4594 BranchInst::Create(TrueBlock, FalseBlock, SI->getCondition(), SI); 4595 } 4596 4597 // The select itself is replaced with a PHI Node. 4598 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); 4599 PN->takeName(SI); 4600 PN->addIncoming(SI->getTrueValue(), TrueBlock); 4601 PN->addIncoming(SI->getFalseValue(), FalseBlock); 4602 4603 SI->replaceAllUsesWith(PN); 4604 SI->eraseFromParent(); 4605 4606 // Instruct OptimizeBlock to skip to the next block. 4607 CurInstIterator = StartBlock->end(); 4608 ++NumSelectsExpanded; 4609 return true; 4610 } 4611 4612 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { 4613 SmallVector<int, 16> Mask(SVI->getShuffleMask()); 4614 int SplatElem = -1; 4615 for (unsigned i = 0; i < Mask.size(); ++i) { 4616 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) 4617 return false; 4618 SplatElem = Mask[i]; 4619 } 4620 4621 return true; 4622 } 4623 4624 /// Some targets have expensive vector shifts if the lanes aren't all the same 4625 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases 4626 /// it's often worth sinking a shufflevector splat down to its use so that 4627 /// codegen can spot all lanes are identical. 4628 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { 4629 BasicBlock *DefBB = SVI->getParent(); 4630 4631 // Only do this xform if variable vector shifts are particularly expensive. 4632 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) 4633 return false; 4634 4635 // We only expect better codegen by sinking a shuffle if we can recognise a 4636 // constant splat. 4637 if (!isBroadcastShuffle(SVI)) 4638 return false; 4639 4640 // InsertedShuffles - Only insert a shuffle in each block once. 4641 DenseMap<BasicBlock*, Instruction*> InsertedShuffles; 4642 4643 bool MadeChange = false; 4644 for (User *U : SVI->users()) { 4645 Instruction *UI = cast<Instruction>(U); 4646 4647 // Figure out which BB this ext is used in. 4648 BasicBlock *UserBB = UI->getParent(); 4649 if (UserBB == DefBB) continue; 4650 4651 // For now only apply this when the splat is used by a shift instruction. 4652 if (!UI->isShift()) continue; 4653 4654 // Everything checks out, sink the shuffle if the user's block doesn't 4655 // already have a copy. 4656 Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; 4657 4658 if (!InsertedShuffle) { 4659 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); 4660 assert(InsertPt != UserBB->end()); 4661 InsertedShuffle = 4662 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 4663 SVI->getOperand(2), "", &*InsertPt); 4664 } 4665 4666 UI->replaceUsesOfWith(SVI, InsertedShuffle); 4667 MadeChange = true; 4668 } 4669 4670 // If we removed all uses, nuke the shuffle. 
4671 if (SVI->use_empty()) { 4672 SVI->eraseFromParent(); 4673 MadeChange = true; 4674 } 4675 4676 return MadeChange; 4677 } 4678 4679 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { 4680 if (!TLI || !DL) 4681 return false; 4682 4683 Value *Cond = SI->getCondition(); 4684 Type *OldType = Cond->getType(); 4685 LLVMContext &Context = Cond->getContext(); 4686 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); 4687 unsigned RegWidth = RegType.getSizeInBits(); 4688 4689 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) 4690 return false; 4691 4692 // If the register width is greater than the type width, expand the condition 4693 // of the switch instruction and each case constant to the width of the 4694 // register. By widening the type of the switch condition, subsequent 4695 // comparisons (for case comparisons) will not need to be extended to the 4696 // preferred register width, so we will potentially eliminate N-1 extends, 4697 // where N is the number of cases in the switch. 4698 auto *NewType = Type::getIntNTy(Context, RegWidth); 4699 4700 // Zero-extend the switch condition and case constants unless the switch 4701 // condition is a function argument that is already being sign-extended. 4702 // In that case, we can avoid an unnecessary mask/extension by sign-extending 4703 // everything instead. 4704 Instruction::CastOps ExtType = Instruction::ZExt; 4705 if (auto *Arg = dyn_cast<Argument>(Cond)) 4706 if (Arg->hasSExtAttr()) 4707 ExtType = Instruction::SExt; 4708 4709 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); 4710 ExtInst->insertBefore(SI); 4711 SI->setCondition(ExtInst); 4712 for (SwitchInst::CaseIt Case : SI->cases()) { 4713 APInt NarrowConst = Case.getCaseValue()->getValue(); 4714 APInt WideConst = (ExtType == Instruction::ZExt) ? 4715 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); 4716 Case.setValue(ConstantInt::get(Context, WideConst)); 4717 } 4718 4719 return true; 4720 } 4721 4722 namespace { 4723 /// \brief Helper class to promote a scalar operation to a vector one. 4724 /// This class is used to move downward extractelement transition. 4725 /// E.g., 4726 /// a = vector_op <2 x i32> 4727 /// b = extractelement <2 x i32> a, i32 0 4728 /// c = scalar_op b 4729 /// store c 4730 /// 4731 /// => 4732 /// a = vector_op <2 x i32> 4733 /// c = vector_op a (equivalent to scalar_op on the related lane) 4734 /// * d = extractelement <2 x i32> c, i32 0 4735 /// * store d 4736 /// Assuming both extractelement and store can be combine, we get rid of the 4737 /// transition. 4738 class VectorPromoteHelper { 4739 /// DataLayout associated with the current module. 4740 const DataLayout &DL; 4741 4742 /// Used to perform some checks on the legality of vector operations. 4743 const TargetLowering &TLI; 4744 4745 /// Used to estimated the cost of the promoted chain. 4746 const TargetTransformInfo &TTI; 4747 4748 /// The transition being moved downwards. 4749 Instruction *Transition; 4750 /// The sequence of instructions to be promoted. 4751 SmallVector<Instruction *, 4> InstsToBePromoted; 4752 /// Cost of combining a store and an extract. 4753 unsigned StoreExtractCombineCost; 4754 /// Instruction that will be combined with the transition. 4755 Instruction *CombineInst; 4756 4757 /// \brief The instruction that represents the current end of the transition. 4758 /// Since we are faking the promotion until we reach the end of the chain 4759 /// of computation, we need a way to get the current end of the transition. 
4760 Instruction *getEndOfTransition() const { 4761 if (InstsToBePromoted.empty()) 4762 return Transition; 4763 return InstsToBePromoted.back(); 4764 } 4765 4766 /// \brief Return the index of the original value in the transition. 4767 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, 4768 /// c, is at index 0. 4769 unsigned getTransitionOriginalValueIdx() const { 4770 assert(isa<ExtractElementInst>(Transition) && 4771 "Other kind of transitions are not supported yet"); 4772 return 0; 4773 } 4774 4775 /// \brief Return the index of the index in the transition. 4776 /// E.g., for "extractelement <2 x i32> c, i32 0" the index 4777 /// is at index 1. 4778 unsigned getTransitionIdx() const { 4779 assert(isa<ExtractElementInst>(Transition) && 4780 "Other kind of transitions are not supported yet"); 4781 return 1; 4782 } 4783 4784 /// \brief Get the type of the transition. 4785 /// This is the type of the original value. 4786 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the 4787 /// transition is <2 x i32>. 4788 Type *getTransitionType() const { 4789 return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); 4790 } 4791 4792 /// \brief Promote \p ToBePromoted by moving \p Def downward through. 4793 /// I.e., we have the following sequence: 4794 /// Def = Transition <ty1> a to <ty2> 4795 /// b = ToBePromoted <ty2> Def, ... 4796 /// => 4797 /// b = ToBePromoted <ty1> a, ... 4798 /// Def = Transition <ty1> ToBePromoted to <ty2> 4799 void promoteImpl(Instruction *ToBePromoted); 4800 4801 /// \brief Check whether or not it is profitable to promote all the 4802 /// instructions enqueued to be promoted. 4803 bool isProfitableToPromote() { 4804 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); 4805 unsigned Index = isa<ConstantInt>(ValIdx) 4806 ? cast<ConstantInt>(ValIdx)->getZExtValue() 4807 : -1; 4808 Type *PromotedType = getTransitionType(); 4809 4810 StoreInst *ST = cast<StoreInst>(CombineInst); 4811 unsigned AS = ST->getPointerAddressSpace(); 4812 unsigned Align = ST->getAlignment(); 4813 // Check if this store is supported. 4814 if (!TLI.allowsMisalignedMemoryAccesses( 4815 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, 4816 Align)) { 4817 // If this is not supported, there is no way we can combine 4818 // the extract with the store. 4819 return false; 4820 } 4821 4822 // The scalar chain of computation has to pay for the transition 4823 // scalar to vector. 4824 // The vector chain has to account for the combining cost. 4825 uint64_t ScalarCost = 4826 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); 4827 uint64_t VectorCost = StoreExtractCombineCost; 4828 for (const auto &Inst : InstsToBePromoted) { 4829 // Compute the cost. 4830 // By construction, all instructions being promoted are arithmetic ones. 4831 // Moreover, one argument is a constant that can be viewed as a splat 4832 // constant. 4833 Value *Arg0 = Inst->getOperand(0); 4834 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || 4835 isa<ConstantFP>(Arg0); 4836 TargetTransformInfo::OperandValueKind Arg0OVK = 4837 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue 4838 : TargetTransformInfo::OK_AnyValue; 4839 TargetTransformInfo::OperandValueKind Arg1OVK = 4840 !IsArg0Constant ? 
TargetTransformInfo::OK_UniformConstantValue 4841 : TargetTransformInfo::OK_AnyValue; 4842 ScalarCost += TTI.getArithmeticInstrCost( 4843 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); 4844 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, 4845 Arg0OVK, Arg1OVK); 4846 } 4847 DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: " 4848 << ScalarCost << "\nVector: " << VectorCost << '\n'); 4849 return ScalarCost > VectorCost; 4850 } 4851 4852 /// \brief Generate a constant vector with \p Val with the same 4853 /// number of elements as the transition. 4854 /// \p UseSplat defines whether or not \p Val should be replicated 4855 /// across the whole vector. 4856 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, 4857 /// otherwise we generate a vector with as many undef as possible: 4858 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only 4859 /// used at the index of the extract. 4860 Value *getConstantVector(Constant *Val, bool UseSplat) const { 4861 unsigned ExtractIdx = UINT_MAX; 4862 if (!UseSplat) { 4863 // If we cannot determine where the constant must be, we have to 4864 // use a splat constant. 4865 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); 4866 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) 4867 ExtractIdx = CstVal->getSExtValue(); 4868 else 4869 UseSplat = true; 4870 } 4871 4872 unsigned End = getTransitionType()->getVectorNumElements(); 4873 if (UseSplat) 4874 return ConstantVector::getSplat(End, Val); 4875 4876 SmallVector<Constant *, 4> ConstVec; 4877 UndefValue *UndefVal = UndefValue::get(Val->getType()); 4878 for (unsigned Idx = 0; Idx != End; ++Idx) { 4879 if (Idx == ExtractIdx) 4880 ConstVec.push_back(Val); 4881 else 4882 ConstVec.push_back(UndefVal); 4883 } 4884 return ConstantVector::get(ConstVec); 4885 } 4886 4887 /// \brief Check if promoting to a vector type an operand at \p OperandIdx 4888 /// in \p Use can trigger undefined behavior. 4889 static bool canCauseUndefinedBehavior(const Instruction *Use, 4890 unsigned OperandIdx) { 4891 // This is not safe to introduce undef when the operand is on 4892 // the right hand side of a division-like instruction. 4893 if (OperandIdx != 1) 4894 return false; 4895 switch (Use->getOpcode()) { 4896 default: 4897 return false; 4898 case Instruction::SDiv: 4899 case Instruction::UDiv: 4900 case Instruction::SRem: 4901 case Instruction::URem: 4902 return true; 4903 case Instruction::FDiv: 4904 case Instruction::FRem: 4905 return !Use->hasNoNaNs(); 4906 } 4907 llvm_unreachable(nullptr); 4908 } 4909 4910 public: 4911 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, 4912 const TargetTransformInfo &TTI, Instruction *Transition, 4913 unsigned CombineCost) 4914 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), 4915 StoreExtractCombineCost(CombineCost), CombineInst(nullptr) { 4916 assert(Transition && "Do not know how to promote null"); 4917 } 4918 4919 /// \brief Check if we can promote \p ToBePromoted to \p Type. 4920 bool canPromote(const Instruction *ToBePromoted) const { 4921 // We could support CastInst too. 4922 return isa<BinaryOperator>(ToBePromoted); 4923 } 4924 4925 /// \brief Check if it is profitable to promote \p ToBePromoted 4926 /// by moving downward the transition through. 4927 bool shouldPromote(const Instruction *ToBePromoted) const { 4928 // Promote only if all the operands can be statically expanded. 
4929 // Indeed, we do not want to introduce any new kind of transitions. 4930 for (const Use &U : ToBePromoted->operands()) { 4931 const Value *Val = U.get(); 4932 if (Val == getEndOfTransition()) { 4933 // If the use is a division and the transition is on the rhs, 4934 // we cannot promote the operation, otherwise we may create a 4935 // division by zero. 4936 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) 4937 return false; 4938 continue; 4939 } 4940 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && 4941 !isa<ConstantFP>(Val)) 4942 return false; 4943 } 4944 // Check that the resulting operation is legal. 4945 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); 4946 if (!ISDOpcode) 4947 return false; 4948 return StressStoreExtract || 4949 TLI.isOperationLegalOrCustom( 4950 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); 4951 } 4952 4953 /// \brief Check whether or not \p Use can be combined 4954 /// with the transition. 4955 /// I.e., is it possible to do Use(Transition) => AnotherUse? 4956 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } 4957 4958 /// \brief Record \p ToBePromoted as part of the chain to be promoted. 4959 void enqueueForPromotion(Instruction *ToBePromoted) { 4960 InstsToBePromoted.push_back(ToBePromoted); 4961 } 4962 4963 /// \brief Set the instruction that will be combined with the transition. 4964 void recordCombineInstruction(Instruction *ToBeCombined) { 4965 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); 4966 CombineInst = ToBeCombined; 4967 } 4968 4969 /// \brief Promote all the instructions enqueued for promotion if it is 4970 /// is profitable. 4971 /// \return True if the promotion happened, false otherwise. 4972 bool promote() { 4973 // Check if there is something to promote. 4974 // Right now, if we do not have anything to combine with, 4975 // we assume the promotion is not profitable. 4976 if (InstsToBePromoted.empty() || !CombineInst) 4977 return false; 4978 4979 // Check cost. 4980 if (!StressStoreExtract && !isProfitableToPromote()) 4981 return false; 4982 4983 // Promote. 4984 for (auto &ToBePromoted : InstsToBePromoted) 4985 promoteImpl(ToBePromoted); 4986 InstsToBePromoted.clear(); 4987 return true; 4988 } 4989 }; 4990 } // End of anonymous namespace. 4991 4992 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { 4993 // At this point, we know that all the operands of ToBePromoted but Def 4994 // can be statically promoted. 4995 // For Def, we need to use its parameter in ToBePromoted: 4996 // b = ToBePromoted ty1 a 4997 // Def = Transition ty1 b to ty2 4998 // Move the transition down. 4999 // 1. Replace all uses of the promoted operation by the transition. 5000 // = ... b => = ... Def. 5001 assert(ToBePromoted->getType() == Transition->getType() && 5002 "The type of the result of the transition does not match " 5003 "the final type"); 5004 ToBePromoted->replaceAllUsesWith(Transition); 5005 // 2. Update the type of the uses. 5006 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. 5007 Type *TransitionTy = getTransitionType(); 5008 ToBePromoted->mutateType(TransitionTy); 5009 // 3. Update all the operands of the promoted operation with promoted 5010 // operands. 5011 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. 
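  // For a constant operand this means materializing a vector constant, e.g.
  // (illustrative) promoting 'add i32 b, 7' through a <2 x i32> transition
  // with extract index 0 rewrites the scalar 7 as <i32 7, i32 undef>, or as
  // the splat <i32 7, i32 7> when an undef lane could be unsafe (e.g. on the
  // right-hand side of a udiv).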
5012 for (Use &U : ToBePromoted->operands()) { 5013 Value *Val = U.get(); 5014 Value *NewVal = nullptr; 5015 if (Val == Transition) 5016 NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); 5017 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || 5018 isa<ConstantFP>(Val)) { 5019 // Use a splat constant if it is not safe to use undef. 5020 NewVal = getConstantVector( 5021 cast<Constant>(Val), 5022 isa<UndefValue>(Val) || 5023 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); 5024 } else 5025 llvm_unreachable("Did you modified shouldPromote and forgot to update " 5026 "this?"); 5027 ToBePromoted->setOperand(U.getOperandNo(), NewVal); 5028 } 5029 Transition->removeFromParent(); 5030 Transition->insertAfter(ToBePromoted); 5031 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); 5032 } 5033 5034 /// Some targets can do store(extractelement) with one instruction. 5035 /// Try to push the extractelement towards the stores when the target 5036 /// has this feature and this is profitable. 5037 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { 5038 unsigned CombineCost = UINT_MAX; 5039 if (DisableStoreExtract || !TLI || 5040 (!StressStoreExtract && 5041 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), 5042 Inst->getOperand(1), CombineCost))) 5043 return false; 5044 5045 // At this point we know that Inst is a vector to scalar transition. 5046 // Try to move it down the def-use chain, until: 5047 // - We can combine the transition with its single use 5048 // => we got rid of the transition. 5049 // - We escape the current basic block 5050 // => we would need to check that we are moving it at a cheaper place and 5051 // we do not do that for now. 5052 BasicBlock *Parent = Inst->getParent(); 5053 DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); 5054 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); 5055 // If the transition has more than one use, assume this is not going to be 5056 // beneficial. 5057 while (Inst->hasOneUse()) { 5058 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); 5059 DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); 5060 5061 if (ToBePromoted->getParent() != Parent) { 5062 DEBUG(dbgs() << "Instruction to promote is in a different block (" 5063 << ToBePromoted->getParent()->getName() 5064 << ") than the transition (" << Parent->getName() << ").\n"); 5065 return false; 5066 } 5067 5068 if (VPH.canCombine(ToBePromoted)) { 5069 DEBUG(dbgs() << "Assume " << *Inst << '\n' 5070 << "will be combined with: " << *ToBePromoted << '\n'); 5071 VPH.recordCombineInstruction(ToBePromoted); 5072 bool Changed = VPH.promote(); 5073 NumStoreExtractExposed += Changed; 5074 return Changed; 5075 } 5076 5077 DEBUG(dbgs() << "Try promoting.\n"); 5078 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) 5079 return false; 5080 5081 DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); 5082 5083 VPH.enqueueForPromotion(ToBePromoted); 5084 Inst = ToBePromoted; 5085 } 5086 return false; 5087 } 5088 5089 bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) { 5090 // Bail out if we inserted the instruction to prevent optimizations from 5091 // stepping on each other's toes. 5092 if (InsertedInsts.count(I)) 5093 return false; 5094 5095 if (PHINode *P = dyn_cast<PHINode>(I)) { 5096 // It is possible for very late stage optimizations (such as SimplifyCFG) 5097 // to introduce PHI nodes too late to be cleaned up. 
If we detect such a 5098 // trivial PHI, go ahead and zap it here. 5099 if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) { 5100 P->replaceAllUsesWith(V); 5101 P->eraseFromParent(); 5102 ++NumPHIsElim; 5103 return true; 5104 } 5105 return false; 5106 } 5107 5108 if (CastInst *CI = dyn_cast<CastInst>(I)) { 5109 // If the source of the cast is a constant, then this should have 5110 // already been constant folded. The only reason NOT to constant fold 5111 // it is if something (e.g. LSR) was careful to place the constant 5112 // evaluation in a block other than then one that uses it (e.g. to hoist 5113 // the address of globals out of a loop). If this is the case, we don't 5114 // want to forward-subst the cast. 5115 if (isa<Constant>(CI->getOperand(0))) 5116 return false; 5117 5118 if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL)) 5119 return true; 5120 5121 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { 5122 /// Sink a zext or sext into its user blocks if the target type doesn't 5123 /// fit in one register 5124 if (TLI && 5125 TLI->getTypeAction(CI->getContext(), 5126 TLI->getValueType(*DL, CI->getType())) == 5127 TargetLowering::TypeExpandInteger) { 5128 return SinkCast(CI); 5129 } else { 5130 bool MadeChange = moveExtToFormExtLoad(I); 5131 return MadeChange | optimizeExtUses(I); 5132 } 5133 } 5134 return false; 5135 } 5136 5137 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 5138 if (!TLI || !TLI->hasMultipleConditionRegisters()) 5139 return OptimizeCmpExpression(CI); 5140 5141 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 5142 stripInvariantGroupMetadata(*LI); 5143 if (TLI) { 5144 bool Modified = optimizeLoadExt(LI); 5145 unsigned AS = LI->getPointerAddressSpace(); 5146 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); 5147 return Modified; 5148 } 5149 return false; 5150 } 5151 5152 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 5153 stripInvariantGroupMetadata(*SI); 5154 if (TLI) { 5155 unsigned AS = SI->getPointerAddressSpace(); 5156 return optimizeMemoryInst(I, SI->getOperand(1), 5157 SI->getOperand(0)->getType(), AS); 5158 } 5159 return false; 5160 } 5161 5162 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); 5163 5164 if (BinOp && (BinOp->getOpcode() == Instruction::AShr || 5165 BinOp->getOpcode() == Instruction::LShr)) { 5166 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); 5167 if (TLI && CI && TLI->hasExtractBitsInsn()) 5168 return OptimizeExtractBits(BinOp, CI, *TLI, *DL); 5169 5170 return false; 5171 } 5172 5173 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 5174 if (GEPI->hasAllZeroIndices()) { 5175 /// The GEP operand must be a pointer, so must its result -> BitCast 5176 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), 5177 GEPI->getName(), GEPI); 5178 GEPI->replaceAllUsesWith(NC); 5179 GEPI->eraseFromParent(); 5180 ++NumGEPsElim; 5181 optimizeInst(NC, ModifiedDT); 5182 return true; 5183 } 5184 return false; 5185 } 5186 5187 if (CallInst *CI = dyn_cast<CallInst>(I)) 5188 return optimizeCallInst(CI, ModifiedDT); 5189 5190 if (SelectInst *SI = dyn_cast<SelectInst>(I)) 5191 return optimizeSelectInst(SI); 5192 5193 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) 5194 return optimizeShuffleVectorInst(SVI); 5195 5196 if (auto *Switch = dyn_cast<SwitchInst>(I)) 5197 return optimizeSwitchInst(Switch); 5198 5199 if (isa<ExtractElementInst>(I)) 5200 return optimizeExtractElementInst(I); 5201 5202 return false; 5203 } 5204 5205 /// Given an OR instruction, check to see if this is a bitreverse 
5206 /// idiom. If so, insert the new intrinsic and return true.
5207 static bool makeBitReverse(Instruction &I, const DataLayout &DL,
5208                            const TargetLowering &TLI) {
5209   if (!I.getType()->isIntegerTy() ||
5210       !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
5211                                     TLI.getValueType(DL, I.getType(), true)))
5212     return false;
5213 
5214   SmallVector<Instruction*, 4> Insts;
5215   if (!recognizeBitReverseOrBSwapIdiom(&I, false, true, Insts))
5216     return false;
5217   Instruction *LastInst = Insts.back();
5218   I.replaceAllUsesWith(LastInst);
5219   RecursivelyDeleteTriviallyDeadInstructions(&I);
5220   return true;
5221 }
5222 
5223 // In this pass we look for GEP and cast instructions that are used
5224 // across basic blocks and rewrite them to improve basic-block-at-a-time
5225 // selection.
5226 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
5227   SunkAddrs.clear();
5228   bool MadeChange = false;
5229 
5230   CurInstIterator = BB.begin();
5231   while (CurInstIterator != BB.end()) {
5232     MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
5233     if (ModifiedDT)
5234       return true;
5235   }
5236 
5237   bool MadeBitReverse = true;
5238   while (TLI && MadeBitReverse) {
5239     MadeBitReverse = false;
5240     for (auto &I : reverse(BB)) {
5241       if (makeBitReverse(I, *DL, *TLI)) {
5242         MadeBitReverse = MadeChange = true;
5243         break;
5244       }
5245     }
5246   }
5247   MadeChange |= dupRetToEnableTailCallOpts(&BB);
5248 
5249   return MadeChange;
5250 }
5251 
5252 // If llvm.dbg.value is far away from the value it describes, ISel may not be
5253 // able to handle it properly. ISel will drop the llvm.dbg.value if it cannot
5254 // find a node corresponding to the value.
5255 bool CodeGenPrepare::placeDbgValues(Function &F) {
5256   bool MadeChange = false;
5257   for (BasicBlock &BB : F) {
5258     Instruction *PrevNonDbgInst = nullptr;
5259     for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
5260       Instruction *Insn = &*BI++;
5261       DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
5262       // Leave dbg.values that refer to an alloca alone. These
5263       // intrinsics describe the address of a variable (= the alloca)
5264       // being taken. They should not be moved next to the alloca
5265       // (and to the beginning of the scope), but rather stay close to
5266       // where said address is used.
5267       if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
5268         PrevNonDbgInst = Insn;
5269         continue;
5270       }
5271 
5272       Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
5273       if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
5274         // If VI is a phi in a block with an EHPad terminator, we can't insert
5275         // after it.
5276         if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
5277           continue;
5278         DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
5279         DVI->removeFromParent();
5280         if (isa<PHINode>(VI))
5281           DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
5282         else
5283           DVI->insertAfter(VI);
5284         MadeChange = true;
5285         ++NumDbgValueMoved;
5286       }
5287     }
5288   }
5289   return MadeChange;
5290 }
5291 
5292 // If there is a sequence that branches based on comparing a single bit
5293 // against zero, and the target supports folding the mask and compare
5294 // into a single instruction, sink the mask and compare into the branch
5295 // uses. Do this before OptimizeBlock -> OptimizeInst ->
5296 // OptimizeCmpExpression, which perturbs the pattern being
5297 // searched for.
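// For example (illustrative IR), given a block ending in
//   %andVal = and i32 %val, 4
//   %cmpVal = icmp eq i32 %andVal, 0
//   br i1 %cmpVal, label %dest1, label %dest2
// the and/icmp pair is re-created immediately before each conditional branch
// in another block that uses %cmpVal, so a target with a combined
// test-bit-and-branch instruction can match the pattern in every such block.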
5298 bool CodeGenPrepare::sinkAndCmp(Function &F) { 5299 if (!EnableAndCmpSinking) 5300 return false; 5301 if (!TLI || !TLI->isMaskAndBranchFoldingLegal()) 5302 return false; 5303 bool MadeChange = false; 5304 for (Function::iterator I = F.begin(), E = F.end(); I != E; ) { 5305 BasicBlock *BB = &*I++; 5306 5307 // Does this BB end with the following? 5308 // %andVal = and %val, #single-bit-set 5309 // %icmpVal = icmp %andResult, 0 5310 // br i1 %cmpVal label %dest1, label %dest2" 5311 BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator()); 5312 if (!Brcc || !Brcc->isConditional()) 5313 continue; 5314 ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0)); 5315 if (!Cmp || Cmp->getParent() != BB) 5316 continue; 5317 ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1)); 5318 if (!Zero || !Zero->isZero()) 5319 continue; 5320 Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0)); 5321 if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB) 5322 continue; 5323 ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1)); 5324 if (!Mask || !Mask->getUniqueInteger().isPowerOf2()) 5325 continue; 5326 DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump()); 5327 5328 // Push the "and; icmp" for any users that are conditional branches. 5329 // Since there can only be one branch use per BB, we don't need to keep 5330 // track of which BBs we insert into. 5331 for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end(); 5332 UI != E; ) { 5333 Use &TheUse = *UI; 5334 // Find brcc use. 5335 BranchInst *BrccUser = dyn_cast<BranchInst>(*UI); 5336 ++UI; 5337 if (!BrccUser || !BrccUser->isConditional()) 5338 continue; 5339 BasicBlock *UserBB = BrccUser->getParent(); 5340 if (UserBB == BB) continue; 5341 DEBUG(dbgs() << "found Brcc use\n"); 5342 5343 // Sink the "and; icmp" to use. 5344 MadeChange = true; 5345 BinaryOperator *NewAnd = 5346 BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "", 5347 BrccUser); 5348 CmpInst *NewCmp = 5349 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero, 5350 "", BrccUser); 5351 TheUse = NewCmp; 5352 ++NumAndCmpsMoved; 5353 DEBUG(BrccUser->getParent()->dump()); 5354 } 5355 } 5356 return MadeChange; 5357 } 5358 5359 /// \brief Retrieve the probabilities of a conditional branch. Returns true on 5360 /// success, or returns false if no or invalid metadata was found. 5361 static bool extractBranchMetadata(BranchInst *BI, 5362 uint64_t &ProbTrue, uint64_t &ProbFalse) { 5363 assert(BI->isConditional() && 5364 "Looking for probabilities on unconditional branch?"); 5365 auto *ProfileData = BI->getMetadata(LLVMContext::MD_prof); 5366 if (!ProfileData || ProfileData->getNumOperands() != 3) 5367 return false; 5368 5369 const auto *CITrue = 5370 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1)); 5371 const auto *CIFalse = 5372 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2)); 5373 if (!CITrue || !CIFalse) 5374 return false; 5375 5376 ProbTrue = CITrue->getValue().getZExtValue(); 5377 ProbFalse = CIFalse->getValue().getZExtValue(); 5378 5379 return true; 5380 } 5381 5382 /// \brief Scale down both weights to fit into uint32_t. 5383 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { 5384 uint64_t NewMax = (NewTrue > NewFalse) ? 
NewTrue : NewFalse; 5385 uint32_t Scale = (NewMax / UINT32_MAX) + 1; 5386 NewTrue = NewTrue / Scale; 5387 NewFalse = NewFalse / Scale; 5388 } 5389 5390 /// \brief Some targets prefer to split a conditional branch like: 5391 /// \code 5392 /// %0 = icmp ne i32 %a, 0 5393 /// %1 = icmp ne i32 %b, 0 5394 /// %or.cond = or i1 %0, %1 5395 /// br i1 %or.cond, label %TrueBB, label %FalseBB 5396 /// \endcode 5397 /// into multiple branch instructions like: 5398 /// \code 5399 /// bb1: 5400 /// %0 = icmp ne i32 %a, 0 5401 /// br i1 %0, label %TrueBB, label %bb2 5402 /// bb2: 5403 /// %1 = icmp ne i32 %b, 0 5404 /// br i1 %1, label %TrueBB, label %FalseBB 5405 /// \endcode 5406 /// This usually allows instruction selection to do even further optimizations 5407 /// and combine the compare with the branch instruction. Currently this is 5408 /// applied for targets which have "cheap" jump instructions. 5409 /// 5410 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. 5411 /// 5412 bool CodeGenPrepare::splitBranchCondition(Function &F) { 5413 if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive()) 5414 return false; 5415 5416 bool MadeChange = false; 5417 for (auto &BB : F) { 5418 // Does this BB end with the following? 5419 // %cond1 = icmp|fcmp|binary instruction ... 5420 // %cond2 = icmp|fcmp|binary instruction ... 5421 // %cond.or = or|and i1 %cond1, cond2 5422 // br i1 %cond.or label %dest1, label %dest2" 5423 BinaryOperator *LogicOp; 5424 BasicBlock *TBB, *FBB; 5425 if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB))) 5426 continue; 5427 5428 auto *Br1 = cast<BranchInst>(BB.getTerminator()); 5429 if (Br1->getMetadata(LLVMContext::MD_unpredictable)) 5430 continue; 5431 5432 unsigned Opc; 5433 Value *Cond1, *Cond2; 5434 if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)), 5435 m_OneUse(m_Value(Cond2))))) 5436 Opc = Instruction::And; 5437 else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)), 5438 m_OneUse(m_Value(Cond2))))) 5439 Opc = Instruction::Or; 5440 else 5441 continue; 5442 5443 if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) || 5444 !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) ) 5445 continue; 5446 5447 DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump()); 5448 5449 // Create a new BB. 5450 auto TmpBB = 5451 BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split", 5452 BB.getParent(), BB.getNextNode()); 5453 5454 // Update original basic block by using the first condition directly by the 5455 // branch instruction and removing the no longer needed and/or instruction. 5456 Br1->setCondition(Cond1); 5457 LogicOp->eraseFromParent(); 5458 5459 // Depending on the conditon we have to either replace the true or the false 5460 // successor of the original branch instruction. 5461 if (Opc == Instruction::And) 5462 Br1->setSuccessor(0, TmpBB); 5463 else 5464 Br1->setSuccessor(1, TmpBB); 5465 5466 // Fill in the new basic block. 5467 auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB); 5468 if (auto *I = dyn_cast<Instruction>(Cond2)) { 5469 I->removeFromParent(); 5470 I->insertBefore(Br2); 5471 } 5472 5473 // Update PHI nodes in both successors. The original BB needs to be 5474 // replaced in one succesor's PHI nodes, because the branch comes now from 5475 // the newly generated BB (NewBB). In the other successor we need to add one 5476 // incoming edge to the PHI nodes, because both branch instructions target 5477 // now the same successor. 
Depending on the original branch condition
5478     // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
5479     // we perform the correct update for the PHI nodes.
5480     // This doesn't change the successor order of the just created branch
5481     // instruction (or any other instruction).
5482     if (Opc == Instruction::Or)
5483       std::swap(TBB, FBB);
5484 
5485     // Replace the old BB with the new BB.
5486     for (auto &I : *TBB) {
5487       PHINode *PN = dyn_cast<PHINode>(&I);
5488       if (!PN)
5489         break;
5490       int i;
5491       while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
5492         PN->setIncomingBlock(i, TmpBB);
5493     }
5494 
5495     // Add another incoming edge from the new BB.
5496     for (auto &I : *FBB) {
5497       PHINode *PN = dyn_cast<PHINode>(&I);
5498       if (!PN)
5499         break;
5500       auto *Val = PN->getIncomingValueForBlock(&BB);
5501       PN->addIncoming(Val, TmpBB);
5502     }
5503 
5504     // Update the branch weights (from SelectionDAGBuilder::
5505     // FindMergedConditions).
5506     if (Opc == Instruction::Or) {
5507       // Codegen X | Y as:
5508       // BB1:
5509       //   jmp_if_X TBB
5510       //   jmp TmpBB
5511       // TmpBB:
5512       //   jmp_if_Y TBB
5513       //   jmp FBB
5514       //
5515 
5516       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
5517       // The requirement is that
5518       //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
5519       //     = TrueProb for original BB.
5520       // Assuming the original weights are A and B, one choice is to set BB1's
5521       // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
5522       // assumes that
5523       //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
5524       // Another choice is to assume TrueProb for BB1 equals TrueProb for
5525       // TmpBB, but the math is more complicated.
5526       uint64_t TrueWeight, FalseWeight;
5527       if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
5528         uint64_t NewTrueWeight = TrueWeight;
5529         uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
5530         scaleWeights(NewTrueWeight, NewFalseWeight);
5531         Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
5532                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5533 
5534         NewTrueWeight = TrueWeight;
5535         NewFalseWeight = 2 * FalseWeight;
5536         scaleWeights(NewTrueWeight, NewFalseWeight);
5537         Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
5538                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5539       }
5540     } else {
5541       // Codegen X & Y as:
5542       // BB1:
5543       //   jmp_if_X TmpBB
5544       //   jmp FBB
5545       // TmpBB:
5546       //   jmp_if_Y TBB
5547       //   jmp FBB
5548       //
5549       // This requires creation of TmpBB after the original BB.
5550 
5551       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
5552       // The requirement is that
5553       //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
5554       //     = FalseProb for original BB.
5555       // Assuming the original weights are A and B, one choice is to set BB1's
5556       // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
5557       // assumes that
5558       //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
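      // Worked example (assumed weights, for illustration only): with original
      // weights A = 1 and B = 3, BB1 gets weights 2A+B = 5 vs. B = 3 and TmpBB
      // gets 2A = 2 vs. B = 3, so
      //   FalseProb(BB1) + TrueProb(BB1) * FalseProb(TmpBB)
      //     = 3/8 + (5/8) * (3/5) = 3/4,
      // which matches FalseProb of the original block (3/4).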
5559       uint64_t TrueWeight, FalseWeight;
5560       if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
5561         uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
5562         uint64_t NewFalseWeight = FalseWeight;
5563         scaleWeights(NewTrueWeight, NewFalseWeight);
5564         Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
5565                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5566 
5567         NewTrueWeight = 2 * TrueWeight;
5568         NewFalseWeight = FalseWeight;
5569         scaleWeights(NewTrueWeight, NewFalseWeight);
5570         Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
5571                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5572       }
5573     }
5574 
5575     // Note: No point in getting fancy here, since the DT info is never
5576     // available to CodeGenPrepare.
5577     ModifiedDT = true;
5578 
5579     MadeChange = true;
5580 
5581     DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
5582           TmpBB->dump());
5583   }
5584   return MadeChange;
5585 }
5586 
5587 void CodeGenPrepare::stripInvariantGroupMetadata(Instruction &I) {
5588   if (auto *InvariantMD = I.getMetadata(LLVMContext::MD_invariant_group))
5589     I.dropUnknownNonDebugMetadata(InvariantMD->getMetadataID());
5590 }
5591