1 //===- ScopHelper.cpp - Some Helper Functions for Scop. ------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Small functions that help with Scop and LLVM-IR. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "polly/Support/ScopHelper.h" 15 #include "polly/Options.h" 16 #include "polly/ScopInfo.h" 17 #include "polly/Support/SCEVValidator.h" 18 #include "llvm/Analysis/LoopInfo.h" 19 #include "llvm/Analysis/RegionInfo.h" 20 #include "llvm/Analysis/ScalarEvolution.h" 21 #include "llvm/Analysis/ScalarEvolutionExpander.h" 22 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 23 #include "llvm/IR/CFG.h" 24 #include "llvm/IR/IntrinsicInst.h" 25 #include "llvm/Support/Debug.h" 26 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 27 28 using namespace llvm; 29 using namespace polly; 30 31 #define DEBUG_TYPE "polly-scop-helper" 32 33 Value *polly::getPointerOperand(Instruction &Inst) { 34 if (LoadInst *load = dyn_cast<LoadInst>(&Inst)) 35 return load->getPointerOperand(); 36 else if (StoreInst *store = dyn_cast<StoreInst>(&Inst)) 37 return store->getPointerOperand(); 38 else if (GetElementPtrInst *gep = dyn_cast<GetElementPtrInst>(&Inst)) 39 return gep->getPointerOperand(); 40 41 return 0; 42 } 43 44 bool polly::hasInvokeEdge(const PHINode *PN) { 45 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) 46 if (InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i))) 47 if (II->getParent() == PN->getIncomingBlock(i)) 48 return true; 49 50 return false; 51 } 52 53 // Ensures that there is just one predecessor to the entry node from outside the 54 // region. 55 // The identity of the region entry node is preserved. 
56 static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI, 57 RegionInfo *RI) { 58 BasicBlock *EnteringBB = R->getEnteringBlock(); 59 BasicBlock *Entry = R->getEntry(); 60 61 // Before (one of): 62 // 63 // \ / // 64 // EnteringBB // 65 // | \------> // 66 // \ / | // 67 // Entry <--\ Entry <--\ // 68 // / \ / / \ / // 69 // .... .... // 70 71 // Create single entry edge if the region has multiple entry edges. 72 if (!EnteringBB) { 73 SmallVector<BasicBlock *, 4> Preds; 74 for (BasicBlock *P : predecessors(Entry)) 75 if (!R->contains(P)) 76 Preds.push_back(P); 77 78 BasicBlock *NewEntering = 79 SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI); 80 81 if (RI) { 82 // The exit block of predecessing regions must be changed to NewEntering 83 for (BasicBlock *ExitPred : predecessors(NewEntering)) { 84 Region *RegionOfPred = RI->getRegionFor(ExitPred); 85 if (RegionOfPred->getExit() != Entry) 86 continue; 87 88 while (!RegionOfPred->isTopLevelRegion() && 89 RegionOfPred->getExit() == Entry) { 90 RegionOfPred->replaceExit(NewEntering); 91 RegionOfPred = RegionOfPred->getParent(); 92 } 93 } 94 95 // Make all ancestors use EnteringBB as entry; there might be edges to it 96 Region *AncestorR = R->getParent(); 97 RI->setRegionFor(NewEntering, AncestorR); 98 while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) { 99 AncestorR->replaceEntry(NewEntering); 100 AncestorR = AncestorR->getParent(); 101 } 102 } 103 104 EnteringBB = NewEntering; 105 } 106 assert(R->getEnteringBlock() == EnteringBB); 107 108 // After: 109 // 110 // \ / // 111 // EnteringBB // 112 // | // 113 // | // 114 // Entry <--\ // 115 // / \ / // 116 // .... // 117 } 118 119 // Ensure that the region has a single block that branches to the exit node. 
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    // Collect all predecessors of the exit that lie inside the region; they
    // are redirected through a single new exiting block.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    // The new exiting block belongs to this region.
    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the region itself: first
    // retarget every nested region that exited at ExitBB to the new
    // ExitingBB, then restore R's own exit back to ExitBB.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}

/// Simplify \p R so that it has a single entering edge and a single exiting
/// block while preserving the identity of its entry and exit blocks. The
/// optional analyses DT, LI and RI are kept up to date.
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}

// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    // The new block stays in the same region as the block it was split from.
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //   /   \    //

  return NewBlock;
}

/// Split \p EntryBlock directly after its leading run of alloca instructions,
/// so new code can be placed after the allocas but before everything else.
/// Any of DT, LI and RI that \p P has available are updated by the split.
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  // Find first non-alloca instruction. Every basic block has a non-alloc
  // instruction, as every well formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  // Query the analyses lazily; each may be unavailable, in which case the
  // corresponding pointer stays nullptr and is simply not updated.
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, I, DT, LI, RI);
}

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
/// however to generate new code if the instruction is in the analyzed region
/// and we generate code outside/in front of that region. Hence, we generate the
/// code for the SDiv/SRem operands in front of the analyzed region and then
/// create a new SDiv/SRem operation there too.
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap) {}

  /// Expand \p E to a value of type \p Ty at the insertion point \p I.
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

private:
  SCEVExpander Expander;   // Underlying expander that emits the actual IR.
  ScalarEvolution &SE;
  const char *Name;        // Suffix appended to names of re-created insts.
  const Region &R;         // The analyzed region new code must not rely on.
  ValueMapT *VMap;         // Optional remapping applied to unknown values.

  /// Handle a SCEVUnknown: remap it through VMap if requested, and re-create
  /// SDiv/SRem instructions that live inside the region in front of it (see
  /// the comment above this struct).
  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given try if the underlying value is remapped.
    if (VMap)
      if (Value *NewVal = VMap->lookup(E->getValue()))
        if (NewVal != E->getValue())
          return visit(SE.getSCEV(NewVal));

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    // Only SDiv/SRem instructions need re-creation; anything else can be
    // referenced directly.
    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return E;

    if (!R.contains(Inst))
      return E;

    // Emit the replacement in front of the region, at the terminator of the
    // entering block.
    Instruction *StartIP = R.getEnteringBlock()->getTerminator();

    // Rebuild both operands first (recursively applying the same policy),
    // then materialize them and create a fresh SDiv/SRem at StartIP.
    const SCEV *LHSScev = visit(SE.getSCEV(Inst->getOperand(0)));
    const SCEV *RHSScev = visit(SE.getSCEV(Inst->getOperand(1)));

    Value *LHS = Expander.expandCodeFor(LHSScev, E->getType(), StartIP);
    Value *RHS = Expander.expandCodeFor(RHSScev, E->getType(), StartIP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, StartIP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    return SE.getUDivExpr(visit(E->getLHS()), visit(E->getRHS()));
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};

/// Expand \p E at \p IP using a ScopExpander for scop \p S; see the comment on
/// ScopExpander for how this differs from a plain SCEVExpander.
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap);
  return Expander.expandCodeFor(E, Ty, IP);
}

/// Heuristically decide whether \p BB is an "error block": a block that ends
/// in unreachable, or a conditionally executed block that calls a function
/// which may access memory or does not return.
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {

  // A block terminated by unreachable is always an error block.
  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  // Loop headers are never treated as error blocks.
  if (LI.isLoopHeader(&BB))
    return false;

  // A block that dominates the region exit is executed unconditionally and
  // therefore not an error block.
  if (DT.dominates(&BB, R.getExit()))
    return false;

  // FIXME: This is a simple heuristic to determine if the load is executed
  //        in a conditional. However, we actually would need the control
  //        condition, i.e., the post dominance frontier. Alternatively we
  //        could walk up the dominance tree until we find a block that is
  //        not post dominated by the load and check if it is a conditional
  //        or a loop header.
  auto *DTNode = DT.getNode(&BB);
  auto *IDomBB = DTNode->getIDom()->getBlock();
  if (LI.isLoopHeader(IDomBB))
    return false;

  // A conditionally executed call that may touch memory or never returns
  // marks the block as an error block.
  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}

/// Return the branch/switch condition of \p TI; unconditional branches yield
/// the constant i1 true, unsupported terminators yield nullptr.
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}

/// Check whether \p LInst can be hoisted in front of \p R: its pointer operand
/// must be loop-invariant w.r.t. every surrounding loop contained in R.
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  const SCEV *PtrSCEV = SE.getSCEVAtScope(LInst->getPointerOperand(), L);
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  return true;
}

/// Return true if \p V is an intrinsic call that Polly can safely ignore
/// during analysis and code generation.
bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
    // Some debug info intrisics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}

/// Check whether \p V can be re-synthesized from a SCEV expression inside
/// region \p R, i.e. it is SCEVable, its SCEV is computable and that SCEV has
/// no scalar dependences inside the region.
bool polly::canSynthesize(const Value *V, const llvm::LoopInfo *LI,
                          ScalarEvolution *SE, const Region *R) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEV(const_cast<Value *>(V)))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, R))
        return true;

  return false;
}