//===- ScopHelper.cpp - Some Helper Functions for Scop. ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

// Return true iff some incoming value of @p PN is an InvokeInst whose parent
// block is the corresponding incoming block, i.e., the PHI receives a value
// over the return edge of an invoke.
bool polly::hasInvokeEdge(const PHINode *PN) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
    if (InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i)))
      if (II->getParent() == PN->getIncomingBlock(i))
        return true;

  return false;
}

// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
//
// Updates DominatorTree and LoopInfo via SplitBlockPredecessors; updates
// RegionInfo by hand if @p RI is non-null.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    // Only redirect edges coming from outside the region; back edges from
    // inside the region must keep targeting Entry so the region's identity
    // (and any loop rooted at Entry) is preserved.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of preceding regions must be changed to NewEntering.
      // Walk up each such region's parent chain as long as the parent also
      // exits at Entry.
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors use EnteringBB as entry; there might be edges to it
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //   EnteringBB    //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \    /   //
  //         ....    //
}

// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    // Only split off the predecessors of the exit that lie inside the region;
    // edges from outside the region keep targeting ExitBB directly.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the region itself,
    // replaceExitRecursive rewires every region (including R) to exit at the
    // new ExitingBB; the following replaceExit restores R's own exit to the
    // original ExitBB so R's identity is preserved.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //   ExitingBB     _____/   //
  //          \     /         //
  //           ExitBB         //
  //           /    \         //
}

// Make @p R a "simple" region: exactly one entering edge from outside and one
// exiting block. DT must be supplied whenever RI is, because RegionInfo's
// invariants depend on an up-to-date dominator tree.
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}

// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    // The new block belongs to the same region as the block it was split from.
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //   /   \    //

  return NewBlock;
}

// Split @p EntryBlock right after its leading allocas, so the allocas stay in
// a block of their own. Analyses are fetched from @p P only if available and
// updated accordingly.
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  // Find first non-alloca instruction. Every basic block has a non-alloca
  // instruction, as every well formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, &*I, DT, LI, RI);
}

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
/// however to generate new code if the instruction is in the analyzed region
/// and we generate code outside/in front of that region. Hence, we generate the
/// code for the SDiv/SRem operands in front of the analyzed region and then
/// create a new SDiv/SRem operation there too.
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap) {}

  /// Expand code for @p E of type @p Ty before instruction @p I.
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

private:
  SCEVExpander Expander; ///< Underlying expander that emits the actual IR.
  ScalarEvolution &SE;
  const char *Name;      ///< Suffix used when naming cloned instructions.
  const Region &R;       ///< Values defined in this region must be recomputed.
  ValueMapT *VMap;       ///< Optional value remapping; may be null.

  /// Clone a side-effect-free instruction @p Inst from inside the region and
  /// insert the clone at @p IP, after recursively expanding code for each of
  /// its operands. Returns the SCEV of the clone (or @p E unchanged if the
  /// instruction is null or already outside the region).
  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    // Only instructions without side effects (and non-PHIs) may be recomputed
    // at a different location.
    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  /// Handle SCEVUnknown leaves: apply the value map if given, pick an insert
  /// point in front of the region, and recompute region-internal values there.
  /// SDiv/SRem get special treatment because SCEVExpander would reuse the
  /// existing instruction instead of generating new code.
  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given, check if the underlying value is remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different the SCEV representation might
      // not be. To this end we will check before we go into recursion here.
      if (E != NewE)
        return visit(NewE);
    }

    // Choose the insert point: values outside the region can stay where they
    // are; values inside the region are recomputed in the entering block; a
    // non-instruction (or an instruction from another function) is handled in
    // the function's entry block.
    // NOTE(review): both fallback branches dereference EnteringBB, so this
    // appears to assume the region was simplified (non-null entering block) --
    // confirm against callers.
    auto *EnteringBB = R.getEnteringBlock();
    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && EnteringBB->getParent() == Inst->getFunction())
      IP = EnteringBB->getTerminator();
    else
      IP = EnteringBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

    // Guard against division by zero in the hoisted copy: clamp the divisor
    // to at least 1 unless it is known non-zero.
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    // Same non-zero-divisor guard as for SDiv/SRem in visitUnknown.
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};

/// Expand code for @p E of type @p Ty before @p IP, recomputing values that
/// are defined inside the region of @p S (see ScopExpander above).
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap);
  return Expander.expandCodeFor(E, Ty, IP);
}

/// Heuristically decide whether @p BB is an "error block", i.e., a block that
/// is only executed in rare (error) situations and can hence be excluded from
/// the polyhedral model.
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {

  // A block ending in unreachable is clearly an error block.
  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  // Loop headers are executed repeatedly; do not treat them as error blocks.
  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution can not be a rare event.
  bool DominatesAllPredecessors = true;
  for (auto Pred : predecessors(R.getExit()))
    if (R.contains(Pred) && !DT.dominates(&BB, Pred))
      DominatesAllPredecessors = false;

  if (DominatesAllPredecessors)
    return false;

  // FIXME: This is a simple heuristic to determine if the load is executed
  //        in a conditional. However, we actually would need the control
  //        condition, i.e., the post dominance frontier. Alternatively we
  //        could walk up the dominance tree until we find a block that is
  //        not post dominated by the load and check if it is a conditional
  //        or a loop header.
  auto *DTNode = DT.getNode(&BB);
  auto *IDomBB = DTNode->getIDom()->getBlock();
  if (LI.isLoopHeader(IDomBB))
    return false;

  // Calls that may not return, or that touch memory and are not ignored
  // intrinsics, mark the block as an error block.
  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isIgnoredIntrinsic(CI))
        return false;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}

/// Return the condition under which @p TI transfers control: the branch
/// condition for conditional branches, a true constant for unconditional
/// branches, the switch condition for switches, and nullptr otherwise.
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}

/// Return true if @p LInst can be hoisted in front of @p R, i.e., its pointer
/// operand is invariant in every loop that surrounds the load and is contained
/// in the region.
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  const SCEV *PtrSCEV = SE.getSCEVAtScope(LInst->getPointerOperand(), L);
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  return true;
}

/// Return true if @p V is an intrinsic call that Polly deliberately ignores
/// during Scop detection/modeling (markers, annotations, debug info, ...).
bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}

/// Return true if the value of @p V can be recomputed from its SCEV at loop
/// scope @p Scope without introducing scalar dependences inside the region of
/// @p S.
bool polly::canSynthesize(const Value *V, const Scop &S,
                          const llvm::LoopInfo *LI, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false))
        return true;

  return false;
}

/// Return the block in which a use @p U is effectively used: the incoming
/// block for PHI uses, the user's parent block otherwise, or nullptr if the
/// user is not an instruction.
llvm::BasicBlock *polly::getUseBlock(llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}

/// Derive multi-dimensional subscript expressions and (inner) dimension sizes
/// from the index operands of @p GEP by walking the pointed-to array type.
/// Returns empty vectors as soon as a non-array type is indexed, since no
/// multi-dimensional view can be derived then. A zero first index into the
/// pointer operand drops the outermost (unsized) dimension.
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      // A constant-zero first index does not contribute a subscript; remember
      // that we dropped it so the next dimension's size is skipped as well.
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}