//===- ScopHelper.cpp - Some Helper Functions for Scop. ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

// Whether blocks may be classified as rarely-executed "error blocks" and
// speculated on; consulted by isErrorBlock() below. Defaults to enabled.
static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Comma-separated list of function names whose calls are tolerated inside
// SCoPs even though their side-effects are unknown; consulted by
// isDebugCall() and hasDebugCall() below.
static cl::list<std::string> DebugFunctions(
    "polly-debug-func",
    cl::desc("Allow calls to the specified functions in SCoPs even if their "
             "side-effects are unknown. This can be used to do debug output in "
             "Polly-transformed code."),
    cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));

// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
48 static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI, 49 RegionInfo *RI) { 50 BasicBlock *EnteringBB = R->getEnteringBlock(); 51 BasicBlock *Entry = R->getEntry(); 52 53 // Before (one of): 54 // 55 // \ / // 56 // EnteringBB // 57 // | \------> // 58 // \ / | // 59 // Entry <--\ Entry <--\ // 60 // / \ / / \ / // 61 // .... .... // 62 63 // Create single entry edge if the region has multiple entry edges. 64 if (!EnteringBB) { 65 SmallVector<BasicBlock *, 4> Preds; 66 for (BasicBlock *P : predecessors(Entry)) 67 if (!R->contains(P)) 68 Preds.push_back(P); 69 70 BasicBlock *NewEntering = 71 SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI); 72 73 if (RI) { 74 // The exit block of predecessing regions must be changed to NewEntering 75 for (BasicBlock *ExitPred : predecessors(NewEntering)) { 76 Region *RegionOfPred = RI->getRegionFor(ExitPred); 77 if (RegionOfPred->getExit() != Entry) 78 continue; 79 80 while (!RegionOfPred->isTopLevelRegion() && 81 RegionOfPred->getExit() == Entry) { 82 RegionOfPred->replaceExit(NewEntering); 83 RegionOfPred = RegionOfPred->getParent(); 84 } 85 } 86 87 // Make all ancestors use EnteringBB as entry; there might be edges to it 88 Region *AncestorR = R->getParent(); 89 RI->setRegionFor(NewEntering, AncestorR); 90 while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) { 91 AncestorR->replaceEntry(NewEntering); 92 AncestorR = AncestorR->getParent(); 93 } 94 } 95 96 EnteringBB = NewEntering; 97 } 98 assert(R->getEnteringBlock() == EnteringBB); 99 100 // After: 101 // 102 // \ / // 103 // EnteringBB // 104 // | // 105 // | // 106 // Entry <--\ // 107 // / \ / // 108 // .... // 109 } 110 111 // Ensure that the region has a single block that branches to the exit node. 
112 static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI, 113 RegionInfo *RI) { 114 BasicBlock *ExitBB = R->getExit(); 115 BasicBlock *ExitingBB = R->getExitingBlock(); 116 117 // Before: 118 // 119 // (Region) ______/ // 120 // \ | / // 121 // ExitBB // 122 // / \ // 123 124 if (!ExitingBB) { 125 SmallVector<BasicBlock *, 4> Preds; 126 for (BasicBlock *P : predecessors(ExitBB)) 127 if (R->contains(P)) 128 Preds.push_back(P); 129 130 // Preds[0] Preds[1] otherBB // 131 // \ | ________/ // 132 // \ | / // 133 // BB // 134 ExitingBB = 135 SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI); 136 // Preds[0] Preds[1] otherBB // 137 // \ / / // 138 // BB.region_exiting / // 139 // \ / // 140 // BB // 141 142 if (RI) 143 RI->setRegionFor(ExitingBB, R); 144 145 // Change the exit of nested regions, but not the region itself, 146 R->replaceExitRecursive(ExitingBB); 147 R->replaceExit(ExitBB); 148 } 149 assert(ExitingBB == R->getExitingBlock()); 150 151 // After: 152 // 153 // \ / // 154 // ExitingBB _____/ // 155 // \ / // 156 // ExitBB // 157 // / \ // 158 } 159 160 void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI, 161 RegionInfo *RI) { 162 assert(R && !R->isTopLevelRegion()); 163 assert(!RI || RI == R->getRegionInfo()); 164 assert((!RI || DT) && 165 "RegionInfo requires DominatorTree to be updated as well"); 166 167 simplifyRegionEntry(R, DT, LI, RI); 168 simplifyRegionExit(R, DT, LI, RI); 169 assert(R->isSimple()); 170 } 171 172 // Split the block into two successive blocks. 
173 // 174 // Like llvm::SplitBlock, but also preserves RegionInfo 175 static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt, 176 DominatorTree *DT, llvm::LoopInfo *LI, 177 RegionInfo *RI) { 178 assert(Old && SplitPt); 179 180 // Before: 181 // 182 // \ / // 183 // Old // 184 // / \ // 185 186 BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI); 187 188 if (RI) { 189 Region *R = RI->getRegionFor(Old); 190 RI->setRegionFor(NewBlock, R); 191 } 192 193 // After: 194 // 195 // \ / // 196 // Old // 197 // | // 198 // NewBlock // 199 // / \ // 200 201 return NewBlock; 202 } 203 204 void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT, 205 LoopInfo *LI, RegionInfo *RI) { 206 // Find first non-alloca instruction. Every basic block has a non-alloca 207 // instruction, as every well formed basic block has a terminator. 208 BasicBlock::iterator I = EntryBlock->begin(); 209 while (isa<AllocaInst>(I)) 210 ++I; 211 212 // splitBlock updates DT, LI and RI. 213 splitBlock(EntryBlock, &*I, DT, LI, RI); 214 } 215 216 void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) { 217 auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 218 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr; 219 auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>(); 220 auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr; 221 RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>(); 222 RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr; 223 224 // splitBlock updates DT, LI and RI. 225 polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI); 226 } 227 228 /// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem 229 /// instruction but just use it, if it is referenced as a SCEVUnknown. We want 230 /// however to generate new code if the instruction is in the analyzed region 231 /// and we generate code outside/in front of that region. 
/// Hence, we generate the
/// code for the SDiv/SRem operands in front of the analyzed region and then
/// create a new SDiv/SRem operation there too.
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap), RTCBB(RTCBB) {}

  /// Expand @p E to a value of type @p Ty in front of @p I. If @p I lies
  /// outside the analyzed region, the SCEV is first rewritten so that no
  /// instruction from inside the region is reused.
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

  /// Memoizing wrapper around SCEVVisitor::visit().
  const SCEV *visit(const SCEV *E) {
    // Cache the expansion results for intermediate SCEV expressions. A SCEV
    // expression can refer to an operand multiple times (e.g. "x*x"), so
    // a naive visitor takes exponential time.
    if (SCEVCache.count(E))
      return SCEVCache[E];
    const SCEV *Result = SCEVVisitor::visit(E);
    SCEVCache[E] = Result;
    return Result;
  }

private:
  SCEVExpander Expander; ///< Underlying expander emitting the actual code.
  ScalarEvolution &SE;
  const char *Name;      ///< Name suffix/prefix for cloned instructions.
  const Region &R;       ///< Analyzed region whose code must not be reused.
  ValueMapT *VMap;       ///< Optional value remapping (may be null).
  BasicBlock *RTCBB;     ///< Block receiving run-time-check code.
  DenseMap<const SCEV *, const SCEV *> SCEVCache; ///< visit() memoization.

  /// Clone @p Inst in front of the region (at @p IP), recursively expanding
  /// its operands there first; returns the SCEV of the clone. Values defined
  /// outside the region are returned unchanged.
  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    // Cloning is only sound for side-effect-free, non-PHI instructions.
    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  /// Rewrite a SCEVUnknown: apply the VMap remapping if one was given,
  /// rebuild SDiv/SRem with a divisor guarded against zero, and clone any
  /// other in-region instruction via visitGenericInst().
  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given try if the underlying value is remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different the SCEV representation might
      // not be. To this end we will check before we go into recursion here.
      if (E != NewE)
        return visit(NewE);
    }

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    // Pick the insertion point: at the instruction itself when it is already
    // outside the region, otherwise in front of the run-time check (or at the
    // entry of the RTC's function if Inst belongs to a different function).
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

    // Guard the divisor: umax(RHS, 1) avoids division by zero in the copy.
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    // As for SDiv/SRem above, guard the divisor against zero.
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};

/// Expand @p E at @p IP via a ScopExpander over the region of @p S so that no
/// code from inside the SCoP is reused; see the struct documentation above.
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}
SCEV *E, Type *Ty, 388 Instruction *IP, ValueMapT *VMap, 389 BasicBlock *RTCBB) { 390 ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB); 391 return Expander.expandCodeFor(E, Ty, IP); 392 } 393 394 bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI, 395 const DominatorTree &DT) { 396 if (!PollyAllowErrorBlocks) 397 return false; 398 399 if (isa<UnreachableInst>(BB.getTerminator())) 400 return true; 401 402 if (LI.isLoopHeader(&BB)) 403 return false; 404 405 // Basic blocks that are always executed are not considered error blocks, 406 // as their execution can not be a rare event. 407 bool DominatesAllPredecessors = true; 408 if (R.isTopLevelRegion()) { 409 for (BasicBlock &I : *R.getEntry()->getParent()) 410 if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I)) 411 DominatesAllPredecessors = false; 412 } else { 413 for (auto Pred : predecessors(R.getExit())) 414 if (R.contains(Pred) && !DT.dominates(&BB, Pred)) 415 DominatesAllPredecessors = false; 416 } 417 418 if (DominatesAllPredecessors) 419 return false; 420 421 for (Instruction &Inst : BB) 422 if (CallInst *CI = dyn_cast<CallInst>(&Inst)) { 423 if (isDebugCall(CI)) 424 continue; 425 426 if (isIgnoredIntrinsic(CI)) 427 continue; 428 429 // memset, memcpy and memmove are modeled intrinsics. 
430 if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI)) 431 continue; 432 433 if (!CI->doesNotAccessMemory()) 434 return true; 435 if (CI->doesNotReturn()) 436 return true; 437 } 438 439 return false; 440 } 441 442 Value *polly::getConditionFromTerminator(TerminatorInst *TI) { 443 if (BranchInst *BR = dyn_cast<BranchInst>(TI)) { 444 if (BR->isUnconditional()) 445 return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext())); 446 447 return BR->getCondition(); 448 } 449 450 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) 451 return SI->getCondition(); 452 453 return nullptr; 454 } 455 456 bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI, 457 ScalarEvolution &SE, const DominatorTree &DT) { 458 Loop *L = LI.getLoopFor(LInst->getParent()); 459 auto *Ptr = LInst->getPointerOperand(); 460 const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L); 461 while (L && R.contains(L)) { 462 if (!SE.isLoopInvariant(PtrSCEV, L)) 463 return false; 464 L = L->getParentLoop(); 465 } 466 467 for (auto *User : Ptr->users()) { 468 auto *UserI = dyn_cast<Instruction>(User); 469 if (!UserI || !R.contains(UserI)) 470 continue; 471 if (!UserI->mayWriteToMemory()) 472 continue; 473 474 auto &BB = *UserI->getParent(); 475 if (DT.dominates(&BB, LInst->getParent())) 476 return false; 477 478 bool DominatesAllPredecessors = true; 479 if (R.isTopLevelRegion()) { 480 for (BasicBlock &I : *R.getEntry()->getParent()) 481 if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I)) 482 DominatesAllPredecessors = false; 483 } else { 484 for (auto Pred : predecessors(R.getExit())) 485 if (R.contains(Pred) && !DT.dominates(&BB, Pred)) 486 DominatesAllPredecessors = false; 487 } 488 489 if (!DominatesAllPredecessors) 490 continue; 491 492 return false; 493 } 494 495 return true; 496 } 497 498 bool polly::isIgnoredIntrinsic(const Value *V) { 499 if (auto *IT = dyn_cast<IntrinsicInst>(V)) { 500 switch (IT->getIntrinsicID()) { 501 // Lifetime markers are supported/ignored. 
502 case llvm::Intrinsic::lifetime_start: 503 case llvm::Intrinsic::lifetime_end: 504 // Invariant markers are supported/ignored. 505 case llvm::Intrinsic::invariant_start: 506 case llvm::Intrinsic::invariant_end: 507 // Some misc annotations are supported/ignored. 508 case llvm::Intrinsic::var_annotation: 509 case llvm::Intrinsic::ptr_annotation: 510 case llvm::Intrinsic::annotation: 511 case llvm::Intrinsic::donothing: 512 case llvm::Intrinsic::assume: 513 // Some debug info intrinsics are supported/ignored. 514 case llvm::Intrinsic::dbg_value: 515 case llvm::Intrinsic::dbg_declare: 516 return true; 517 default: 518 break; 519 } 520 } 521 return false; 522 } 523 524 bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE, 525 Loop *Scope) { 526 if (!V || !SE->isSCEVable(V->getType())) 527 return false; 528 529 const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads(); 530 if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope)) 531 if (!isa<SCEVCouldNotCompute>(Scev)) 532 if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS)) 533 return true; 534 535 return false; 536 } 537 538 llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) { 539 Instruction *UI = dyn_cast<Instruction>(U.getUser()); 540 if (!UI) 541 return nullptr; 542 543 if (PHINode *PHI = dyn_cast<PHINode>(UI)) 544 return PHI->getIncomingBlock(U); 545 546 return UI->getParent(); 547 } 548 549 std::tuple<std::vector<const SCEV *>, std::vector<int>> 550 polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) { 551 std::vector<const SCEV *> Subscripts; 552 std::vector<int> Sizes; 553 554 Type *Ty = GEP->getPointerOperandType(); 555 556 bool DroppedFirstDim = false; 557 558 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 559 560 const SCEV *Expr = SE.getSCEV(GEP->getOperand(i)); 561 562 if (i == 1) { 563 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 564 Ty = PtrTy->getElementType(); 565 } else if (auto *ArrayTy = 
dyn_cast<ArrayType>(Ty)) { 566 Ty = ArrayTy->getElementType(); 567 } else { 568 Subscripts.clear(); 569 Sizes.clear(); 570 break; 571 } 572 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 573 if (Const->getValue()->isZero()) { 574 DroppedFirstDim = true; 575 continue; 576 } 577 Subscripts.push_back(Expr); 578 continue; 579 } 580 581 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 582 if (!ArrayTy) { 583 Subscripts.clear(); 584 Sizes.clear(); 585 break; 586 } 587 588 Subscripts.push_back(Expr); 589 if (!(DroppedFirstDim && i == 2)) 590 Sizes.push_back(ArrayTy->getNumElements()); 591 592 Ty = ArrayTy->getElementType(); 593 } 594 595 return std::make_tuple(Subscripts, Sizes); 596 } 597 598 llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI, 599 const BoxedLoopsSetTy &BoxedLoops) { 600 while (BoxedLoops.count(L)) 601 L = L->getParentLoop(); 602 return L; 603 } 604 605 llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, 606 llvm::LoopInfo &LI, 607 const BoxedLoopsSetTy &BoxedLoops) { 608 Loop *L = LI.getLoopFor(BB); 609 return getFirstNonBoxedLoopFor(L, LI, BoxedLoops); 610 } 611 612 bool polly::isDebugCall(Instruction *Inst) { 613 auto *CI = dyn_cast<CallInst>(Inst); 614 if (!CI) 615 return false; 616 617 Function *CF = CI->getCalledFunction(); 618 if (!CF) 619 return false; 620 621 return std::find(DebugFunctions.begin(), DebugFunctions.end(), 622 CF->getName()) != DebugFunctions.end(); 623 } 624 625 static bool hasDebugCall(BasicBlock *BB) { 626 for (Instruction &Inst : *BB) { 627 if (isDebugCall(&Inst)) 628 return true; 629 } 630 return false; 631 } 632 633 bool polly::hasDebugCall(ScopStmt *Stmt) { 634 // Quick skip if no debug functions have been defined. 
635 if (DebugFunctions.empty()) 636 return false; 637 638 if (!Stmt) 639 return false; 640 641 for (Instruction *Inst : Stmt->getInstructions()) 642 if (isDebugCall(Inst)) 643 return true; 644 645 if (Stmt->isRegionStmt()) { 646 for (BasicBlock *RBB : Stmt->getRegion()->blocks()) 647 if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB)) 648 return true; 649 } 650 651 return false; 652 } 653