//===- ScopHelper.cpp - Some Helper Functions for Scop. ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionInfoImpl.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

// Command-line switch consulted by isErrorBlock() below: when enabled
// (the default), blocks that look like rarely-executed error handling may be
// speculated on / excluded from the modeled SCoP.
static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Comma-separated list of function names consulted by isDebugCall() below:
// calls to these functions are tolerated inside SCoPs even though their
// side effects are unknown.
static cl::list<std::string> DebugFunctions(
    "polly-debug-func",
    cl::desc("Allow calls to the specified functions in SCoPs even if their "
             "side-effects are unknown. This can be used to do debug output in "
             "Polly-transformed code."),
    cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));

// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
// Give the region a single entering edge: if several blocks outside the region
// branch to the entry, split them off into a new ".region_entering" block and
// update DT, LI and (optionally) RI accordingly. The entry block itself keeps
// its identity.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    // Collect only the predecessors from outside the region; back edges from
    // inside the region must keep targeting Entry.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of predecessing regions must be changed to NewEntering.
      // Walk up each predecessor's region chain as long as Entry is still the
      // recorded exit, since ancestors may share that exit.
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors use EnteringBB as entry; there might be edges to it
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //   EnteringBB    //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \    /   //
  //         ....    //
}

// Ensure that the region has a single block that branches to the exit node.
// Give the region a single exiting block: if several blocks inside the region
// branch to the exit, split them off into a new ".region_exiting" block (which
// becomes part of this region) and update DT, LI and (optionally) RI.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    // Collect only the predecessors inside the region; edges from outside the
    // region must keep targeting ExitBB.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB  //
    //        \   |  ________/          //
    //         \  | /                   //
    //           BB                     //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    //  Preds[0] Preds[1]      otherBB  //
    //         \  /           /         //
    //  BB.region_exiting    /          //
    //               \      /           //
    //                  BB              //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the region itself,
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}

// Make @p R "simple" (single entering edge, single exiting block) while
// keeping DT, LI and RI consistent. RI may be null; if RI is given, DT must be
// given too (asserted below).
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}

// Split the block into two successive blocks.
174 // 175 // Like llvm::SplitBlock, but also preserves RegionInfo 176 static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt, 177 DominatorTree *DT, llvm::LoopInfo *LI, 178 RegionInfo *RI) { 179 assert(Old && SplitPt); 180 181 // Before: 182 // 183 // \ / // 184 // Old // 185 // / \ // 186 187 BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI); 188 189 if (RI) { 190 Region *R = RI->getRegionFor(Old); 191 RI->setRegionFor(NewBlock, R); 192 } 193 194 // After: 195 // 196 // \ / // 197 // Old // 198 // | // 199 // NewBlock // 200 // / \ // 201 202 return NewBlock; 203 } 204 205 void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT, 206 LoopInfo *LI, RegionInfo *RI) { 207 // Find first non-alloca instruction. Every basic block has a non-alloca 208 // instruction, as every well formed basic block has a terminator. 209 BasicBlock::iterator I = EntryBlock->begin(); 210 while (isa<AllocaInst>(I)) 211 ++I; 212 213 // splitBlock updates DT, LI and RI. 214 splitBlock(EntryBlock, &*I, DT, LI, RI); 215 } 216 217 void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) { 218 auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 219 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr; 220 auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>(); 221 auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr; 222 RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>(); 223 RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr; 224 225 // splitBlock updates DT, LI and RI. 226 polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI); 227 } 228 229 /// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem 230 /// instruction but just use it, if it is referenced as a SCEVUnknown. We want 231 /// however to generate new code if the instruction is in the analyzed region 232 /// and we generate code outside/in front of that region. 
Hence, we generate the 233 /// code for the SDiv/SRem operands in front of the analyzed region and then 234 /// create a new SDiv/SRem operation there too. 235 struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> { 236 friend struct SCEVVisitor<ScopExpander, const SCEV *>; 237 238 explicit ScopExpander(const Region &R, ScalarEvolution &SE, 239 const DataLayout &DL, const char *Name, ValueMapT *VMap, 240 BasicBlock *RTCBB) 241 : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R), 242 VMap(VMap), RTCBB(RTCBB) {} 243 244 Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) { 245 // If we generate code in the region we will immediately fall back to the 246 // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if 247 // needed replace them by copies computed in the entering block. 248 if (!R.contains(I)) 249 E = visit(E); 250 return Expander.expandCodeFor(E, Ty, I); 251 } 252 253 const SCEV *visit(const SCEV *E) { 254 // Cache the expansion results for intermediate SCEV expressions. A SCEV 255 // expression can refer to an operand multiple times (e.g. "x*x), so 256 // a naive visitor takes exponential time. 
257 if (SCEVCache.count(E)) 258 return SCEVCache[E]; 259 const SCEV *Result = SCEVVisitor::visit(E); 260 SCEVCache[E] = Result; 261 return Result; 262 } 263 264 private: 265 SCEVExpander Expander; 266 ScalarEvolution &SE; 267 const char *Name; 268 const Region &R; 269 ValueMapT *VMap; 270 BasicBlock *RTCBB; 271 DenseMap<const SCEV *, const SCEV *> SCEVCache; 272 273 const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst, 274 Instruction *IP) { 275 if (!Inst || !R.contains(Inst)) 276 return E; 277 278 assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() && 279 !isa<PHINode>(Inst)); 280 281 auto *InstClone = Inst->clone(); 282 for (auto &Op : Inst->operands()) { 283 assert(SE.isSCEVable(Op->getType())); 284 auto *OpSCEV = SE.getSCEV(Op); 285 auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP); 286 InstClone->replaceUsesOfWith(Op, OpClone); 287 } 288 289 InstClone->setName(Name + Inst->getName()); 290 InstClone->insertBefore(IP); 291 return SE.getSCEV(InstClone); 292 } 293 294 const SCEV *visitUnknown(const SCEVUnknown *E) { 295 296 // If a value mapping was given try if the underlying value is remapped. 297 Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr; 298 if (NewVal) { 299 auto *NewE = SE.getSCEV(NewVal); 300 301 // While the mapped value might be different the SCEV representation might 302 // not be. To this end we will check before we go into recursion here. 
303 if (E != NewE) 304 return visit(NewE); 305 } 306 307 Instruction *Inst = dyn_cast<Instruction>(E->getValue()); 308 Instruction *IP; 309 if (Inst && !R.contains(Inst)) 310 IP = Inst; 311 else if (Inst && RTCBB->getParent() == Inst->getFunction()) 312 IP = RTCBB->getTerminator(); 313 else 314 IP = RTCBB->getParent()->getEntryBlock().getTerminator(); 315 316 if (!Inst || (Inst->getOpcode() != Instruction::SRem && 317 Inst->getOpcode() != Instruction::SDiv)) 318 return visitGenericInst(E, Inst, IP); 319 320 const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0)); 321 const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1)); 322 323 if (!SE.isKnownNonZero(RHSScev)) 324 RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1)); 325 326 Value *LHS = expandCodeFor(LHSScev, E->getType(), IP); 327 Value *RHS = expandCodeFor(RHSScev, E->getType(), IP); 328 329 Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(), 330 LHS, RHS, Inst->getName() + Name, IP); 331 return SE.getSCEV(Inst); 332 } 333 334 /// The following functions will just traverse the SCEV and rebuild it with 335 /// the new operands returned by the traversal. 
336 /// 337 ///{ 338 const SCEV *visitConstant(const SCEVConstant *E) { return E; } 339 const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) { 340 return SE.getTruncateExpr(visit(E->getOperand()), E->getType()); 341 } 342 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) { 343 return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType()); 344 } 345 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) { 346 return SE.getSignExtendExpr(visit(E->getOperand()), E->getType()); 347 } 348 const SCEV *visitUDivExpr(const SCEVUDivExpr *E) { 349 auto *RHSScev = visit(E->getRHS()); 350 if (!SE.isKnownNonZero(RHSScev)) 351 RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1)); 352 return SE.getUDivExpr(visit(E->getLHS()), RHSScev); 353 } 354 const SCEV *visitAddExpr(const SCEVAddExpr *E) { 355 SmallVector<const SCEV *, 4> NewOps; 356 for (const SCEV *Op : E->operands()) 357 NewOps.push_back(visit(Op)); 358 return SE.getAddExpr(NewOps); 359 } 360 const SCEV *visitMulExpr(const SCEVMulExpr *E) { 361 SmallVector<const SCEV *, 4> NewOps; 362 for (const SCEV *Op : E->operands()) 363 NewOps.push_back(visit(Op)); 364 return SE.getMulExpr(NewOps); 365 } 366 const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) { 367 SmallVector<const SCEV *, 4> NewOps; 368 for (const SCEV *Op : E->operands()) 369 NewOps.push_back(visit(Op)); 370 return SE.getUMaxExpr(NewOps); 371 } 372 const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) { 373 SmallVector<const SCEV *, 4> NewOps; 374 for (const SCEV *Op : E->operands()) 375 NewOps.push_back(visit(Op)); 376 return SE.getSMaxExpr(NewOps); 377 } 378 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) { 379 SmallVector<const SCEV *, 4> NewOps; 380 for (const SCEV *Op : E->operands()) 381 NewOps.push_back(visit(Op)); 382 return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags()); 383 } 384 ///} 385 }; 386 387 Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL, 388 const char *Name, const 
SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  // Thin wrapper: build a ScopExpander for the SCoP's region and expand there.
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}

// Heuristically decide whether @p BB is a rarely executed "error block" whose
// execution Polly may speculate on. Returns false unconditionally when the
// -polly-allow-error-blocks flag is off.
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  if (!PollyAllowErrorBlocks)
    return false;

  // A block ending in 'unreachable' is a definite error block.
  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  // Loop headers are executed repeatedly; never treat them as error blocks.
  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution can not be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    // Top-level region: check dominance of every returning block instead of
    // the (non-existent) region exit's predecessors.
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

  // Finally, classify by the calls the block makes: an unmodeled call that
  // may access memory, or a noreturn call, marks the block as an error block.
  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isDebugCall(CI))
        continue;

      if (isIgnoredIntrinsic(CI))
        continue;

      // memset, memcpy and memmove are modeled intrinsics.
      if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI))
        continue;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}

// Return the branch/switch condition of terminator @p TI. Unconditional
// branches yield the constant i1 'true'; other terminator kinds yield nullptr.
Value *polly::getConditionFromTerminator(Instruction *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}

// Return true if any index operand of @p Gep (all operands after the base
// pointer) is not loop-invariant with respect to the outermost loop of @p R
// that contains @p L.
static bool hasVariantIndex(GetElementPtrInst *Gep, Loop *L, Region &R,
                            ScalarEvolution &SE) {
  for (const Use &Val : llvm::drop_begin(Gep->operands(), 1)) {
    const SCEV *PtrSCEV = SE.getSCEVAtScope(Val, L);
    Loop *OuterLoop = R.outermostLoopInRegion(L);
    if (!SE.isLoopInvariant(PtrSCEV, OuterLoop))
      return true;
  }
  return false;
}

// Decide whether @p LInst can be hoisted out of the region as an invariant
// load: its address must be invariant in all region loops and no potential
// write to it inside the region may be guaranteed to execute.
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT,
                            const InvariantLoadsSetTy &KnownInvariantLoads) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();

  // A LoadInst is hoistable if the address it is loading from is also
  // invariant; in this case: another invariant load (whether that address
  // is also not written to has to be checked separately)
  // TODO: This only checks for a LoadInst->GetElementPtrInst->LoadInst
  // pattern generated by the Chapel frontend, but generally this applies
  // for any chain of instruction that does not also depend on any
  // induction variable
  if (auto *GepInst = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (!hasVariantIndex(GepInst, L, R, SE)) {
      if (auto *DecidingLoad =
              dyn_cast<LoadInst>(GepInst->getPointerOperand())) {
        if (KnownInvariantLoads.count(DecidingLoad))
          return true;
      }
    }
  }

  const SCEV *PtrSCEV =
SE.getSCEVAtScope(Ptr, L);
  // The address must be invariant in every region loop enclosing the load.
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  // Check every potential writer through the same pointer inside the region.
  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    // A write that dominates the load could change the loaded value.
    auto &BB = *UserI->getParent();
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    // Determine whether the writing block is executed on every path through
    // the region/function (same logic as in isErrorBlock above).
    bool DominatesAllPredecessors = true;
    if (R.isTopLevelRegion()) {
      for (BasicBlock &I : *R.getEntry()->getParent())
        if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
          DominatesAllPredecessors = false;
    } else {
      for (auto Pred : predecessors(R.getExit()))
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
          DominatesAllPredecessors = false;
    }

    // Writes that are not guaranteed to execute do not block hoisting.
    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}

// Return true if @p V is an intrinsic call Polly can safely ignore when
// modeling a SCoP (markers, annotations, and debug-info intrinsics).
bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}

// Return true if @p V can be re-synthesized from its SCEV at scope @p Scope,
// i.e. SCEV can compute it and the expression has no scalar dependences inside
// the SCoP's region (modulo the SCoP's required invariant loads).
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
        return true;

  return false;
}

// Return the basic block in which use @p U "happens": for a PHI operand this
// is the corresponding incoming block, otherwise the user's parent block.
// Returns nullptr if the user is not an instruction.
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}

// Derive per-dimension subscript expressions and (array) dimension sizes from
// @p GEP. On an unsupported type shape both result vectors are returned empty.
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      // First index steps through the pointee; a constant-zero first index is
      // dropped (DroppedFirstDim) rather than recorded as a subscript.
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        // Unsupported base type: give up and report no subscripts/sizes.
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    // All further indices must step into arrays with known element counts.
    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if
(!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}

// Walk up the loop tree from @p L until a loop is found that is not in
// @p BoxedLoops; may return nullptr when all enclosing loops are boxed.
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}

// Convenience overload: start from the innermost loop containing @p BB.
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *L = LI.getLoopFor(BB);
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
}

// Return true if @p Inst is a direct call to one of the functions named in
// the -polly-debug-func command-line list.
bool polly::isDebugCall(Instruction *Inst) {
  auto *CI = dyn_cast<CallInst>(Inst);
  if (!CI)
    return false;

  // Indirect calls have no called function to compare against.
  Function *CF = CI->getCalledFunction();
  if (!CF)
    return false;

  return std::find(DebugFunctions.begin(), DebugFunctions.end(),
                   CF->getName()) != DebugFunctions.end();
}

// File-local helper: does any instruction of @p BB qualify as a debug call?
static bool hasDebugCall(BasicBlock *BB) {
  for (Instruction &Inst : *BB) {
    if (isDebugCall(&Inst))
      return true;
  }
  return false;
}

// Return true if statement @p Stmt contains a debug call, checking both its
// own instruction list and, for region statements, all non-entry blocks.
bool polly::hasDebugCall(ScopStmt *Stmt) {
  // Quick skip if no debug functions have been defined.
  if (DebugFunctions.empty())
    return false;

  if (!Stmt)
    return false;

  for (Instruction *Inst : Stmt->getInstructions())
    if (isDebugCall(Inst))
      return true;

  if (Stmt->isRegionStmt()) {
    for (BasicBlock *RBB : Stmt->getRegion()->blocks())
      if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB))
        return true;
  }

  return false;
}