//===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow speculating on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<std::string> DebugFunctions(
    "polly-debug-func",
    cl::desc("Allow calls to the specified functions in SCoPs even if their "
             "side-effects are unknown. This can be used to do debug output in "
             "Polly-transformed code."),
    cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));
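
// For example, "-polly-debug-func=printf" would allow calls to printf inside
// analyzed regions (see isDebugCall below).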

// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of predecessor regions whose exit was Entry must be
      // changed to NewEntering.
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestor regions that entered at Entry use NewEntering as
      // their entry instead.
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //  EnteringBB     //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \    /   //
  //         ....    //
}

// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not of the region itself:
    // replaceExitRecursive also updates R, so restore R's exit afterwards.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}

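// Make the region R "simple": afterwards it has a single entering block and a
// single exiting block, as asserted via R->isSimple() below.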
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}

// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo.
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //   /   \    //

  return NewBlock;
}

void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
                                     LoopInfo *LI, RegionInfo *RI) {
  // Find the first non-alloca instruction. Every basic block has one, as
  // every well-formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, &*I, DT, LI, RI);
}

void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
}

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction that is referenced as a SCEVUnknown, but will just reuse it.
/// However, we want to generate new code if the instruction is inside the
/// analyzed region while the expansion target is outside of/in front of that
/// region. Hence, we generate the code for the SDiv/SRem operands in front of
/// the analyzed region and then create a new SDiv/SRem operation there too.
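///
/// For example, if the analyzed region contains "%d = sdiv i64 %a, %b" and a
/// runtime check in front of the region references %d, reusing %d there would
/// violate dominance. Instead, %a and %b are expanded in front of the region
/// and a fresh sdiv is created there (with the divisor adjusted to be
/// provably non-zero, see visitUnknown).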
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap), RTCBB(RTCBB) {}

  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code inside the region, immediately fall back to the
    // SCEVExpander; otherwise, stop at all unknowns in the SCEV and, if
    // needed, replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

  const SCEV *visit(const SCEV *E) {
    // Cache the expansion results for intermediate SCEV expressions. A SCEV
    // expression can refer to an operand multiple times (e.g. "x*x"), so
    // a naive visitor takes exponential time.
    if (SCEVCache.count(E))
      return SCEVCache[E];
    const SCEV *Result = SCEVVisitor::visit(E);
    SCEVCache[E] = Result;
    return Result;
  }

private:
  SCEVExpander Expander;
  ScalarEvolution &SE;
  const char *Name;
  const Region &R;
  ValueMapT *VMap;
  BasicBlock *RTCBB;
  DenseMap<const SCEV *, const SCEV *> SCEVCache;

  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given, check whether the underlying value is
    // remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different, its SCEV representation
      // might not be. Hence, check for that before recursing.
      if (E != NewE)
        return visit(NewE);
    }

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

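    // The SDiv/SRem created below is executed unconditionally in front of the
    // region, so guard against division by zero: if the divisor is not known
    // to be non-zero, clamp it to at least 1 via umax.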
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitUMinExpr(const SCEVUMinExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMinExpr(NewOps);
  }
  const SCEV *visitSMinExpr(const SCEVSMinExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMinExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};

Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}

bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  if (!PollyAllowErrorBlocks)
    return false;

  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution cannot be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

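  // Conservatively classify blocks containing calls with unknown side-effects
  // or calls that never return (e.g. abort()) as error blocks; such calls
  // typically belong to rarely executed error handling code.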
  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isDebugCall(CI))
        continue;

      if (isIgnoredIntrinsic(CI))
        continue;

      // memset, memcpy and memmove are modeled intrinsics.
      if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI))
        continue;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}

Value *polly::getConditionFromTerminator(Instruction *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}

Loop *polly::getLoopSurroundingScop(Scop &S, LoopInfo &LI) {
  // Start with the smallest loop containing the entry and expand it until it
  // contains all blocks in the region. If such a loop exists, check whether
  // it is itself contained in the region; if so, take its parent loop, which
  // is the smallest loop containing the region but not contained by it.
  Loop *L = LI.getLoopFor(S.getEntry());
  while (L) {
    bool AllContained = true;
    for (auto *BB : S.blocks())
      AllContained &= L->contains(BB);
    if (AllContained)
      break;
    L = L->getParentLoop();
  }

  return L ? (S.contains(L) ? L->getParentLoop() : L) : nullptr;
}

unsigned polly::getNumBlocksInLoop(Loop *L) {
  unsigned NumBlocks = L->getNumBlocks();
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);

  for (auto ExitBlock : ExitBlocks) {
    if (isa<UnreachableInst>(ExitBlock->getTerminator()))
      NumBlocks++;
  }
  return NumBlocks;
}

unsigned polly::getNumBlocksInRegionNode(RegionNode *RN) {
  if (!RN->isSubRegion())
    return 1;

  Region *R = RN->getNodeAs<Region>();
  return std::distance(R->block_begin(), R->block_end());
}

Loop *polly::getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
  if (!RN->isSubRegion()) {
    BasicBlock *BB = RN->getNodeAs<BasicBlock>();
    Loop *L = LI.getLoopFor(BB);

    // Unreachable statements are not considered to belong to an LLVM loop, as
    // they are not part of an actual loop in the control flow graph.
    // Nevertheless, we handle certain unreachable statements that are common
    // when modeling run-time bounds checks as being part of the loop to be
    // able to model them and to later eliminate the run-time bounds checks.
    //
    // Specifically, for basic blocks that terminate in an unreachable and
    // whose immediate predecessor is part of a loop, we assume these basic
    // blocks belong to the loop the predecessor belongs to. This allows us
    // to model the following code.
    //
    // for (i = 0; i < N; i++) {
    //   if (i > 1024)
    //     abort();            <- this abort might be translated to an
    //                            unreachable
    //
    //   A[i] = ...
    // }
    if (!L && isa<UnreachableInst>(BB->getTerminator()) && BB->getPrevNode())
      L = LI.getLoopFor(BB->getPrevNode());
    return L;
  }

  Region *NonAffineSubRegion = RN->getNodeAs<Region>();
  Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
  while (L && NonAffineSubRegion->contains(L))
    L = L->getParentLoop();
  return L;
}

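/// Return whether @p Gep has an index that varies inside the outermost loop
/// of region @p R that contains @p L.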
static bool hasVariantIndex(GetElementPtrInst *Gep, Loop *L, Region &R,
                            ScalarEvolution &SE) {
  for (const Use &Val : llvm::drop_begin(Gep->operands(), 1)) {
    const SCEV *PtrSCEV = SE.getSCEVAtScope(Val, L);
    Loop *OuterLoop = R.outermostLoopInRegion(L);
    if (!SE.isLoopInvariant(PtrSCEV, OuterLoop))
      return true;
  }
  return false;
}

bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT,
                            const InvariantLoadsSetTy &KnownInvariantLoads) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();

  // A LoadInst is hoistable if the address it loads from is also invariant;
  // here that means the address is itself the result of another invariant
  // load (whether that address is also never written to has to be checked
  // separately).
  // TODO: This only checks for the LoadInst->GetElementPtrInst->LoadInst
  // pattern generated by the Chapel frontend, but in general it applies to
  // any chain of instructions that does not depend on any induction variable.
  if (auto *GepInst = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (!hasVariantIndex(GepInst, L, R, SE)) {
      if (auto *DecidingLoad =
              dyn_cast<LoadInst>(GepInst->getPointerOperand())) {
        if (KnownInvariantLoads.count(DecidingLoad))
          return true;
      }
    }
  }

  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    auto &BB = *UserI->getParent();
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    bool DominatesAllPredecessors = true;
    if (R.isTopLevelRegion()) {
      for (BasicBlock &I : *R.getEntry()->getParent())
        if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
          DominatesAllPredecessors = false;
    } else {
      for (auto Pred : predecessors(R.getExit()))
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
          DominatesAllPredecessors = false;
    }

    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}

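// A value can be synthesized if ScalarEvolution can compute it at the given
// scope and the resulting expression does not depend on scalars defined
// inside the region, other than the required invariant loads.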
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
        return true;

  return false;
}

llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}

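// For example, for "getelementptr [8 x [16 x float]], [8 x [16 x float]]* %A,
// i64 0, i64 %i, i64 %j" this returns the subscripts {%i, %j} and the sizes
// {16}: the leading constant-zero index is dropped, and the size of the
// outermost dimension is not part of the result.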
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}

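// Boxed loops are loops contained in a non-affine subregion; they are not
// modeled individually, so skip them when looking up the loop for a block.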
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}

llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *L = LI.getLoopFor(BB);
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
}

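// Return whether @p Inst is a direct call to one of the functions named via
// -polly-debug-func.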
bool polly::isDebugCall(Instruction *Inst) {
  auto *CI = dyn_cast<CallInst>(Inst);
  if (!CI)
    return false;

  Function *CF = CI->getCalledFunction();
  if (!CF)
    return false;

  return std::find(DebugFunctions.begin(), DebugFunctions.end(),
                   CF->getName()) != DebugFunctions.end();
}

static bool hasDebugCall(BasicBlock *BB) {
  for (Instruction &Inst : *BB) {
    if (isDebugCall(&Inst))
      return true;
  }
  return false;
}

bool polly::hasDebugCall(ScopStmt *Stmt) {
  // Quick skip if no debug functions have been defined.
  if (DebugFunctions.empty())
    return false;

  if (!Stmt)
    return false;

  for (Instruction *Inst : Stmt->getInstructions())
    if (isDebugCall(Inst))
      return true;

  if (Stmt->isRegionStmt()) {
    for (BasicBlock *RBB : Stmt->getRegion()->blocks())
      if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB))
        return true;
  }

  return false;
}