1 //===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Small functions that help with Scop and LLVM-IR.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "polly/Support/ScopHelper.h"
14 #include "polly/Options.h"
15 #include "polly/ScopInfo.h"
16 #include "polly/Support/SCEVValidator.h"
17 #include "llvm/Analysis/LoopInfo.h"
18 #include "llvm/Analysis/RegionInfo.h"
19 #include "llvm/Analysis/ScalarEvolution.h"
20 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
21 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
22 #include "llvm/Transforms/Utils/LoopUtils.h"
23 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
24 
25 using namespace llvm;
26 using namespace polly;
27 
28 #define DEBUG_TYPE "polly-scop-helper"
29 
30 static cl::list<std::string> DebugFunctions(
31     "polly-debug-func",
32     cl::desc("Allow calls to the specified functions in SCoPs even if their "
33              "side-effects are unknown. This can be used to do debug output in "
34              "Polly-transformed code."),
35     cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));
36 
37 // Ensures that there is just one predecessor to the entry node from outside the
38 // region.
39 // The identity of the region entry node is preserved.
40 static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
41                                 RegionInfo *RI) {
42   BasicBlock *EnteringBB = R->getEnteringBlock();
43   BasicBlock *Entry = R->getEntry();
44 
45   // Before (one of):
46   //
47   //                       \    /            //
48   //                      EnteringBB         //
49   //                        |    \------>    //
50   //   \   /                |                //
51   //   Entry <--\         Entry <--\         //
52   //   /   \    /         /   \    /         //
53   //        ....               ....          //
54 
  // Create a single entry edge if the region has multiple entry edges.
56   if (!EnteringBB) {
57     SmallVector<BasicBlock *, 4> Preds;
58     for (BasicBlock *P : predecessors(Entry))
59       if (!R->contains(P))
60         Preds.push_back(P);
61 
62     BasicBlock *NewEntering =
63         SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);
64 
65     if (RI) {
      // The exit block of preceding regions must be changed to NewEntering.
67       for (BasicBlock *ExitPred : predecessors(NewEntering)) {
68         Region *RegionOfPred = RI->getRegionFor(ExitPred);
69         if (RegionOfPred->getExit() != Entry)
70           continue;
71 
72         while (!RegionOfPred->isTopLevelRegion() &&
73                RegionOfPred->getExit() == Entry) {
74           RegionOfPred->replaceExit(NewEntering);
75           RegionOfPred = RegionOfPred->getParent();
76         }
77       }
78 
      // Make all ancestor regions that shared the old entry use NewEntering as
      // their entry; the entering edges from outside now target NewEntering.
80       Region *AncestorR = R->getParent();
81       RI->setRegionFor(NewEntering, AncestorR);
82       while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
83         AncestorR->replaceEntry(NewEntering);
84         AncestorR = AncestorR->getParent();
85       }
86     }
87 
88     EnteringBB = NewEntering;
89   }
90   assert(R->getEnteringBlock() == EnteringBB);
91 
92   // After:
93   //
94   //    \    /       //
95   //  EnteringBB     //
96   //      |          //
97   //      |          //
98   //    Entry <--\   //
99   //    /   \    /   //
100   //         ....    //
101 }
102 
103 // Ensure that the region has a single block that branches to the exit node.
104 static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
105                                RegionInfo *RI) {
106   BasicBlock *ExitBB = R->getExit();
107   BasicBlock *ExitingBB = R->getExitingBlock();
108 
109   // Before:
110   //
111   //   (Region)   ______/  //
112   //      \  |   /         //
113   //       ExitBB          //
114   //       /    \          //
115 
116   if (!ExitingBB) {
117     SmallVector<BasicBlock *, 4> Preds;
118     for (BasicBlock *P : predecessors(ExitBB))
119       if (R->contains(P))
120         Preds.push_back(P);
121 
122     //  Preds[0] Preds[1]      otherBB //
123     //         \  |  ________/         //
124     //          \ | /                  //
125     //           BB                    //
126     ExitingBB =
127         SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
128     // Preds[0] Preds[1]      otherBB  //
129     //        \  /           /         //
130     // BB.region_exiting    /          //
131     //                  \  /           //
132     //                   BB            //
133 
134     if (RI)
135       RI->setRegionFor(ExitingBB, R);
136 
    // Change the exit of nested regions, but not of the region itself;
    // replaceExitRecursive also changes this region's exit, so restore it.
138     R->replaceExitRecursive(ExitingBB);
139     R->replaceExit(ExitBB);
140   }
141   assert(ExitingBB == R->getExitingBlock());
142 
143   // After:
144   //
145   //     \   /                //
146   //    ExitingBB     _____/  //
147   //          \      /        //
148   //           ExitBB         //
149   //           /    \         //
150 }
151 
152 void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
153                            RegionInfo *RI) {
154   assert(R && !R->isTopLevelRegion());
155   assert(!RI || RI == R->getRegionInfo());
156   assert((!RI || DT) &&
157          "RegionInfo requires DominatorTree to be updated as well");
158 
159   simplifyRegionEntry(R, DT, LI, RI);
160   simplifyRegionExit(R, DT, LI, RI);
161   assert(R->isSimple());
162 }
163 
164 // Split the block into two successive blocks.
165 //
166 // Like llvm::SplitBlock, but also preserves RegionInfo
167 static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
168                               DominatorTree *DT, llvm::LoopInfo *LI,
169                               RegionInfo *RI) {
170   assert(Old && SplitPt);
171 
172   // Before:
173   //
174   //  \   /  //
175   //   Old   //
176   //  /   \  //
177 
178   BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);
179 
180   if (RI) {
181     Region *R = RI->getRegionFor(Old);
182     RI->setRegionFor(NewBlock, R);
183   }
184 
185   // After:
186   //
187   //   \   /    //
188   //    Old     //
189   //     |      //
190   //  NewBlock  //
191   //   /   \    //
192 
193   return NewBlock;
194 }
195 
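// Split \p EntryBlock directly after its leading alloca instructions: the
// allocas stay in the first half of the block, everything else is moved into a
// new block.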
196 void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
197                                      LoopInfo *LI, RegionInfo *RI) {
  // Find the first non-alloca instruction. Every basic block has a non-alloca
  // instruction, as every well-formed basic block has a terminator.
200   BasicBlock::iterator I = EntryBlock->begin();
201   while (isa<AllocaInst>(I))
202     ++I;
203 
204   // splitBlock updates DT, LI and RI.
205   splitBlock(EntryBlock, &*I, DT, LI, RI);
206 }
207 
208 void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
209   auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
210   auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
211   auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
212   auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
213   RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
214   RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;
215 
216   // splitBlock updates DT, LI and RI.
217   polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
218 }
219 
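// Record an assumption for later processing by the caller. If no container for
// recorded assumptions is given, the assumption is dropped.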
220 void polly::recordAssumption(polly::RecordedAssumptionsTy *RecordedAssumptions,
221                              polly::AssumptionKind Kind, isl::set Set,
222                              DebugLoc Loc, polly::AssumptionSign Sign,
223                              BasicBlock *BB, bool RTC) {
224   assert((Set.is_params() || BB) &&
225          "Assumptions without a basic block must be parameter sets");
226   if (RecordedAssumptions)
227     RecordedAssumptions->push_back({Kind, Sign, Set, Loc, BB, RTC});
228 }
229 
/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction but simply reuse it if it is referenced as a SCEVUnknown.
/// However, we want to generate new code if the instruction is inside the
/// analyzed region and we generate code outside of/in front of that region.
/// Hence, we generate the code for the SDiv/SRem operands in front of the
/// analyzed region and then create a new SDiv/SRem operation there too.
236 struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
237   friend struct SCEVVisitor<ScopExpander, const SCEV *>;
238 
239   explicit ScopExpander(const Region &R, ScalarEvolution &SE,
240                         const DataLayout &DL, const char *Name, ValueMapT *VMap,
241                         BasicBlock *RTCBB)
242       : Expander(SE, DL, Name, /*PreserveLCSSA=*/false), SE(SE), Name(Name),
243         R(R), VMap(VMap), RTCBB(RTCBB) {}
244 
245   Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region, we immediately fall back to the
    // SCEVExpander. Otherwise, we stop at all unknowns in the SCEV and, if
    // needed, replace them by copies computed in the entering block.
249     if (!R.contains(I))
250       E = visit(E);
251     return Expander.expandCodeFor(E, Ty, I);
252   }
253 
254   const SCEV *visit(const SCEV *E) {
255     // Cache the expansion results for intermediate SCEV expressions. A SCEV
    // expression can refer to an operand multiple times (e.g. "x*x"), so
257     // a naive visitor takes exponential time.
258     if (SCEVCache.count(E))
259       return SCEVCache[E];
260     const SCEV *Result = SCEVVisitor::visit(E);
261     SCEVCache[E] = Result;
262     return Result;
263   }
264 
265 private:
266   SCEVExpander Expander;
267   ScalarEvolution &SE;
268   const char *Name;
269   const Region &R;
270   ValueMapT *VMap;
271   BasicBlock *RTCBB;
272   DenseMap<const SCEV *, const SCEV *> SCEVCache;
273 
274   const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
275                                Instruction *IP) {
276     if (!Inst || !R.contains(Inst))
277       return E;
278 
279     assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
280            !isa<PHINode>(Inst));
281 
282     auto *InstClone = Inst->clone();
283     for (auto &Op : Inst->operands()) {
284       assert(SE.isSCEVable(Op->getType()));
285       auto *OpSCEV = SE.getSCEV(Op);
286       auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
287       InstClone->replaceUsesOfWith(Op, OpClone);
288     }
289 
290     InstClone->setName(Name + Inst->getName());
291     InstClone->insertBefore(IP);
292     return SE.getSCEV(InstClone);
293   }
294 
295   const SCEV *visitUnknown(const SCEVUnknown *E) {
296 
    // If a value mapping was given, check whether the underlying value has
    // been remapped.
298     Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
299     if (NewVal) {
300       auto *NewE = SE.getSCEV(NewVal);
301 
      // While the mapped value might be different, its SCEV representation
      // might not be. Check this before recursing to avoid endless recursion.
304       if (E != NewE)
305         return visit(NewE);
306     }
307 
308     Instruction *Inst = dyn_cast<Instruction>(E->getValue());
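    // Pick an insertion point for a potential copy: directly in front of
    // out-of-region instructions, otherwise in front of the terminator of the
    // runtime-check block, falling back to the entry block of RTCBB's function
    // if the instruction does not belong to that function.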
309     Instruction *IP;
310     if (Inst && !R.contains(Inst))
311       IP = Inst;
312     else if (Inst && RTCBB->getParent() == Inst->getFunction())
313       IP = RTCBB->getTerminator();
314     else
315       IP = RTCBB->getParent()->getEntryBlock().getTerminator();
316 
317     if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
318                   Inst->getOpcode() != Instruction::SDiv))
319       return visitGenericInst(E, Inst, IP);
320 
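    // For SDiv/SRem rebuild the operation with a divisor that is known to be
    // non-zero, so the copy cannot divide by zero even if the original
    // division would not have been executed at the new location.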
321     const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
322     const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));
323 
324     if (!SE.isKnownNonZero(RHSScev))
325       RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
326 
327     Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
328     Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);
329 
330     Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
331                                   LHS, RHS, Inst->getName() + Name, IP);
332     return SE.getSCEV(Inst);
333   }
334 
335   /// The following functions will just traverse the SCEV and rebuild it with
336   /// the new operands returned by the traversal.
337   ///
338   ///{
339   const SCEV *visitConstant(const SCEVConstant *E) { return E; }
340   const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *E) {
341     return SE.getPtrToIntExpr(visit(E->getOperand()), E->getType());
342   }
343   const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
344     return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
345   }
346   const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
347     return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
348   }
349   const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
350     return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
351   }
352   const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
353     auto *RHSScev = visit(E->getRHS());
354     if (!SE.isKnownNonZero(RHSScev))
355       RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
356     return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
357   }
358   const SCEV *visitAddExpr(const SCEVAddExpr *E) {
359     SmallVector<const SCEV *, 4> NewOps;
360     for (const SCEV *Op : E->operands())
361       NewOps.push_back(visit(Op));
362     return SE.getAddExpr(NewOps);
363   }
364   const SCEV *visitMulExpr(const SCEVMulExpr *E) {
365     SmallVector<const SCEV *, 4> NewOps;
366     for (const SCEV *Op : E->operands())
367       NewOps.push_back(visit(Op));
368     return SE.getMulExpr(NewOps);
369   }
370   const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
371     SmallVector<const SCEV *, 4> NewOps;
372     for (const SCEV *Op : E->operands())
373       NewOps.push_back(visit(Op));
374     return SE.getUMaxExpr(NewOps);
375   }
376   const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
377     SmallVector<const SCEV *, 4> NewOps;
378     for (const SCEV *Op : E->operands())
379       NewOps.push_back(visit(Op));
380     return SE.getSMaxExpr(NewOps);
381   }
382   const SCEV *visitUMinExpr(const SCEVUMinExpr *E) {
383     SmallVector<const SCEV *, 4> NewOps;
384     for (const SCEV *Op : E->operands())
385       NewOps.push_back(visit(Op));
386     return SE.getUMinExpr(NewOps);
387   }
388   const SCEV *visitSMinExpr(const SCEVSMinExpr *E) {
389     SmallVector<const SCEV *, 4> NewOps;
390     for (const SCEV *Op : E->operands())
391       NewOps.push_back(visit(Op));
392     return SE.getSMinExpr(NewOps);
393   }
394   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
395     SmallVector<const SCEV *, 4> NewOps;
396     for (const SCEV *Op : E->operands())
397       NewOps.push_back(visit(Op));
398     return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
399   }
400   ///}
401 };
402 
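// Expand a SCEV expression to LLVM-IR at \p IP, recreating any instructions of
// the SCoP's region that the expression refers to (see ScopExpander above).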
403 Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
404                             const char *Name, const SCEV *E, Type *Ty,
405                             Instruction *IP, ValueMapT *VMap,
406                             BasicBlock *RTCBB) {
407   ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
408   return Expander.expandCodeFor(E, Ty, IP);
409 }
410 
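// Return the condition under which the terminator \p TI transfers control: the
// branch condition for conditional branches, an i1 'true' for unconditional
// branches, the switch condition for switches, and nullptr for all other
// terminators.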
411 Value *polly::getConditionFromTerminator(Instruction *TI) {
412   if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
413     if (BR->isUnconditional())
414       return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));
415 
416     return BR->getCondition();
417   }
418 
419   if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
420     return SI->getCondition();
421 
422   return nullptr;
423 }
424 
425 Loop *polly::getLoopSurroundingScop(Scop &S, LoopInfo &LI) {
  // Start with the smallest loop containing the entry and expand that loop
  // until it contains all blocks in the region. If there is a loop containing
  // all blocks in the region, check whether it is itself contained in the
  // region; if so, take its parent loop, as that is the smallest loop
  // containing the region but not contained by it.
431   Loop *L = LI.getLoopFor(S.getEntry());
432   while (L) {
433     bool AllContained = true;
434     for (auto *BB : S.blocks())
435       AllContained &= L->contains(BB);
436     if (AllContained)
437       break;
438     L = L->getParentLoop();
439   }
440 
441   return L ? (S.contains(L) ? L->getParentLoop() : L) : nullptr;
442 }
443 
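// Return the number of blocks in \p L, also counting exit blocks that end in
// an unreachable (cf. getRegionNodeLoop, which treats such blocks as part of
// the loop).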
444 unsigned polly::getNumBlocksInLoop(Loop *L) {
445   unsigned NumBlocks = L->getNumBlocks();
446   SmallVector<BasicBlock *, 4> ExitBlocks;
447   L->getExitBlocks(ExitBlocks);
448 
  for (auto *ExitBlock : ExitBlocks) {
450     if (isa<UnreachableInst>(ExitBlock->getTerminator()))
451       NumBlocks++;
452   }
453   return NumBlocks;
454 }
455 
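// Return the number of basic blocks a region node covers: one for a plain
// basic block node, the number of blocks of the subregion otherwise.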
456 unsigned polly::getNumBlocksInRegionNode(RegionNode *RN) {
457   if (!RN->isSubRegion())
458     return 1;
459 
460   Region *R = RN->getNodeAs<Region>();
461   return std::distance(R->block_begin(), R->block_end());
462 }
463 
464 Loop *polly::getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
465   if (!RN->isSubRegion()) {
466     BasicBlock *BB = RN->getNodeAs<BasicBlock>();
467     Loop *L = LI.getLoopFor(BB);
468 
    // Unreachable statements are not considered to belong to an LLVM loop, as
470     // they are not part of an actual loop in the control flow graph.
471     // Nevertheless, we handle certain unreachable statements that are common
472     // when modeling run-time bounds checks as being part of the loop to be
473     // able to model them and to later eliminate the run-time bounds checks.
474     //
475     // Specifically, for basic blocks that terminate in an unreachable and
476     // where the immediate predecessor is part of a loop, we assume these
477     // basic blocks belong to the loop the predecessor belongs to. This
478     // allows us to model the following code.
479     //
480     // for (i = 0; i < N; i++) {
481     //   if (i > 1024)
482     //     abort();            <- this abort might be translated to an
483     //                            unreachable
484     //
485     //   A[i] = ...
486     // }
487     if (!L && isa<UnreachableInst>(BB->getTerminator()) && BB->getPrevNode())
488       L = LI.getLoopFor(BB->getPrevNode());
489     return L;
490   }
491 
492   Region *NonAffineSubRegion = RN->getNodeAs<Region>();
493   Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
494   while (L && NonAffineSubRegion->contains(L))
495     L = L->getParentLoop();
496   return L;
497 }
498 
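/// Return true if any index operand of \p Gep is not invariant with respect to
/// the outermost loop of \p R that contains \p L.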
499 static bool hasVariantIndex(GetElementPtrInst *Gep, Loop *L, Region &R,
500                             ScalarEvolution &SE) {
  for (const Use &Idx : llvm::drop_begin(Gep->operands(), 1)) {
    const SCEV *IdxSCEV = SE.getSCEVAtScope(Idx, L);
    Loop *OuterLoop = R.outermostLoopInRegion(L);
    if (!SE.isLoopInvariant(IdxSCEV, OuterLoop))
      return true;
506   }
507   return false;
508 }
509 
510 bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
511                             ScalarEvolution &SE, const DominatorTree &DT,
512                             const InvariantLoadsSetTy &KnownInvariantLoads) {
513   Loop *L = LI.getLoopFor(LInst->getParent());
514   auto *Ptr = LInst->getPointerOperand();
515 
  // A LoadInst is hoistable if the address it loads from is also invariant; in
  // this case the address is itself another invariant load (whether that
  // address is also not written to has to be checked separately).
  // TODO: This only checks for a LoadInst->GetElementPtrInst->LoadInst
  // pattern generated by the Chapel frontend, but generally this applies
  // to any chain of instructions that does not also depend on any
  // induction variable.
523   if (auto *GepInst = dyn_cast<GetElementPtrInst>(Ptr)) {
524     if (!hasVariantIndex(GepInst, L, R, SE)) {
525       if (auto *DecidingLoad =
526               dyn_cast<LoadInst>(GepInst->getPointerOperand())) {
527         if (KnownInvariantLoads.count(DecidingLoad))
528           return true;
529       }
530     }
531   }
532 
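  // The load address must be invariant in every loop of the region that
  // surrounds the load.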
533   const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
534   while (L && R.contains(L)) {
535     if (!SE.isLoopInvariant(PtrSCEV, L))
536       return false;
537     L = L->getParentLoop();
538   }
539 
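  // A user that may write through the same pointer makes the load
  // non-hoistable if it dominates the load, or if it dominates every exiting
  // block of the region (every returning block for the top-level region).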
540   for (auto *User : Ptr->users()) {
541     auto *UserI = dyn_cast<Instruction>(User);
542     if (!UserI || !R.contains(UserI))
543       continue;
544     if (!UserI->mayWriteToMemory())
545       continue;
546 
547     auto &BB = *UserI->getParent();
548     if (DT.dominates(&BB, LInst->getParent()))
549       return false;
550 
551     bool DominatesAllPredecessors = true;
552     if (R.isTopLevelRegion()) {
553       for (BasicBlock &I : *R.getEntry()->getParent())
554         if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
555           DominatesAllPredecessors = false;
556     } else {
      for (auto *Pred : predecessors(R.getExit()))
558         if (R.contains(Pred) && !DT.dominates(&BB, Pred))
559           DominatesAllPredecessors = false;
560     }
561 
562     if (!DominatesAllPredecessors)
563       continue;
564 
565     return false;
566   }
567 
568   return true;
569 }
570 
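// Return true for intrinsic calls that Polly can safely ignore when modeling a
// SCoP: lifetime and invariant markers, annotations, llvm.assume,
// llvm.donothing and debug-info intrinsics.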
571 bool polly::isIgnoredIntrinsic(const Value *V) {
572   if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
573     switch (IT->getIntrinsicID()) {
574     // Lifetime markers are supported/ignored.
575     case llvm::Intrinsic::lifetime_start:
576     case llvm::Intrinsic::lifetime_end:
577     // Invariant markers are supported/ignored.
578     case llvm::Intrinsic::invariant_start:
579     case llvm::Intrinsic::invariant_end:
580     // Some misc annotations are supported/ignored.
581     case llvm::Intrinsic::var_annotation:
582     case llvm::Intrinsic::ptr_annotation:
583     case llvm::Intrinsic::annotation:
584     case llvm::Intrinsic::donothing:
585     case llvm::Intrinsic::assume:
586     // Some debug info intrinsics are supported/ignored.
587     case llvm::Intrinsic::dbg_value:
588     case llvm::Intrinsic::dbg_declare:
589       return true;
590     default:
591       break;
592     }
593   }
594   return false;
595 }
596 
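// A value can be synthesized from ScalarEvolution if SCEV can compute it at
// the given scope and the resulting expression does not depend on values
// defined inside the region, except for the required invariant loads.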
597 bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
598                           Loop *Scope) {
599   if (!V || !SE->isSCEVable(V->getType()))
600     return false;
601 
602   const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
603   if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
604     if (!isa<SCEVCouldNotCompute>(Scev))
605       if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
606         return true;
607 
608   return false;
609 }
610 
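// Return the block in which the use \p U takes effect: the incoming block for
// uses in PHI nodes, the parent block of the user otherwise.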
611 llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
612   Instruction *UI = dyn_cast<Instruction>(U.getUser());
613   if (!UI)
614     return nullptr;
615 
616   if (PHINode *PHI = dyn_cast<PHINode>(UI))
617     return PHI->getIncomingBlock(U);
618 
619   return UI->getParent();
620 }
621 
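// Walk up the loop tree from \p L until a loop is found that is not a boxed
// loop (a loop contained in a non-affine subregion of the SCoP).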
622 llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
623                                            const BoxedLoopsSetTy &BoxedLoops) {
624   while (BoxedLoops.count(L))
625     L = L->getParentLoop();
626   return L;
627 }
628 
629 llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
630                                            llvm::LoopInfo &LI,
631                                            const BoxedLoopsSetTy &BoxedLoops) {
632   Loop *L = LI.getLoopFor(BB);
633   return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
634 }
635 
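// Return true if \p Inst is a call to one of the functions listed in
// -polly-debug-func.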
636 bool polly::isDebugCall(Instruction *Inst) {
637   auto *CI = dyn_cast<CallInst>(Inst);
638   if (!CI)
639     return false;
640 
641   Function *CF = CI->getCalledFunction();
642   if (!CF)
643     return false;
644 
645   return std::find(DebugFunctions.begin(), DebugFunctions.end(),
646                    CF->getName()) != DebugFunctions.end();
647 }
648 
649 static bool hasDebugCall(BasicBlock *BB) {
650   for (Instruction &Inst : *BB) {
651     if (isDebugCall(&Inst))
652       return true;
653   }
654   return false;
655 }
656 
657 bool polly::hasDebugCall(ScopStmt *Stmt) {
658   // Quick skip if no debug functions have been defined.
659   if (DebugFunctions.empty())
660     return false;
661 
662   if (!Stmt)
663     return false;
664 
665   for (Instruction *Inst : Stmt->getInstructions())
666     if (isDebugCall(Inst))
667       return true;
668 
669   if (Stmt->isRegionStmt()) {
670     for (BasicBlock *RBB : Stmt->getRegion()->blocks())
671       if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB))
672         return true;
673   }
674 
675   return false;
676 }
677 
678 /// Find a property in a LoopID.
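///
/// A LoopID is a metadata node whose first operand is a self-reference and
/// whose remaining operands are property nodes, each starting with the
/// property name, for example:
///
///   !0 = distinct !{!0, !1, !2}
///   !1 = !{!"llvm.loop.unroll.disable"}
///   !2 = !{!"llvm.loop.vectorize.width", i32 4}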
679 static MDNode *findNamedMetadataNode(MDNode *LoopMD, StringRef Name) {
680   if (!LoopMD)
681     return nullptr;
682   for (const MDOperand &X : drop_begin(LoopMD->operands(), 1)) {
683     auto *OpNode = dyn_cast<MDNode>(X.get());
684     if (!OpNode)
685       continue;
686 
687     auto *OpName = dyn_cast<MDString>(OpNode->getOperand(0));
688     if (!OpName)
689       continue;
690     if (OpName->getString() == Name)
691       return OpNode;
692   }
693   return nullptr;
694 }
695 
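/// Return the argument of a property in a LoopID: None if the property is not
/// present, nullptr if it is present but has no argument, and a pointer to the
/// argument operand otherwise.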
696 static Optional<const MDOperand *> findNamedMetadataArg(MDNode *LoopID,
697                                                         StringRef Name) {
698   MDNode *MD = findNamedMetadataNode(LoopID, Name);
699   if (!MD)
700     return None;
701   switch (MD->getNumOperands()) {
702   case 1:
703     return nullptr;
704   case 2:
705     return &MD->getOperand(1);
706   default:
    llvm_unreachable("loop metadata must have 0 or 1 operands");
708   }
709 }
710 
711 Optional<Metadata *> polly::findMetadataOperand(MDNode *LoopMD,
712                                                 StringRef Name) {
713   MDNode *MD = findNamedMetadataNode(LoopMD, Name);
714   if (!MD)
715     return None;
716   switch (MD->getNumOperands()) {
717   case 1:
718     return nullptr;
719   case 2:
720     return MD->getOperand(1).get();
721   default:
722     llvm_unreachable("loop metadata must have 0 or 1 operands");
723   }
724 }
725 
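/// Interpret a LoopID property as a boolean: an absent property yields None, a
/// property without an argument or with a non-integer argument yields true,
/// and an integer argument is interpreted as zero/non-zero.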
726 static Optional<bool> getOptionalBoolLoopAttribute(MDNode *LoopID,
727                                                    StringRef Name) {
728   MDNode *MD = findNamedMetadataNode(LoopID, Name);
729   if (!MD)
730     return None;
731   switch (MD->getNumOperands()) {
732   case 1:
733     return true;
734   case 2:
735     if (ConstantInt *IntMD =
736             mdconst::extract_or_null<ConstantInt>(MD->getOperand(1).get()))
737       return IntMD->getZExtValue();
738     return true;
739   }
740   llvm_unreachable("unexpected number of options");
741 }
742 
743 bool polly::getBooleanLoopAttribute(MDNode *LoopID, StringRef Name) {
744   return getOptionalBoolLoopAttribute(LoopID, Name).getValueOr(false);
745 }
746 
747 llvm::Optional<int> polly::getOptionalIntLoopAttribute(MDNode *LoopID,
748                                                        StringRef Name) {
749   const MDOperand *AttrMD =
750       findNamedMetadataArg(LoopID, Name).getValueOr(nullptr);
751   if (!AttrMD)
752     return None;
753 
754   ConstantInt *IntMD = mdconst::extract_or_null<ConstantInt>(AttrMD->get());
755   if (!IntMD)
756     return None;
757 
758   return IntMD->getSExtValue();
759 }
760 
761 bool polly::hasDisableAllTransformsHint(Loop *L) {
762   return llvm::hasDisableAllTransformsHint(L);
763 }
764 
765 bool polly::hasDisableAllTransformsHint(llvm::MDNode *LoopID) {
766   return getBooleanLoopAttribute(LoopID, "llvm.loop.disable_nonforced");
767 }
768 
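// Wrap a BandAttr into an isl::id; the id takes ownership of the BandAttr and
// deletes it when the id itself is freed.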
769 isl::id polly::getIslLoopAttr(isl::ctx Ctx, BandAttr *Attr) {
770   assert(Attr && "Must be a valid BandAttr");
771 
  // The name "Loop with Metadata" signals that this id contains a pointer to a
  // BandAttr. The ScheduleOptimizer also uses the string "Inter iteration
  // alias-free" in markers, but its user pointer is an llvm::Value.
775   isl::id Result = isl::id::alloc(Ctx, "Loop with Metadata", Attr);
776   Result = isl::manage(isl_id_set_free_user(Result.release(), [](void *Ptr) {
777     BandAttr *Attr = reinterpret_cast<BandAttr *>(Ptr);
778     delete Attr;
779   }));
780   return Result;
781 }
782 
783 isl::id polly::createIslLoopAttr(isl::ctx Ctx, Loop *L) {
784   if (!L)
785     return {};
786 
787   // A loop without metadata does not need to be annotated.
788   MDNode *LoopID = L->getLoopID();
789   if (!LoopID)
790     return {};
791 
792   BandAttr *Attr = new BandAttr();
793   Attr->OriginalLoop = L;
794   Attr->Metadata = L->getLoopID();
795 
796   return getIslLoopAttr(Ctx, Attr);
797 }
798 
799 bool polly::isLoopAttr(const isl::id &Id) {
800   if (Id.is_null())
801     return false;
802 
803   return Id.get_name() == "Loop with Metadata";
804 }
805 
806 BandAttr *polly::getLoopAttr(const isl::id &Id) {
807   if (!isLoopAttr(Id))
808     return nullptr;
809 
810   return reinterpret_cast<BandAttr *>(Id.get_user());
811 }
812