1 //===- ScopInfo.cpp -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Create a polyhedral description for a static control flow region.
10 //
11 // The pass creates a polyhedral description of the Scops detected by the Scop
12 // detection derived from their LLVM-IR code.
13 //
14 // This representation is shared among several tools in the polyhedral
15 // community, which are e.g. Cloog, Pluto, Loopo, Graphite.
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "polly/ScopInfo.h"
20 #include "polly/LinkAllPasses.h"
21 #include "polly/Options.h"
22 #include "polly/ScopBuilder.h"
23 #include "polly/ScopDetection.h"
24 #include "polly/Support/GICHelper.h"
25 #include "polly/Support/ISLOStream.h"
26 #include "polly/Support/ISLTools.h"
27 #include "polly/Support/SCEVAffinator.h"
28 #include "polly/Support/SCEVValidator.h"
29 #include "polly/Support/ScopHelper.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/PostOrderIterator.h"
33 #include "llvm/ADT/Sequence.h"
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallSet.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/Analysis/AliasAnalysis.h"
38 #include "llvm/Analysis/AssumptionCache.h"
39 #include "llvm/Analysis/Loads.h"
40 #include "llvm/Analysis/LoopInfo.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/RegionInfo.h"
43 #include "llvm/Analysis/RegionIterator.h"
44 #include "llvm/Analysis/ScalarEvolution.h"
45 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/ConstantRange.h"
48 #include "llvm/IR/DataLayout.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/InstrTypes.h"
53 #include "llvm/IR/Instruction.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/Module.h"
56 #include "llvm/IR/PassManager.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/InitializePasses.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "isl/aff.h"
65 #include "isl/local_space.h"
66 #include "isl/map.h"
67 #include "isl/options.h"
68 #include "isl/set.h"
69 #include <cassert>
70 
71 using namespace llvm;
72 using namespace polly;
73 
74 #define DEBUG_TYPE "polly-scops"
75 
// Counters for the different kinds of assumptions taken while modeling SCoPs.
// STATISTIC defines a counter that is printed with -stats in assert builds.
STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
STATISTIC(AssumptionsInvariantLoad,
          "Number of invariant loads assumptions taken.");
STATISTIC(AssumptionsDelinearization,
          "Number of delinearization assumptions taken.");

// Counters describing the shape of the SCoPs that survive ScopInfo.
STATISTIC(NumScops, "Number of feasible SCoPs after ScopInfo");
STATISTIC(NumLoopsInScop, "Number of loops in scops");
STATISTIC(NumBoxedLoops, "Number of boxed loops in SCoPs after ScopInfo");
STATISTIC(NumAffineLoops, "Number of affine loops in SCoPs after ScopInfo");

// Histogram of the maximal loop depth encountered per SCoP.
STATISTIC(NumScopsDepthZero, "Number of scops with maximal loop depth 0");
STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
STATISTIC(NumScopsDepthLarger,
          "Number of scops with maximal loop depth 6 and larger");
STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");

// Counters for scalar (value/PHI/singleton) writes kept after modeling.
STATISTIC(NumValueWrites, "Number of scalar value writes after ScopInfo");
STATISTIC(
    NumValueWritesInLoops,
    "Number of scalar value writes nested in affine loops after ScopInfo");
STATISTIC(NumPHIWrites, "Number of scalar phi writes after ScopInfo");
STATISTIC(NumPHIWritesInLoops,
          "Number of scalar phi writes nested in affine loops after ScopInfo");
STATISTIC(NumSingletonWrites, "Number of singleton writes after ScopInfo");
STATISTIC(NumSingletonWritesInLoops,
          "Number of singleton writes nested in affine loops after ScopInfo");
114 
// Maximal number of disjuncts allowed in a statement domain before the SCoP
// is considered too complex to handle.
unsigned const polly::MaxDisjunctsInDomain = 20;

// The number of disjuncts in the context after which we stop adding more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
static int const MaxDisjunctsInContext = 4;

// Be a bit more generous for the defined behavior context which is used less
// often.
static int const MaxDisjunktsInDefinedBehaviourContext = 8;
125 
// Command-line options controlling the precision of the SCoP model and the
// diagnostics emitted while building it.

// Suppress optimization remarks for assumptions that are already known.
static cl::opt<bool> PollyRemarksMinimal(
    "polly-remarks-minimal",
    cl::desc("Do not emit remarks about assumptions that are known"),
    cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));

// Abort compilation on isl errors instead of continuing.
static cl::opt<bool>
    IslOnErrorAbort("polly-on-isl-error-abort",
                    cl::desc("Abort if an isl error is encountered"),
                    cl::init(true), cl::cat(PollyCategory));

// Trade compile time for tighter inbounds assumptions (see assumeNoOutOfBound).
static cl::opt<bool> PollyPreciseInbounds(
    "polly-precise-inbounds",
    cl::desc("Take more precise inbounds assumptions (do not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// Skip parameter range constraints and the corresponding gist simplification.
static cl::opt<bool> PollyIgnoreParamBounds(
    "polly-ignore-parameter-bounds",
    cl::desc(
        "Do not add parameter bounds and do no gist simplify sets accordingly"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// Keep folded access relations even when they have more disjuncts
// (see foldAccessRelation).
static cl::opt<bool> PollyPreciseFoldAccesses(
    "polly-precise-fold-accesses",
    cl::desc("Fold memory accesses to model more possible delinearizations "
             "(does not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

bool polly::UseInstructionNames;

// Backing option for polly::UseInstructionNames above.
static cl::opt<bool, true> XUseInstructionNames(
    "polly-use-llvm-names",
    cl::desc("Use LLVM-IR names when deriving statement names"),
    cl::location(UseInstructionNames), cl::Hidden, cl::init(false),
    cl::ZeroOrMore, cl::cat(PollyCategory));

// Include per-statement instruction lists when printing SCoPs.
static cl::opt<bool> PollyPrintInstructions(
    "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
    cl::Hidden, cl::Optional, cl::init(false), cl::cat(PollyCategory));

// Raw options forwarded to the isl library.
static cl::list<std::string> IslArgs("polly-isl-arg",
                                     cl::value_desc("argument"),
                                     cl::desc("Option passed to ISL"),
                                     cl::ZeroOrMore, cl::cat(PollyCategory));
169 
170 //===----------------------------------------------------------------------===//
171 
172 static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
173                                     int dim, isl::dim type) {
174   isl::val V;
175   isl::ctx Ctx = S.ctx();
176 
177   // The upper and lower bound for a parameter value is derived either from
178   // the data type of the parameter or from the - possibly more restrictive -
179   // range metadata.
180   V = valFromAPInt(Ctx.get(), Range.getSignedMin(), true);
181   S = S.lower_bound_val(type, dim, V);
182   V = valFromAPInt(Ctx.get(), Range.getSignedMax(), true);
183   S = S.upper_bound_val(type, dim, V);
184 
185   if (Range.isFullSet())
186     return S;
187 
188   if (S.n_basic_set().release() > MaxDisjunctsInContext)
189     return S;
190 
191   // In case of signed wrapping, we can refine the set of valid values by
192   // excluding the part not covered by the wrapping range.
193   if (Range.isSignWrappedSet()) {
194     V = valFromAPInt(Ctx.get(), Range.getLower(), true);
195     isl::set SLB = S.lower_bound_val(type, dim, V);
196 
197     V = valFromAPInt(Ctx.get(), Range.getUpper(), true);
198     V = V.sub(1);
199     isl::set SUB = S.upper_bound_val(type, dim, V);
200     S = SLB.unite(SUB);
201   }
202 
203   return S;
204 }
205 
206 static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
207   LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
208   if (!BasePtrLI)
209     return nullptr;
210 
211   if (!S->contains(BasePtrLI))
212     return nullptr;
213 
214   ScalarEvolution &SE = *S->getSE();
215 
216   auto *OriginBaseSCEV =
217       SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
218   if (!OriginBaseSCEV)
219     return nullptr;
220 
221   auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
222   if (!OriginBaseSCEVUnknown)
223     return nullptr;
224 
225   return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
226                              MemoryKind::Array);
227 }
228 
229 ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl::ctx Ctx,
230                              ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
231                              const DataLayout &DL, Scop *S,
232                              const char *BaseName)
233     : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
234   std::string BasePtrName =
235       BaseName ? BaseName
236                : getIslCompatibleName("MemRef", BasePtr, S->getNextArrayIdx(),
237                                       Kind == MemoryKind::PHI ? "__phi" : "",
238                                       UseInstructionNames);
239   Id = isl::id::alloc(Ctx, BasePtrName, this);
240 
241   updateSizes(Sizes);
242 
243   if (!BasePtr || Kind != MemoryKind::Array) {
244     BasePtrOriginSAI = nullptr;
245     return;
246   }
247 
248   BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
249   if (BasePtrOriginSAI)
250     const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
251 }
252 
// Out-of-line defaulted destructor.
ScopArrayInfo::~ScopArrayInfo() = default;
254 
255 isl::space ScopArrayInfo::getSpace() const {
256   auto Space = isl::space(Id.ctx(), 0, getNumberOfDimensions());
257   Space = Space.set_tuple_id(isl::dim::set, Id);
258   return Space;
259 }
260 
261 bool ScopArrayInfo::isReadOnly() {
262   isl::union_set WriteSet = S.getWrites().range();
263   isl::space Space = getSpace();
264   WriteSet = WriteSet.extract_set(Space);
265 
266   return bool(WriteSet.is_empty());
267 }
268 
269 bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
270   if (Array->getElementType() != getElementType())
271     return false;
272 
273   if (Array->getNumberOfDimensions() != getNumberOfDimensions())
274     return false;
275 
276   for (unsigned i = 0; i < getNumberOfDimensions(); i++)
277     if (Array->getDimensionSize(i) != getDimensionSize(i))
278       return false;
279 
280   return true;
281 }
282 
283 void ScopArrayInfo::updateElementType(Type *NewElementType) {
284   if (NewElementType == ElementType)
285     return;
286 
287   auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
288   auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);
289 
290   if (NewElementSize == OldElementSize || NewElementSize == 0)
291     return;
292 
293   if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
294     ElementType = NewElementType;
295   } else {
296     auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
297     ElementType = IntegerType::get(ElementType->getContext(), GCD);
298   }
299 }
300 
301 bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
302                                 bool CheckConsistency) {
303   int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
304   int ExtraDimsNew = NewSizes.size() - SharedDims;
305   int ExtraDimsOld = DimensionSizes.size() - SharedDims;
306 
307   if (CheckConsistency) {
308     for (int i = 0; i < SharedDims; i++) {
309       auto *NewSize = NewSizes[i + ExtraDimsNew];
310       auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
311       if (NewSize && KnownSize && NewSize != KnownSize)
312         return false;
313     }
314 
315     if (DimensionSizes.size() >= NewSizes.size())
316       return true;
317   }
318 
319   DimensionSizes.clear();
320   DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
321                         NewSizes.end());
322   DimensionSizesPw.clear();
323   for (const SCEV *Expr : DimensionSizes) {
324     if (!Expr) {
325       DimensionSizesPw.push_back(isl::pw_aff());
326       continue;
327     }
328     isl::pw_aff Size = S.getPwAffOnly(Expr);
329     DimensionSizesPw.push_back(Size);
330   }
331   return true;
332 }
333 
// Return the readable name stored in this array's isl id.
std::string ScopArrayInfo::getName() const { return Id.get_name(); }
335 
// Size of one canonical array element in bytes, per the data layout.
int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(ElementType);
}
339 
// The array's isl id doubles as its base-pointer identifier.
isl::id ScopArrayInfo::getBasePtrId() const { return Id; }
341 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print this array description to stderr.
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(errs()); }
#endif
345 
346 void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
347   OS.indent(8) << *getElementType() << " " << getName();
348   unsigned u = 0;
349 
350   if (getNumberOfDimensions() > 0 && !getDimensionSize(0)) {
351     OS << "[*]";
352     u++;
353   }
354   for (; u < getNumberOfDimensions(); u++) {
355     OS << "[";
356 
357     if (SizeAsPwAff) {
358       isl::pw_aff Size = getDimensionSizePw(u);
359       OS << " " << Size << " ";
360     } else {
361       OS << *getDimensionSize(u);
362     }
363 
364     OS << "]";
365   }
366 
367   OS << ";";
368 
369   if (BasePtrOriginSAI)
370     OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";
371 
372   OS << " // Element size " << getElemSizeInBytes() << "\n";
373 }
374 
375 const ScopArrayInfo *
376 ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
377   isl::id Id = PMA.get_tuple_id(isl::dim::out);
378   assert(!Id.is_null() && "Output dimension didn't have an ID");
379   return getFromId(Id);
380 }
381 
382 const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
383   void *User = Id.get_user();
384   const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
385   return SAI;
386 }
387 
// Rewrite the access relation so that subscripts of inner dimensions with a
// known constant, non-zero size are wrapped into bounds: an index in
// dimension i becomes (index % size_i) and the carry floor(index / size_i)
// is added to dimension i - 1. The outermost dimension (i == 0) receives
// carries but is never wrapped itself.
void MemoryAccess::wrapConstantDimensions() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::ctx Ctx = ArraySpace.ctx();
  unsigned DimsArray = SAI->getNumberOfDimensions();

  // DivModAff accumulates the per-dimension mod/div rewrites; start with the
  // identity on the array space.
  isl::multi_aff DivModAff = isl::multi_aff::identity(
      ArraySpace.map_from_domain_and_range(ArraySpace));
  isl::local_space LArraySpace = isl::local_space(ArraySpace);

  // Begin with last dimension, to iteratively carry into higher dimensions.
  for (int i = DimsArray - 1; i > 0; i--) {
    auto *DimSize = SAI->getDimensionSize(i);
    auto *DimSizeCst = dyn_cast<SCEVConstant>(DimSize);

    // This transformation is not applicable to dimensions with dynamic size.
    if (!DimSizeCst)
      continue;

    // This transformation is not applicable to dimensions of size zero.
    if (DimSize->isZero())
      continue;

    isl::val DimSizeVal =
        valFromAPInt(Ctx.get(), DimSizeCst->getAPInt(), false);
    isl::aff Var = isl::aff::var_on_domain(LArraySpace, isl::dim::set, i);
    isl::aff PrevVar =
        isl::aff::var_on_domain(LArraySpace, isl::dim::set, i - 1);

    // Compute: index % size
    // Modulo must apply in the divide of the previous iteration, if any.
    isl::aff Modulo = Var.mod(DimSizeVal);
    Modulo = Modulo.pullback(DivModAff);

    // Compute: floor(index / size)
    isl::aff Divide = Var.div(isl::aff(LArraySpace, DimSizeVal));
    Divide = Divide.floor();
    Divide = Divide.add(PrevVar);
    Divide = Divide.pullback(DivModAff);

    // Apply Modulo and Divide.
    DivModAff = DivModAff.set_aff(i, Modulo);
    DivModAff = DivModAff.set_aff(i - 1, Divide);
  }

  // Apply all modulo/divides on the accesses.
  isl::map Relation = AccessRelation;
  Relation = Relation.apply_range(isl::map::from_multi_aff(DivModAff));
  Relation = Relation.detect_equalities();
  AccessRelation = Relation;
}
439 
// Adjust the access relation to match the dimensionality and canonical
// element size of the underlying ScopArrayInfo:
//  * pad missing outer dimensions with zero when the access has fewer
//    subscripts than the array has dimensions,
//  * divide single-subscript (non-delinearized) accesses by the canonical
//    element size,
//  * wrap constant-size dimensions when dimensions were padded,
//  * derive bounds for non-affine access relations, and
//  * widen the access to a range of elements when the accessed type is
//    larger than the array's canonical element type.
void MemoryAccess::updateDimensionality() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::space AccessSpace = AccessRelation.get_space().range();
  isl::ctx Ctx = ArraySpace.ctx();

  unsigned DimsArray = unsignedFromIslSize(ArraySpace.dim(isl::dim::set));
  unsigned DimsAccess = unsignedFromIslSize(AccessSpace.dim(isl::dim::set));
  assert(DimsArray >= DimsAccess);
  unsigned DimsMissing = DimsArray - DimsAccess;

  auto *BB = getStatement()->getEntryBlock();
  auto &DL = BB->getModule()->getDataLayout();
  unsigned ArrayElemSize = SAI->getElemSizeInBytes();
  unsigned ElemBytes = DL.getTypeAllocSize(getElementType());

  // Map from the access space into the array space: missing outer dimensions
  // become zero, the remaining dimensions are equated pairwise.
  isl::map Map = isl::map::from_domain_and_range(
      isl::set::universe(AccessSpace), isl::set::universe(ArraySpace));

  for (auto i : seq<unsigned>(0, DimsMissing))
    Map = Map.fix_si(isl::dim::out, i, 0);

  for (auto i : seq<unsigned>(DimsMissing, DimsArray))
    Map = Map.equate(isl::dim::in, i - DimsMissing, isl::dim::out, i);

  AccessRelation = AccessRelation.apply_range(Map);

  // For the non delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not divisible
  // by the accesses element size, we will have chosen a smaller ArrayElemSize
  // that divides the offsets of all accesses to this base pointer.
  if (DimsAccess == 1) {
    isl::val V = isl::val(Ctx, ArrayElemSize);
    AccessRelation = AccessRelation.floordiv_val(V);
  }

  // We currently do this only if we added at least one dimension, which means
  // some dimension's indices have not been specified, an indicator that some
  // index values have been added together.
  // TODO: Investigate general usefulness; Effect on unit tests is to make index
  // expressions more complicated.
  if (DimsMissing)
    wrapConstantDimensions();

  if (!isAffine())
    computeBoundsOnAccessRelation(ArrayElemSize);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // {[i] -> A[o] : 4 i <= o <= 4 i + 3
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be multiple of canonical element size");
    assert(DimsArray >= 1);
    isl::map Map = isl::map::from_domain_and_range(
        isl::set::universe(ArraySpace), isl::set::universe(ArraySpace));
    for (auto i : seq<unsigned>(0, DimsArray - 1))
      Map = Map.equate(isl::dim::in, i, isl::dim::out, i);

    isl::constraint C;
    isl::local_space LS;

    LS = isl::local_space(Map.get_space());
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    // Constrain the innermost output dimension o to: in <= o <= in + Num - 1.
    C = isl::constraint::alloc_inequality(LS);
    C = C.set_constant_val(isl::val(Ctx, Num - 1));
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, 1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, -1);
    Map = Map.add_constraint(C);

    C = isl::constraint::alloc_inequality(LS);
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, -1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, 1);
    C = C.set_constant_val(isl::val(Ctx, 0));
    Map = Map.add_constraint(C);
    AccessRelation = AccessRelation.apply_range(Map);
  }
}
527 
// Return the operator symbol used when printing the given reduction type.
// Must not be called with RT_NONE.
const std::string
MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
  switch (RT) {
  case MemoryAccess::RT_NONE:
    llvm_unreachable("Requested a reduction operator string for a memory "
                     "access which isn't a reduction");
  case MemoryAccess::RT_ADD:
    return "+";
  case MemoryAccess::RT_MUL:
    return "*";
  case MemoryAccess::RT_BOR:
    return "|";
  case MemoryAccess::RT_BXOR:
    return "^";
  case MemoryAccess::RT_BAND:
    return "&";
  }
  llvm_unreachable("Unknown reduction type");
}
547 
548 const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
549   isl::id ArrayId = getArrayId();
550   void *User = ArrayId.get_user();
551   const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
552   return SAI;
553 }
554 
555 const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
556   isl::id ArrayId = getLatestArrayId();
557   void *User = ArrayId.get_user();
558   const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
559   return SAI;
560 }
561 
// The tuple id of the access relation's range identifies the accessed array.
isl::id MemoryAccess::getOriginalArrayId() const {
  return AccessRelation.get_tuple_id(isl::dim::out);
}
565 
566 isl::id MemoryAccess::getLatestArrayId() const {
567   if (!hasNewAccessRelation())
568     return getOriginalArrayId();
569   return NewAccessRelation.get_tuple_id(isl::dim::out);
570 }
571 
// Return a single-valued version of the access relation by picking the
// lexicographically minimal accessed element per domain point.
isl::map MemoryAccess::getAddressFunction() const {
  return getAccessRelation().lexmin();
}
575 
576 isl::pw_multi_aff
577 MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
578   isl::map Schedule, ScheduledAccRel;
579   isl::union_set UDomain;
580 
581   UDomain = getStatement()->getDomain();
582   USchedule = USchedule.intersect_domain(UDomain);
583   Schedule = isl::map::from_union_map(USchedule);
584   ScheduledAccRel = getAddressFunction().apply_domain(Schedule);
585   return isl::pw_multi_aff::from_map(ScheduledAccRel);
586 }
587 
// Access relation as derived from the IR, ignoring any new access relation.
isl::map MemoryAccess::getOriginalAccessRelation() const {
  return AccessRelation;
}
591 
// String form of the original access relation (for printing/debugging).
std::string MemoryAccess::getOriginalAccessRelationStr() const {
  return stringFromIslObj(AccessRelation);
}
595 
// Space of the original access relation.
isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
  return AccessRelation.get_space();
}
599 
// The replacement access relation set by an optimizer (may be null).
isl::map MemoryAccess::getNewAccessRelation() const {
  return NewAccessRelation;
}
603 
// String form of the new access relation (for printing/debugging).
std::string MemoryAccess::getNewAccessRelationStr() const {
  return stringFromIslObj(NewAccessRelation);
}
607 
// String form of the currently effective access relation.
std::string MemoryAccess::getAccessRelationStr() const {
  return stringFromIslObj(getAccessRelation());
}
611 
612 isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
613   isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
614   Space = Space.align_params(Statement->getDomainSpace());
615 
616   return isl::basic_map::from_domain_and_range(
617       isl::basic_set::universe(Statement->getDomainSpace()),
618       isl::basic_set::universe(Space));
619 }
620 
621 // Formalize no out-of-bound access assumption
622 //
623 // When delinearizing array accesses we optimistically assume that the
624 // delinearized accesses do not access out of bound locations (the subscript
// expression of each array evaluates for each statement instance that is
// executed to a value that is greater than or equal to zero and strictly
// smaller than the size of the corresponding dimension). The only exception
// is the outermost
628 // dimension for which we do not need to assume any upper bound.  At this point
629 // we formalize this assumption to ensure that at code generation time the
630 // relevant run-time checks can be generated.
631 //
632 // To find the set of constraints necessary to avoid out of bound accesses, we
633 // first build the set of data locations that are not within array bounds. We
634 // then apply the reverse access relation to obtain the set of iterations that
635 // may contain invalid accesses and reduce this set of iterations to the ones
636 // that are actually executed by intersecting them with the domain of the
637 // statement. If we now project out all loop dimensions, we obtain a set of
638 // parameters that may cause statement instances to be executed that may
639 // possibly yield out of bound memory accesses. The complement of these
640 // constraints is the set of constraints that needs to be assumed to ensure such
641 // statement instances are never executed.
isl::set MemoryAccess::assumeNoOutOfBound() {
  auto *SAI = getScopArrayInfo();
  isl::space Space = getOriginalAccessRelationSpace().range();
  isl::set Outside = isl::set::empty(Space);
  // Start at dimension 1: no upper bound is assumed for the outermost
  // dimension.
  for (int i = 1, Size = Space.dim(isl::dim::set).release(); i < Size; ++i) {
    isl::local_space LS(Space);
    isl::pw_aff Var = isl::pw_aff::var_on_domain(LS, isl::dim::set, i);
    isl::pw_aff Zero = isl::pw_aff(LS);

    // A subscript is out of bounds if it is negative or at least as large as
    // the dimension's size.
    isl::set DimOutside = Var.lt_set(Zero);
    isl::pw_aff SizeE = SAI->getDimensionSizePw(i);
    SizeE = SizeE.add_dims(isl::dim::in, Space.dim(isl::dim::set).release());
    SizeE = SizeE.set_tuple_id(isl::dim::in, Space.get_tuple_id(isl::dim::set));
    DimOutside = DimOutside.unite(SizeE.le_set(Var));

    Outside = Outside.unite(DimOutside);
  }

  // Map out-of-bounds array elements back to the statement instances that
  // would access them, keep only executed instances, and project onto the
  // parameters.
  Outside = Outside.apply(getAccessRelation().reverse());
  Outside = Outside.intersect(Statement->getDomain());
  Outside = Outside.params();

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time check
  // bail out more often than strictly necessary.
  Outside = Outside.remove_divs();
  Outside = Outside.complement();

  if (!PollyPreciseInbounds)
    Outside = Outside.gist_params(Statement->getDomain().params());
  return Outside;
}
675 
// Build the access relation for a memory intrinsic. Subscripts[0] is the
// start offset and Subscripts[1] the access length (null if unknown); the
// resulting relation covers the interval [start, start + length).
void MemoryAccess::buildMemIntrinsicAccessRelation() {
  assert(isMemoryIntrinsic());
  assert(Subscripts.size() == 2 && Sizes.size() == 1);

  isl::pw_aff SubscriptPWA = getPwAff(Subscripts[0]);
  isl::map SubscriptMap = isl::map::from_pw_aff(SubscriptPWA);

  isl::map LengthMap;
  if (Subscripts[1] == nullptr) {
    // Unknown length: overapproximate with the universe.
    LengthMap = isl::map::universe(SubscriptMap.get_space());
  } else {
    isl::pw_aff LengthPWA = getPwAff(Subscripts[1]);
    LengthMap = isl::map::from_pw_aff(LengthPWA);
    isl::space RangeSpace = LengthMap.get_space().range();
    // Expand the length value to all values strictly smaller than it.
    LengthMap = LengthMap.apply_range(isl::map::lex_gt(RangeSpace));
  }
  LengthMap = LengthMap.lower_bound_si(isl::dim::out, 0, 0);
  LengthMap = LengthMap.align_params(SubscriptMap.get_space());
  SubscriptMap = SubscriptMap.align_params(LengthMap.get_space());
  // Shift the [0, length) interval by the start offset.
  LengthMap = LengthMap.sum(SubscriptMap);
  AccessRelation =
      LengthMap.set_tuple_id(isl::dim::in, getStatement()->getDomainId());
}
699 
700 void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
701   ScalarEvolution *SE = Statement->getParent()->getSE();
702 
703   auto MAI = MemAccInst(getAccessInstruction());
704   if (isa<MemIntrinsic>(MAI))
705     return;
706 
707   Value *Ptr = MAI.getPointerOperand();
708   if (!Ptr || !SE->isSCEVable(Ptr->getType()))
709     return;
710 
711   auto *PtrSCEV = SE->getSCEV(Ptr);
712   if (isa<SCEVCouldNotCompute>(PtrSCEV))
713     return;
714 
715   auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
716   if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
717     PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);
718 
719   const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
720   if (Range.isFullSet())
721     return;
722 
723   if (Range.isUpperWrapped() || Range.isSignWrappedSet())
724     return;
725 
726   bool isWrapping = Range.isSignWrappedSet();
727 
728   unsigned BW = Range.getBitWidth();
729   const auto One = APInt(BW, 1);
730   const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
731   const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();
732 
733   auto Min = LB.sdiv(APInt(BW, ElementSize));
734   auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;
735 
736   assert(Min.sle(Max) && "Minimum expected to be less or equal than max");
737 
738   isl::map Relation = AccessRelation;
739   isl::set AccessRange = Relation.range();
740   AccessRange = addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0,
741                                     isl::dim::set);
742   AccessRelation = Relation.intersect_range(AccessRange);
743 }
744 
745 void MemoryAccess::foldAccessRelation() {
746   if (Sizes.size() < 2 || isa<SCEVConstant>(Sizes[1]))
747     return;
748 
749   int Size = Subscripts.size();
750 
751   isl::map NewAccessRelation = AccessRelation;
752 
753   for (int i = Size - 2; i >= 0; --i) {
754     isl::space Space;
755     isl::map MapOne, MapTwo;
756     isl::pw_aff DimSize = getPwAff(Sizes[i + 1]);
757 
758     isl::space SpaceSize = DimSize.get_space();
759     isl::id ParamId = SpaceSize.get_dim_id(isl::dim::param, 0);
760 
761     Space = AccessRelation.get_space();
762     Space = Space.range().map_from_set();
763     Space = Space.align_params(SpaceSize);
764 
765     int ParamLocation = Space.find_dim_by_id(isl::dim::param, ParamId);
766 
767     MapOne = isl::map::universe(Space);
768     for (int j = 0; j < Size; ++j)
769       MapOne = MapOne.equate(isl::dim::in, j, isl::dim::out, j);
770     MapOne = MapOne.lower_bound_si(isl::dim::in, i + 1, 0);
771 
772     MapTwo = isl::map::universe(Space);
773     for (int j = 0; j < Size; ++j)
774       if (j < i || j > i + 1)
775         MapTwo = MapTwo.equate(isl::dim::in, j, isl::dim::out, j);
776 
777     isl::local_space LS(Space);
778     isl::constraint C;
779     C = isl::constraint::alloc_equality(LS);
780     C = C.set_constant_si(-1);
781     C = C.set_coefficient_si(isl::dim::in, i, 1);
782     C = C.set_coefficient_si(isl::dim::out, i, -1);
783     MapTwo = MapTwo.add_constraint(C);
784     C = isl::constraint::alloc_equality(LS);
785     C = C.set_coefficient_si(isl::dim::in, i + 1, 1);
786     C = C.set_coefficient_si(isl::dim::out, i + 1, -1);
787     C = C.set_coefficient_si(isl::dim::param, ParamLocation, 1);
788     MapTwo = MapTwo.add_constraint(C);
789     MapTwo = MapTwo.upper_bound_si(isl::dim::in, i + 1, -1);
790 
791     MapOne = MapOne.unite(MapTwo);
792     NewAccessRelation = NewAccessRelation.apply_range(MapOne);
793   }
794 
795   isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
796   isl::space Space = Statement->getDomainSpace();
797   NewAccessRelation = NewAccessRelation.set_tuple_id(
798       isl::dim::in, Space.get_tuple_id(isl::dim::set));
799   NewAccessRelation = NewAccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
800   NewAccessRelation = NewAccessRelation.gist_domain(Statement->getDomain());
801 
802   // Access dimension folding might in certain cases increase the number of
803   // disjuncts in the memory access, which can possibly complicate the generated
804   // run-time checks and can lead to costly compilation.
805   if (!PollyPreciseFoldAccesses && NewAccessRelation.n_basic_map().release() >
806                                        AccessRelation.n_basic_map().release()) {
807   } else {
808     AccessRelation = NewAccessRelation;
809   }
810 }
811 
/// Construct the access relation of this memory access.
///
/// Memory intrinsics and non-affine accesses are over-approximated; affine
/// accesses get a precise map built from their subscript expressions.
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(AccessRelation.is_null() && "AccessRelation already built");

  // Initialize the invalid domain which describes all iterations for which the
  // access relation is not modeled correctly.
  isl::set StmtInvalidDomain = getStatement()->getInvalidDomain();
  InvalidDomain = isl::set::empty(StmtInvalidDomain.get_space());

  isl::ctx Ctx = Id.ctx();
  isl::id BaseAddrId = SAI->getBasePtrId();

  // Memory intrinsics get their own dedicated relation builder.
  if (getAccessInstruction() && isa<MemIntrinsic>(getAccessInstruction())) {
    buildMemIntrinsicAccessRelation();
    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference, if an
    // access must or may happen. However, for write accesses it is important to
    // differentiate between writes that must happen and writes that may happen.
    if (AccessRelation.is_null())
      AccessRelation = createBasicAccessMap(Statement);

    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  // Affine case: build [iters] -> [subscript_0, ..., subscript_n] by
  // concatenating the piecewise-affine form of each subscript.
  isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl::map::universe(Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl::pw_aff Affine = getPwAff(Subscripts[i]);
    isl::map SubscriptMap = isl::map::from_pw_aff(Affine);
    AccessRelation = AccessRelation.flat_range_product(SubscriptMap);
  }

  // Name the input tuple after the statement's domain and the output tuple
  // after the accessed array's base pointer.
  Space = Statement->getDomainSpace();
  AccessRelation = AccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);

  AccessRelation = AccessRelation.gist_domain(Statement->getDomain());
}
857 
/// Construct a MemoryAccess for an instruction-based access.
///
/// The isl id is built from the statement's base name, a suffix for the
/// access type, and the number of accesses the statement already holds, so
/// it is unique within the statement.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
    : Kind(Kind), AccType(AccType), Statement(Stmt), InvalidDomain(),
      BaseAddr(BaseAddress), ElementType(ElementType),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(),
      NewAccessRelation() {
  // TypeStrings is indexed by the AccType enum value.
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}
876 
/// Construct a MemoryAccess from an explicit access relation, as done for
/// copy statements. Element type, base pointer and dimension sizes are
/// recovered from the ScopArrayInfo attached to the relation's output tuple.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
    : Kind(MemoryKind::Array), AccType(AccType), Statement(Stmt),
      InvalidDomain(), AccessRelation(), NewAccessRelation(AccRel) {
  isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(isl::dim::out);
  auto *SAI = ScopArrayInfo::getFromId(ArrayInfoId);
  // No size expression is stored for the outermost dimension.
  Sizes.push_back(nullptr);
  for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
    Sizes.push_back(SAI->getDimensionSize(i));
  ElementType = SAI->getElementType();
  BaseAddr = SAI->getBasePtr();
  // Build a unique name from statement name, access type and access count.
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}
893 
// Defined out-of-line to anchor the (defaulted) destructor.
MemoryAccess::~MemoryAccess() = default;
895 
896 void MemoryAccess::realignParams() {
897   isl::set Ctx = Statement->getParent()->getContext();
898   InvalidDomain = InvalidDomain.gist_params(Ctx);
899   AccessRelation = AccessRelation.gist_params(Ctx);
900 
901   // Predictable parameter order is required for JSON imports. Ensure alignment
902   // by explicitly calling align_params.
903   isl::space CtxSpace = Ctx.get_space();
904   InvalidDomain = InvalidDomain.align_params(CtxSpace);
905   AccessRelation = AccessRelation.align_params(CtxSpace);
906 }
907 
/// Return the printable operator string for this access' reduction type.
const std::string MemoryAccess::getReductionOperatorStr() const {
  return MemoryAccess::getReductionOperatorStr(getReductionType());
}

/// Return the isl id uniquely identifying this access.
isl::id MemoryAccess::getId() const { return Id; }
913 
914 raw_ostream &polly::operator<<(raw_ostream &OS,
915                                MemoryAccess::ReductionType RT) {
916   if (RT == MemoryAccess::RT_NONE)
917     OS << "NONE";
918   else
919     OS << MemoryAccess::getReductionOperatorStr(RT);
920   return OS;
921 }
922 
923 void MemoryAccess::print(raw_ostream &OS) const {
924   switch (AccType) {
925   case READ:
926     OS.indent(12) << "ReadAccess :=\t";
927     break;
928   case MUST_WRITE:
929     OS.indent(12) << "MustWriteAccess :=\t";
930     break;
931   case MAY_WRITE:
932     OS.indent(12) << "MayWriteAccess :=\t";
933     break;
934   }
935 
936   OS << "[Reduction Type: " << getReductionType() << "] ";
937 
938   OS << "[Scalar: " << isScalarKind() << "]\n";
939   OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
940   if (hasNewAccessRelation())
941     OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
942 }
943 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugging helper: print this access to stderr.
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(errs()); }
#endif
947 
948 isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
949   auto *Stmt = getStatement();
950   PWACtx PWAC = Stmt->getParent()->getPwAff(E, Stmt->getEntryBlock());
951   isl::set StmtDom = getStatement()->getDomain();
952   StmtDom = StmtDom.reset_tuple_id();
953   isl::set NewInvalidDom = StmtDom.intersect(PWAC.second);
954   InvalidDomain = InvalidDomain.unite(NewInvalidDom);
955   return PWAC.first;
956 }
957 
958 // Create a map in the size of the provided set domain, that maps from the
959 // one element of the provided set domain to another element of the provided
960 // set domain.
961 // The mapping is limited to all points that are equal in all but the last
962 // dimension and for which the last dimension of the input is strict smaller
963 // than the last dimension of the output.
964 //
965 //   getEqualAndLarger(set[i0, i1, ..., iX]):
966 //
967 //   set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
968 //     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
969 //
970 static isl::map getEqualAndLarger(isl::space SetDomain) {
971   isl::space Space = SetDomain.map_from_set();
972   isl::map Map = isl::map::universe(Space);
973   unsigned lastDimension = Map.domain_tuple_dim().release() - 1;
974 
975   // Set all but the last dimension to be equal for the input and output
976   //
977   //   input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
978   //     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
979   for (unsigned i = 0; i < lastDimension; ++i)
980     Map = Map.equate(isl::dim::in, i, isl::dim::out, i);
981 
982   // Set the last dimension of the input to be strict smaller than the
983   // last dimension of the output.
984   //
985   //   input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
986   Map = Map.order_lt(isl::dim::in, lastDimension, isl::dim::out, lastDimension);
987   return Map;
988 }
989 
990 isl::set MemoryAccess::getStride(isl::map Schedule) const {
991   isl::map AccessRelation = getAccessRelation();
992   isl::space Space = Schedule.get_space().range();
993   isl::map NextScatt = getEqualAndLarger(Space);
994 
995   Schedule = Schedule.reverse();
996   NextScatt = NextScatt.lexmin();
997 
998   NextScatt = NextScatt.apply_range(Schedule);
999   NextScatt = NextScatt.apply_range(AccessRelation);
1000   NextScatt = NextScatt.apply_domain(Schedule);
1001   NextScatt = NextScatt.apply_domain(AccessRelation);
1002 
1003   isl::set Deltas = NextScatt.deltas();
1004   return Deltas;
1005 }
1006 
1007 bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
1008   isl::set Stride, StrideX;
1009   bool IsStrideX;
1010 
1011   Stride = getStride(Schedule);
1012   StrideX = isl::set::universe(Stride.get_space());
1013   int Size = unsignedFromIslSize(StrideX.tuple_dim());
1014   for (auto i : seq<int>(0, Size - 1))
1015     StrideX = StrideX.fix_si(isl::dim::set, i, 0);
1016   StrideX = StrideX.fix_si(isl::dim::set, Size - 1, StrideWidth);
1017   IsStrideX = Stride.is_subset(StrideX);
1018 
1019   return IsStrideX;
1020 }
1021 
/// Stride zero: consecutive schedule points access the same element.
bool MemoryAccess::isStrideZero(isl::map Schedule) const {
  return isStrideX(Schedule, 0);
}

/// Stride one: consecutive schedule points access adjacent elements in the
/// innermost dimension.
bool MemoryAccess::isStrideOne(isl::map Schedule) const {
  return isStrideX(Schedule, 1);
}

/// Overwrite the current (original) access relation.
void MemoryAccess::setAccessRelation(isl::map NewAccess) {
  AccessRelation = NewAccess;
}
1033 
/// Install @p NewAccess as the new (transformed) access relation.
///
/// In asserts builds, the relation is validated: it must use the statement's
/// domain space, READs must not become partial, the accessed array must be
/// identified by a ScopArrayInfo, indirect arrays must have an invariant
/// hoisted base pointer, and the relation's output dimensionality must match
/// the array's.
void MemoryAccess::setNewAccessRelation(isl::map NewAccess) {
  assert(!NewAccess.is_null());

#ifndef NDEBUG
  // Check domain space compatibility.
  isl::space NewSpace = NewAccess.get_space();
  isl::space NewDomainSpace = NewSpace.domain();
  isl::space OriginalDomainSpace = getStatement()->getDomainSpace();
  assert(OriginalDomainSpace.has_equal_tuples(NewDomainSpace));

  // Reads must be executed unconditionally. Writes might be executed in a
  // subdomain only.
  if (isRead()) {
    // Check whether there is an access for every statement instance.
    isl::set StmtDomain = getStatement()->getDomain();
    isl::set DefinedContext =
        getStatement()->getParent()->getBestKnownDefinedBehaviorContext();
    StmtDomain = StmtDomain.intersect_params(DefinedContext);
    isl::set NewDomain = NewAccess.domain();
    assert(!StmtDomain.is_subset(NewDomain).is_false() &&
           "Partial READ accesses not supported");
  }

  isl::space NewAccessSpace = NewAccess.get_space();
  assert(NewAccessSpace.has_tuple_id(isl::dim::set) &&
         "Must specify the array that is accessed");
  isl::id NewArrayId = NewAccessSpace.get_tuple_id(isl::dim::set);
  auto *SAI = static_cast<ScopArrayInfo *>(NewArrayId.get_user());
  assert(SAI && "Must set a ScopArrayInfo");

  if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
    InvariantEquivClassTy *EqClass =
        getStatement()->getParent()->lookupInvariantEquivClass(
            SAI->getBasePtr());
    assert(EqClass &&
           "Access functions to indirect arrays must have an invariant and "
           "hoisted base pointer");
  }

  // Check whether access dimensions correspond to number of dimensions of the
  // accesses array.
  unsigned Dims = SAI->getNumberOfDimensions();
  unsigned SpaceSize = unsignedFromIslSize(NewAccessSpace.dim(isl::dim::set));
  assert(SpaceSize == Dims && "Access dims must match array dims");
#endif

  // Simplify the relation against what is already known before storing it.
  NewAccess = NewAccess.gist_params(getStatement()->getParent()->getContext());
  NewAccess = NewAccess.gist_domain(getStatement()->getDomain());
  NewAccessRelation = NewAccess;
}
1084 
/// Return true if the latest access relation does not cover every instance
/// of the statement's domain, i.e. the access is partial.
bool MemoryAccess::isLatestPartialAccess() const {
  isl::set StmtDom = getStatement()->getDomain();
  isl::set AccDom = getLatestAccessRelation().domain();

  return !StmtDom.is_subset(AccDom);
}
1091 
1092 //===----------------------------------------------------------------------===//
1093 
1094 isl::map ScopStmt::getSchedule() const {
1095   isl::set Domain = getDomain();
1096   if (Domain.is_empty())
1097     return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
1098   auto Schedule = getParent()->getSchedule();
1099   if (Schedule.is_null())
1100     return {};
1101   Schedule = Schedule.intersect_domain(isl::union_set(Domain));
1102   if (Schedule.is_empty())
1103     return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
1104   isl::map M = M.from_union_map(Schedule);
1105   M = M.coalesce();
1106   M = M.gist_domain(Domain);
1107   M = M.coalesce();
1108   return M;
1109 }
1110 
/// Replace this statement's domain by @p NewDomain, which must be a subset
/// of the current domain.
void ScopStmt::restrictDomain(isl::set NewDomain) {
  assert(NewDomain.is_subset(Domain) &&
         "New domain is not a subset of old domain!");
  Domain = NewDomain;
}
1116 
/// Register @p Access with this statement and update the per-kind lookup
/// tables. With @p Prepend set, the access is inserted at the front of the
/// access list instead of appended.
void ScopStmt::addAccess(MemoryAccess *Access, bool Prepend) {
  Instruction *AccessInst = Access->getAccessInstruction();

  if (Access->isArrayKind()) {
    // Array accesses are indexed by their instruction; one instruction can
    // carry several accesses, hence a list.
    MemoryAccessList &MAL = InstructionToAccess[AccessInst];
    MAL.emplace_front(Access);
  } else if (Access->isValueKind() && Access->isWrite()) {
    // At most one value write per defining instruction.
    Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
    assert(!ValueWrites.lookup(AccessVal));

    ValueWrites[AccessVal] = Access;
  } else if (Access->isValueKind() && Access->isRead()) {
    // At most one value read per used value.
    Value *AccessVal = Access->getAccessValue();
    assert(!ValueReads.lookup(AccessVal));

    ValueReads[AccessVal] = Access;
  } else if (Access->isAnyPHIKind() && Access->isWrite()) {
    // At most one PHI write per PHI node.
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIWrites.lookup(PHI));

    PHIWrites[PHI] = Access;
  } else if (Access->isAnyPHIKind() && Access->isRead()) {
    // At most one PHI read per PHI node.
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIReads.lookup(PHI));

    PHIReads[PHI] = Access;
  }

  if (Prepend) {
    MemAccs.insert(MemAccs.begin(), Access);
    return;
  }
  MemAccs.push_back(Access);
}
1151 
1152 void ScopStmt::realignParams() {
1153   for (MemoryAccess *MA : *this)
1154     MA->realignParams();
1155 
1156   simplify(InvalidDomain);
1157   simplify(Domain);
1158 
1159   isl::set Ctx = Parent.getContext();
1160   InvalidDomain = InvalidDomain.gist_params(Ctx);
1161   Domain = Domain.gist_params(Ctx);
1162 
1163   // Predictable parameter order is required for JSON imports. Ensure alignment
1164   // by explicitly calling align_params.
1165   isl::space CtxSpace = Ctx.get_space();
1166   InvalidDomain = InvalidDomain.align_params(CtxSpace);
1167   Domain = Domain.align_params(CtxSpace);
1168 }
1169 
/// Create a statement that models a whole (non-affine) region.
ScopStmt::ScopStmt(Scop &parent, Region &R, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> EntryBlockInstructions)
    : Parent(parent), InvalidDomain(), Domain(), R(&R), Build(), BaseName(Name),
      SurroundingLoop(SurroundingLoop), Instructions(EntryBlockInstructions) {}

/// Create a statement that models a single basic block.
ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> Instructions)
    : Parent(parent), InvalidDomain(), Domain(), BB(&bb), Build(),
      BaseName(Name), SurroundingLoop(SurroundingLoop),
      Instructions(Instructions) {}

/// Create a copy statement over @p NewDomain that reads via @p SourceRel and
/// writes via @p TargetRel. Both accesses are created here and registered
/// with the parent SCoP.
ScopStmt::ScopStmt(Scop &parent, isl::map SourceRel, isl::map TargetRel,
                   isl::set NewDomain)
    : Parent(parent), InvalidDomain(), Domain(NewDomain), Build() {
  BaseName = getIslCompatibleName("CopyStmt_", "",
                                  std::to_string(parent.getCopyStmtsNum()));
  isl::id Id = isl::id::alloc(getIslCtx(), getBaseName(), this);
  // Tag domain and both relations with this statement's id.
  Domain = Domain.set_tuple_id(Id);
  TargetRel = TargetRel.set_tuple_id(isl::dim::in, Id);
  auto *Access =
      new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE, TargetRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
  SourceRel = SourceRel.set_tuple_id(isl::dim::in, Id);
  Access = new MemoryAccess(this, MemoryAccess::AccessType::READ, SourceRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
}
1200 
ScopStmt::~ScopStmt() = default;

/// Textual representation of the iteration domain.
std::string ScopStmt::getDomainStr() const { return stringFromIslObj(Domain); }

/// Textual representation of the (computed) schedule.
std::string ScopStmt::getScheduleStr() const {
  return stringFromIslObj(getSchedule());
}

void ScopStmt::setInvalidDomain(isl::set ID) { InvalidDomain = ID; }

/// Return the statement's basic block, or the entry block of its region for
/// region statements.
BasicBlock *ScopStmt::getEntryBlock() const {
  if (isBlockStmt())
    return getBasicBlock();
  return getRegion()->getEntry();
}

unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }

const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }

/// Return the loop that drives schedule dimension @p Dimension.
Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
  return NestLoops[Dimension];
}

isl::ctx ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }

isl::set ScopStmt::getDomain() const { return Domain; }

isl::space ScopStmt::getDomainSpace() const { return Domain.get_space(); }

isl::id ScopStmt::getDomainId() const { return Domain.get_tuple_id(); }
1232 
/// Print the list of IR instructions belonging to this statement.
void ScopStmt::printInstructions(raw_ostream &OS) const {
  OS << "Instructions {\n";

  for (Instruction *Inst : Instructions)
    OS.indent(16) << *Inst << "\n";

  OS.indent(12) << "}\n";
}

/// Print this statement: name, domain, schedule, all memory accesses, and
/// optionally the underlying instructions.
void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
  OS << "\t" << getBaseName() << "\n";
  OS.indent(12) << "Domain :=\n";

  // A null domain means the statement was not fully constructed yet.
  if (!Domain.is_null()) {
    OS.indent(16) << getDomainStr() << ";\n";
  } else
    OS.indent(16) << "n/a\n";

  OS.indent(12) << "Schedule :=\n";

  if (!Domain.is_null()) {
    OS.indent(16) << getScheduleStr() << ";\n";
  } else
    OS.indent(16) << "n/a\n";

  for (MemoryAccess *Access : MemAccs)
    Access->print(OS);

  if (PrintInstructions)
    printInstructions(OS.indent(12));
}
1264 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugging helper: print this statement (including instructions) to dbgs().
LLVM_DUMP_METHOD void ScopStmt::dump() const { print(dbgs(), true); }
#endif
1268 
1269 void ScopStmt::removeAccessData(MemoryAccess *MA) {
1270   if (MA->isRead() && MA->isOriginalValueKind()) {
1271     bool Found = ValueReads.erase(MA->getAccessValue());
1272     (void)Found;
1273     assert(Found && "Expected access data not found");
1274   }
1275   if (MA->isWrite() && MA->isOriginalValueKind()) {
1276     bool Found = ValueWrites.erase(cast<Instruction>(MA->getAccessValue()));
1277     (void)Found;
1278     assert(Found && "Expected access data not found");
1279   }
1280   if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
1281     bool Found = PHIWrites.erase(cast<PHINode>(MA->getAccessInstruction()));
1282     (void)Found;
1283     assert(Found && "Expected access data not found");
1284   }
1285   if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
1286     bool Found = PHIReads.erase(cast<PHINode>(MA->getAccessInstruction()));
1287     (void)Found;
1288     assert(Found && "Expected access data not found");
1289   }
1290 }
1291 
1292 void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
1293   // Remove the memory accesses from this statement together with all scalar
1294   // accesses that were caused by it. MemoryKind::Value READs have no access
1295   // instruction, hence would not be removed by this function. However, it is
1296   // only used for invariant LoadInst accesses, its arguments are always affine,
1297   // hence synthesizable, and therefore there are no MemoryKind::Value READ
1298   // accesses to be removed.
1299   auto Predicate = [&](MemoryAccess *Acc) {
1300     return Acc->getAccessInstruction() == MA->getAccessInstruction();
1301   };
1302   for (auto *MA : MemAccs) {
1303     if (Predicate(MA)) {
1304       removeAccessData(MA);
1305       Parent.removeAccessData(MA);
1306     }
1307   }
1308   llvm::erase_if(MemAccs, Predicate);
1309   InstructionToAccess.erase(MA->getAccessInstruction());
1310 }
1311 
/// Remove exactly the single access @p MA from this statement.
///
/// With @p AfterHoisting set, @p MA is also erased from the access list and
/// the scalar lookup tables; otherwise only the instruction-to-access
/// mapping is updated.
void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA, bool AfterHoisting) {
  if (AfterHoisting) {
    auto MAIt = std::find(MemAccs.begin(), MemAccs.end(), MA);
    assert(MAIt != MemAccs.end());
    MemAccs.erase(MAIt);

    removeAccessData(MA);
    Parent.removeAccessData(MA);
  }

  // Drop MA from the per-instruction access list; erase the list entirely
  // once it becomes empty.
  auto It = InstructionToAccess.find(MA->getAccessInstruction());
  if (It != InstructionToAccess.end()) {
    It->second.remove(MA);
    if (It->second.empty())
      InstructionToAccess.erase(MA->getAccessInstruction());
  }
}
1329 
/// Return the input access for @p V in this statement, creating a
/// MemoryKind::Value READ access (and its backing ScopArrayInfo) on demand.
MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
  MemoryAccess *Access = lookupInputAccessOf(V);
  if (Access)
    return Access;

  ScopArrayInfo *SAI =
      Parent.getOrCreateScopArrayInfo(V, V->getType(), {}, MemoryKind::Value);
  Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
                            true, {}, {}, V, MemoryKind::Value);
  Parent.addAccessFunction(Access);
  Access->buildAccessRelation(SAI);
  addAccess(Access);
  Parent.addAccessData(Access);
  return Access;
}
1345 
/// Stream a statement; instruction printing follows the global option.
raw_ostream &polly::operator<<(raw_ostream &OS, const ScopStmt &S) {
  S.print(OS, PollyPrintInstructions);
  return OS;
}
1350 
1351 //===----------------------------------------------------------------------===//
1352 /// Scop class implement
1353 
/// Replace the context, keeping the existing parameter order by aligning the
/// new context to the old context's space.
void Scop::setContext(isl::set NewContext) {
  Context = NewContext.align_params(Context.get_space());
}
1357 
namespace {

/// Remap parameter values but keep AddRecs valid wrt. invariant loads.
struct SCEVSensitiveParameterRewriter
    : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
  const ValueToValueMap &VMap;

public:
  SCEVSensitiveParameterRewriter(const ValueToValueMap &VMap,
                                 ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), VMap(VMap) {}

  /// Rewrite @p E, substituting values according to @p VMap.
  static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
                             const ValueToValueMap &VMap) {
    SCEVSensitiveParameterRewriter SSPR(VMap, SE);
    return SSPR.visit(E);
  }

  // Rebuild the AddRec as "start + {0,+,step}" with start and step rewritten
  // separately, keeping the AddRec itself anchored at zero.
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    auto *Start = visit(E->getStart());
    auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
                                    visit(E->getStepRecurrence(SE)),
                                    E->getLoop(), SCEV::FlagAnyWrap);
    return SE.getAddExpr(Start, AddRec);
  }

  // Substitute the value if it is mapped; otherwise keep the expression.
  const SCEV *visitUnknown(const SCEVUnknown *E) {
    if (auto *NewValue = VMap.lookup(E->getValue()))
      return SE.getUnknown(NewValue);
    return E;
  }
};

/// Check whether we should remap a SCEV expression.
struct SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
  const ValueToValueMap &VMap;
  bool FoundInside = false;
  const Scop *S;

public:
  SCEVFindInsideScop(const ValueToValueMap &VMap, ScalarEvolution &SE,
                     const Scop *S)
      : SCEVTraversal(*this), VMap(VMap), S(S) {}

  /// Return true if @p E contains a sub-expression that varies inside @p S
  /// and is not covered by @p VMap.
  static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
                         const ValueToValueMap &VMap, const Scop *S) {
    SCEVFindInsideScop SFIS(VMap, SE, S);
    SFIS.visitAll(E);
    return SFIS.FoundInside;
  }

  bool follow(const SCEV *E) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(E)) {
      // AddRecs over loops contained in the SCoP's region vary inside it.
      FoundInside |= S->getRegion().contains(AddRec->getLoop());
    } else if (auto *Unknown = dyn_cast<SCEVUnknown>(E)) {
      // Unmapped instructions defined inside the region vary as well.
      if (Instruction *I = dyn_cast<Instruction>(Unknown->getValue()))
        FoundInside |= S->getRegion().contains(I) && !VMap.count(I);
    }
    return !FoundInside;
  }

  // Stop traversing as soon as something variant has been found.
  bool isDone() { return FoundInside; }
};
} // end anonymous namespace
1422 
/// Normalize @p E by replacing values with their invariant-load equivalence
/// class representatives, unless the expression varies inside the SCoP.
const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) const {
  // Check whether it makes sense to rewrite the SCEV.  (ScalarEvolution
  // doesn't like addition between an AddRec and an expression that
  // doesn't have a dominance relationship with it.)
  if (SCEVFindInsideScop::hasVariant(E, *SE, InvEquivClassVMap, this))
    return E;

  // Rewrite SCEV.
  return SCEVSensitiveParameterRewriter::rewrite(E, *SE, InvEquivClassVMap);
}
1433 
1434 void Scop::createParameterId(const SCEV *Parameter) {
1435   assert(Parameters.count(Parameter));
1436   assert(!ParameterIds.count(Parameter));
1437 
1438   std::string ParameterName = "p_" + std::to_string(getNumParams() - 1);
1439 
1440   if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
1441     Value *Val = ValueParameter->getValue();
1442 
1443     if (UseInstructionNames) {
1444       // If this parameter references a specific Value and this value has a name
1445       // we use this name as it is likely to be unique and more useful than just
1446       // a number.
1447       if (Val->hasName())
1448         ParameterName = Val->getName().str();
1449       else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
1450         auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
1451         if (LoadOrigin->hasName()) {
1452           ParameterName += "_loaded_from_";
1453           ParameterName +=
1454               LI->getPointerOperand()->stripInBoundsOffsets()->getName();
1455         }
1456       }
1457     }
1458 
1459     ParameterName = getIslCompatibleName("", ParameterName, "");
1460   }
1461 
1462   isl::id Id = isl::id::alloc(getIslCtx(), ParameterName,
1463                               const_cast<void *>((const void *)Parameter));
1464   ParameterIds[Parameter] = Id;
1465 }
1466 
/// Register each of @p NewParameters (normalized) and create ids for the
/// ones not seen before.
void Scop::addParams(const ParameterSetTy &NewParameters) {
  for (const SCEV *Parameter : NewParameters) {
    // Normalize the SCEV to get the representing element for an invariant load.
    Parameter = extractConstantFactor(Parameter, *SE).second;
    Parameter = getRepresentingInvariantLoadSCEV(Parameter);

    if (Parameters.insert(Parameter))
      createParameterId(Parameter);
  }
}

/// Return the isl id for @p Parameter, or a null id if it is not a known
/// parameter of this SCoP.
isl::id Scop::getIdForParam(const SCEV *Parameter) const {
  // Normalize the SCEV to get the representing element for an invariant load.
  Parameter = getRepresentingInvariantLoadSCEV(Parameter);
  return ParameterIds.lookup(Parameter);
}
1483 
/// Return true if @p BB dominates the SCoP's entry block.
bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
  return DT.dominates(BB, getEntry());
}

/// Initialize all context sets over an empty (zero-parameter) space:
/// everything assumed/defined, nothing known invalid yet.
void Scop::buildContext() {
  isl::space Space = isl::space::params_alloc(getIslCtx(), 0);
  Context = isl::set::universe(Space);
  InvalidContext = isl::set::empty(Space);
  AssumedContext = isl::set::universe(Space);
  DefinedBehaviorContext = isl::set::universe(Space);
}
1495 
/// Tighten the context with the signed value range ScalarEvolution knows for
/// each parameter, and intersect those bounds into the defined-behavior
/// context as assumptions.
void Scop::addParameterBounds() {
  unsigned PDim = 0;
  for (auto *Parameter : Parameters) {
    ConstantRange SRange = SE->getSignedRange(Parameter);
    Context = addRangeBoundsToSet(Context, SRange, PDim++, isl::dim::param);
  }
  intersectDefinedBehavior(Context, AS_ASSUMPTION);
}
1504 
/// Bring all of the SCoP's isl objects into one common parameter space and
/// add known parameter bounds.
void Scop::realignParams() {
  // NOTE(review): this early return skips the whole realignment (including
  // the JSON-import alignment below), not just the bound-adding — confirm
  // that is the intended behavior of -polly-ignore-parameter-bounds.
  if (PollyIgnoreParamBounds)
    return;

  // Add all parameters into a common model.
  isl::space Space = getFullParamSpace();

  // Align the parameters of all data structures to the model.
  Context = Context.align_params(Space);
  AssumedContext = AssumedContext.align_params(Space);
  InvalidContext = InvalidContext.align_params(Space);

  // As all parameters are known add bounds to them.
  addParameterBounds();

  for (ScopStmt &Stmt : *this)
    Stmt.realignParams();
  // Simplify the schedule according to the context too.
  Schedule = Schedule.gist_domain_params(getContext());

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  Schedule = Schedule.align_params(Space);
}
1529 
/// Simplify @p AssumptionContext by removing constraints already implied by
/// the domain parameters (only safe without error blocks) and by @p S'
/// context.
static isl::set simplifyAssumptionContext(isl::set AssumptionContext,
                                          const Scop &S) {
  // If we have modeled all blocks in the SCoP that have side effects we can
  // simplify the context with the constraints that are needed for anything to
  // be executed at all. However, if we have error blocks in the SCoP we already
  // assumed some parameter combinations cannot occur and removed them from the
  // domains, thus we cannot use the remaining domain to simplify the
  // assumptions.
  if (!S.hasErrorBlock()) {
    auto DomainParameters = S.getDomains().params();
    AssumptionContext = AssumptionContext.gist_params(DomainParameters);
  }

  AssumptionContext = AssumptionContext.gist_params(S.getContext());
  return AssumptionContext;
}
1546 
void Scop::simplifyContexts() {
  // The parameter constraints of the iteration domains give us a set of
  // constraints that need to hold for all cases where at least a single
  // statement iteration is executed in the whole scop. We now simplify the
  // assumed context under the assumption that such constraints hold and at
  // least a single statement iteration is executed. For cases where no
  // statement instances are executed, the assumptions we have taken about
  // the executed code do not matter and can be changed.
  //
  // WARNING: This only holds if the assumptions we have taken do not reduce
  //          the set of statement instances that are executed. Otherwise we
  //          may run into a case where the iteration domains suggest that
  //          for a certain set of parameter constraints no code is executed,
  //          but in the original program some computation would have been
  //          performed. In such a case, modifying the run-time conditions and
  //          possibly influencing the run-time check may cause certain scops
  //          to not be executed.
  //
  // Example:
  //
  //   When delinearizing the following code:
  //
  //     for (long i = 0; i < 100; i++)
  //       for (long j = 0; j < m; j++)
  //         A[i+p][j] = 1.0;
  //
  //   we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
  //   otherwise we would access out of bound data. Now, knowing that code is
  //   only executed for the case m >= 0, it is sufficient to assume p >= 0.
  AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
  // Keep the remaining contexts in the common parameter space.
  InvalidContext = InvalidContext.align_params(getParamSpace());
  simplify(DefinedBehaviorContext);
  DefinedBehaviorContext = DefinedBehaviorContext.align_params(getParamSpace());
}
1581 
// Return the domain conditions of @p Stmt by delegating to the basic-block
// overload using the statement's entry block.
isl::set Scop::getDomainConditions(const ScopStmt *Stmt) const {
  return getDomainConditions(Stmt->getEntryBlock());
}
1585 
1586 isl::set Scop::getDomainConditions(BasicBlock *BB) const {
1587   auto DIt = DomainMap.find(BB);
1588   if (DIt != DomainMap.end())
1589     return DIt->getSecond();
1590 
1591   auto &RI = *R.getRegionInfo();
1592   auto *BBR = RI.getRegionFor(BB);
1593   while (BBR->getEntry() == BB)
1594     BBR = BBR->getParent();
1595   return getDomainConditions(BBR->getEntry());
1596 }
1597 
// Construct a Scop for region @p R. Each Scop owns a private isl_ctx (freed
// via isl_ctx_free when IslCtx's shared_ptr releases it). Note that
// HasSingleExitEdge is initialized from the pointer returned by
// R.getExitingBlock(): it is true iff the region has a single exiting block.
Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
           DominatorTree &DT, ScopDetection::DetectionContext &DC,
           OptimizationRemarkEmitter &ORE, int ID)
    : IslCtx(isl_ctx_alloc(), isl_ctx_free), SE(&ScalarEvolution), DT(&DT),
      R(R), name(None), HasSingleExitEdge(R.getExitingBlock()), DC(DC),
      ORE(ORE), Affinator(this, LI), ID(ID) {

  // Options defaults that are different from ISL's.
  isl_options_set_schedule_serialize_sccs(IslCtx.get(), true);

  // Forward the user-provided -polly-isl-arg options to ISL's option parser.
  SmallVector<char *, 8> IslArgv;
  IslArgv.reserve(1 + IslArgs.size());

  // Substitute for program name.
  IslArgv.push_back(const_cast<char *>("-polly-isl-arg"));

  for (std::string &Arg : IslArgs)
    IslArgv.push_back(const_cast<char *>(Arg.c_str()));

  // Abort if unknown argument is passed.
  // Note that "-V" (print isl version) will always call exit(0), so we cannot
  // avoid ISL aborting the program at this point.
  unsigned IslParseFlags = ISL_ARG_ALL;

  isl_ctx_parse_options(IslCtx.get(), IslArgv.size(), IslArgv.data(),
                        IslParseFlags);

  if (IslOnErrorAbort)
    isl_options_set_on_error(getIslCtx().get(), ISL_ON_ERROR_ABORT);
  buildContext();
}
1629 
1630 Scop::~Scop() = default;
1631 
1632 void Scop::removeFromStmtMap(ScopStmt &Stmt) {
1633   for (Instruction *Inst : Stmt.getInstructions())
1634     InstStmtMap.erase(Inst);
1635 
1636   if (Stmt.isRegionStmt()) {
1637     for (BasicBlock *BB : Stmt.getRegion()->blocks()) {
1638       StmtMap.erase(BB);
1639       // Skip entry basic block, as its instructions are already deleted as
1640       // part of the statement's instruction list.
1641       if (BB == Stmt.getEntryBlock())
1642         continue;
1643       for (Instruction &Inst : *BB)
1644         InstStmtMap.erase(&Inst);
1645     }
1646   } else {
1647     auto StmtMapIt = StmtMap.find(Stmt.getBasicBlock());
1648     if (StmtMapIt != StmtMap.end())
1649       StmtMapIt->second.erase(std::remove(StmtMapIt->second.begin(),
1650                                           StmtMapIt->second.end(), &Stmt),
1651                               StmtMapIt->second.end());
1652     for (Instruction *Inst : Stmt.getInstructions())
1653       InstStmtMap.erase(Inst);
1654   }
1655 }
1656 
/// Remove all statements for which @p ShouldDelete returns true.
///
/// @param ShouldDelete Predicate deciding whether a statement is removed.
/// @param AfterHoisting Forwarded to removeSingleMemoryAccess; controls how
///                      access removal is handled (see its documentation).
void Scop::removeStmts(function_ref<bool(ScopStmt &)> ShouldDelete,
                       bool AfterHoisting) {
  // Erase while iterating: Stmts.erase returns the iterator following the
  // removed element, so StmtIt stays valid.
  for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
    if (!ShouldDelete(*StmtIt)) {
      StmtIt++;
      continue;
    }

    // Start with removing all of the statement's accesses including erasing it
    // from all maps that are pointing to them.
    // Make a temporary copy because removing MAs invalidates the iterator.
    SmallVector<MemoryAccess *, 16> MAList(StmtIt->begin(), StmtIt->end());
    for (MemoryAccess *MA : MAList)
      StmtIt->removeSingleMemoryAccess(MA, AfterHoisting);

    removeFromStmtMap(*StmtIt);
    StmtIt = Stmts.erase(StmtIt);
  }
}
1676 
1677 void Scop::removeStmtNotInDomainMap() {
1678   removeStmts([this](ScopStmt &Stmt) -> bool {
1679     isl::set Domain = DomainMap.lookup(Stmt.getEntryBlock());
1680     if (Domain.is_null())
1681       return true;
1682     return Domain.is_empty();
1683   });
1684 }
1685 
1686 void Scop::simplifySCoP(bool AfterHoisting) {
1687   removeStmts(
1688       [AfterHoisting](ScopStmt &Stmt) -> bool {
1689         // Never delete statements that contain calls to debug functions.
1690         if (hasDebugCall(&Stmt))
1691           return false;
1692 
1693         bool RemoveStmt = Stmt.isEmpty();
1694 
1695         // Remove read only statements only after invariant load hoisting.
1696         if (!RemoveStmt && AfterHoisting) {
1697           bool OnlyRead = true;
1698           for (MemoryAccess *MA : Stmt) {
1699             if (MA->isRead())
1700               continue;
1701 
1702             OnlyRead = false;
1703             break;
1704           }
1705 
1706           RemoveStmt = OnlyRead;
1707         }
1708         return RemoveStmt;
1709       },
1710       AfterHoisting);
1711 }
1712 
1713 InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
1714   LoadInst *LInst = dyn_cast<LoadInst>(Val);
1715   if (!LInst)
1716     return nullptr;
1717 
1718   if (Value *Rep = InvEquivClassVMap.lookup(LInst))
1719     LInst = cast<LoadInst>(Rep);
1720 
1721   Type *Ty = LInst->getType();
1722   const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
1723   for (auto &IAClass : InvariantEquivClasses) {
1724     if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
1725       continue;
1726 
1727     auto &MAs = IAClass.InvariantAccesses;
1728     for (auto *MA : MAs)
1729       if (MA->getAccessInstruction() == Val)
1730         return &IAClass;
1731   }
1732 
1733   return nullptr;
1734 }
1735 
/// Look up (or create) the ScopArrayInfo for @p BasePtr/@p BaseName.
///
/// Exactly one of @p BasePtr and @p BaseName must be set; named arrays are
/// kept in ScopArrayNameMap, pointer-based ones in ScopArrayInfoMap.
ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
                                              ArrayRef<const SCEV *> Sizes,
                                              MemoryKind Kind,
                                              const char *BaseName) {
  assert((BasePtr || BaseName) &&
         "BasePtr and BaseName can not be nullptr at the same time.");
  assert(!(BasePtr && BaseName) && "BaseName is redundant.");
  // SAI is a reference to the unique_ptr slot inside the map; operator[]
  // creates an empty slot on first use, and reset() below fills it in place.
  auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(BasePtr, Kind)]
                      : ScopArrayNameMap[BaseName];
  if (!SAI) {
    auto &DL = getFunction().getParent()->getDataLayout();
    SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
                                DL, this, BaseName));
    ScopArrayInfoSet.insert(SAI.get());
  } else {
    SAI->updateElementType(ElementType);
    // In case of mismatching array sizes, we bail out by setting the run-time
    // context to false.
    if (!SAI->updateSizes(Sizes))
      invalidate(DELINEARIZATION, DebugLoc());
  }
  return SAI.get();
}
1759 
1760 ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
1761                                          const std::string &BaseName,
1762                                          const std::vector<unsigned> &Sizes) {
1763   auto *DimSizeType = Type::getInt64Ty(getSE()->getContext());
1764   std::vector<const SCEV *> SCEVSizes;
1765 
1766   for (auto size : Sizes)
1767     if (size)
1768       SCEVSizes.push_back(getSE()->getConstant(DimSizeType, size, false));
1769     else
1770       SCEVSizes.push_back(nullptr);
1771 
1772   auto *SAI = getOrCreateScopArrayInfo(nullptr, ElementType, SCEVSizes,
1773                                        MemoryKind::Array, BaseName.c_str());
1774   return SAI;
1775 }
1776 
1777 ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr, MemoryKind Kind) {
1778   auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
1779   return SAI;
1780 }
1781 
1782 ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
1783   auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
1784   assert(SAI && "No ScopArrayInfo available for this base pointer");
1785   return SAI;
1786 }
1787 
// Return the textual (isl) representation of the context.
std::string Scop::getContextStr() const {
  return stringFromIslObj(getContext());
}
1791 
// Return the textual (isl) representation of the assumed context.
std::string Scop::getAssumedContextStr() const {
  assert(!AssumedContext.is_null() && "Assumed context not yet built");
  return stringFromIslObj(AssumedContext);
}
1796 
// Return the textual (isl) representation of the invalid context.
std::string Scop::getInvalidContextStr() const {
  return stringFromIslObj(InvalidContext);
}
1800 
1801 std::string Scop::getNameStr() const {
1802   std::string ExitName, EntryName;
1803   std::tie(EntryName, ExitName) = getEntryExitStr();
1804   return EntryName + "---" + ExitName;
1805 }
1806 
/// Return the printable names of the region's entry and exit blocks.
///
/// Returns the pair (EntryName, ExitName); a missing exit block (top-level
/// region) is reported as "FunctionExit".
std::pair<std::string, std::string> Scop::getEntryExitStr() const {
  std::string ExitName, EntryName;
  raw_string_ostream ExitStr(ExitName);
  raw_string_ostream EntryStr(EntryName);

  R.getEntry()->printAsOperand(EntryStr, false);
  // str() flushes the stream buffer into EntryName.
  EntryStr.str();

  if (R.getExit()) {
    R.getExit()->printAsOperand(ExitStr, false);
    // Flush into ExitName.
    ExitStr.str();
  } else
    ExitName = "FunctionExit";

  return std::make_pair(EntryName, ExitName);
}
1823 
1824 isl::set Scop::getContext() const { return Context; }
1825 
1826 isl::space Scop::getParamSpace() const { return getContext().get_space(); }
1827 
1828 isl::space Scop::getFullParamSpace() const {
1829 
1830   isl::space Space = isl::space::params_alloc(getIslCtx(), ParameterIds.size());
1831 
1832   unsigned PDim = 0;
1833   for (const SCEV *Parameter : Parameters) {
1834     isl::id Id = getIdForParam(Parameter);
1835     Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
1836   }
1837 
1838   return Space;
1839 }
1840 
// Return the assumed context; it must have been built already.
isl::set Scop::getAssumedContext() const {
  assert(!AssumedContext.is_null() && "Assumed context not yet built");
  return AssumedContext;
}
1845 
1846 bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
1847   if (PollyProcessUnprofitable)
1848     return true;
1849 
1850   if (isEmpty())
1851     return false;
1852 
1853   unsigned OptimizableStmtsOrLoops = 0;
1854   for (auto &Stmt : *this) {
1855     if (Stmt.getNumIterators() == 0)
1856       continue;
1857 
1858     bool ContainsArrayAccs = false;
1859     bool ContainsScalarAccs = false;
1860     for (auto *MA : Stmt) {
1861       if (MA->isRead())
1862         continue;
1863       ContainsArrayAccs |= MA->isLatestArrayKind();
1864       ContainsScalarAccs |= MA->isLatestScalarKind();
1865     }
1866 
1867     if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
1868       OptimizableStmtsOrLoops += Stmt.getNumIterators();
1869   }
1870 
1871   return OptimizableStmtsOrLoops > 1;
1872 }
1873 
/// Check whether there exists a parameter valuation for which the SCoP may
/// legally be executed at run time.
///
/// Note: isl predicates return three-valued isl::boolean; is_false() is used
/// so that an isl error state is treated as infeasible.
bool Scop::hasFeasibleRuntimeContext() const {
  if (Stmts.empty())
    return false;

  isl::set PositiveContext = getAssumedContext();
  isl::set NegativeContext = getInvalidContext();
  // Restrict the assumed context to the known parameter constraints and to
  // the parameter values for which some statement instance executes.
  PositiveContext = PositiveContext.intersect_params(Context);
  PositiveContext = PositiveContext.intersect_params(getDomains().params());
  // Feasible iff something remains and it is not fully covered by the
  // invalid context.
  return PositiveContext.is_empty().is_false() &&
         PositiveContext.is_subset(NegativeContext).is_false();
}
1885 
1886 MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
1887   Value *PointerBase = MA->getOriginalBaseAddr();
1888 
1889   auto *PointerBaseInst = dyn_cast<Instruction>(PointerBase);
1890   if (!PointerBaseInst)
1891     return nullptr;
1892 
1893   auto *BasePtrStmt = getStmtFor(PointerBaseInst);
1894   if (!BasePtrStmt)
1895     return nullptr;
1896 
1897   return BasePtrStmt->getArrayAccessOrNULLFor(PointerBaseInst);
1898 }
1899 
/// Map an AssumptionKind to the human-readable name used in remarks.
static std::string toString(AssumptionKind Kind) {
  switch (Kind) {
  case ALIASING:
    return "No-aliasing";
  case INBOUNDS:
    return "Inbounds";
  case WRAPPING:
    return "No-overflows";
  case UNSIGNED:
    return "Signed-unsigned";
  case COMPLEXITY:
    return "Low complexity";
  case PROFITABLE:
    return "Profitable";
  case ERRORBLOCK:
    return "No-error";
  case INFINITELOOP:
    return "Finite loop";
  case INVARIANTLOAD:
    return "Invariant load";
  case DELINEARIZATION:
    return "Delinearization";
  }
  llvm_unreachable("Unknown AssumptionKind!");
}
1925 
1926 bool Scop::isEffectiveAssumption(isl::set Set, AssumptionSign Sign) {
1927   if (Sign == AS_ASSUMPTION) {
1928     if (Context.is_subset(Set))
1929       return false;
1930 
1931     if (AssumedContext.is_subset(Set))
1932       return false;
1933   } else {
1934     if (Set.is_disjoint(Context))
1935       return false;
1936 
1937     if (Set.is_subset(InvalidContext))
1938       return false;
1939   }
1940   return true;
1941 }
1942 
/// Record an assumption/restriction: update statistics and emit an
/// optimization remark.
///
/// @returns True if the assumption should be taken (callers then intersect it
///          into the assumed/invalid context), false if it was filtered out
///          as ineffective (under -polly-remarks-minimal) or trivial.
bool Scop::trackAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                           AssumptionSign Sign, BasicBlock *BB) {
  if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
    return false;

  // Do never emit trivial assumptions as they only clutter the output.
  if (!PollyRemarksMinimal) {
    isl::set Univ;
    if (Sign == AS_ASSUMPTION)
      Univ = isl::set::universe(Set.get_space());

    // A restriction of the empty set or an assumption of the universe set
    // does not constrain anything.
    bool IsTrivial = (Sign == AS_RESTRICTION && Set.is_empty()) ||
                     (Sign == AS_ASSUMPTION && Univ.is_equal(Set));

    if (IsTrivial)
      return false;
  }

  // Bump the per-kind statistics counter.
  switch (Kind) {
  case ALIASING:
    AssumptionsAliasing++;
    break;
  case INBOUNDS:
    AssumptionsInbounds++;
    break;
  case WRAPPING:
    AssumptionsWrapping++;
    break;
  case UNSIGNED:
    AssumptionsUnsigned++;
    break;
  case COMPLEXITY:
    AssumptionsComplexity++;
    break;
  case PROFITABLE:
    AssumptionsUnprofitable++;
    break;
  case ERRORBLOCK:
    AssumptionsErrorBlock++;
    break;
  case INFINITELOOP:
    AssumptionsInfiniteLoop++;
    break;
  case INVARIANTLOAD:
    AssumptionsInvariantLoad++;
    break;
  case DELINEARIZATION:
    AssumptionsDelinearization++;
    break;
  }

  // Emit the remark at @p BB if given, otherwise at the region entry.
  auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
  std::string Msg = toString(Kind) + Suffix + stringFromIslObj(Set);
  if (BB)
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
             << Msg);
  else
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
                                        R.getEntry())
             << Msg);
  return true;
}
2005 
/// Add an assumption (@p Sign == AS_ASSUMPTION) or restriction to this Scop.
///
/// The set always refines the defined-behavior context; it only enters the
/// run-time-checked assumed/invalid contexts when @p RequiresRTC is true and
/// trackAssumption accepts it.
void Scop::addAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                         AssumptionSign Sign, BasicBlock *BB,
                         bool RequiresRTC) {
  // Simplify the assumptions/restrictions first.
  Set = Set.gist_params(getContext());
  intersectDefinedBehavior(Set, Sign);

  if (!RequiresRTC)
    return;

  // Filtered out (trivial/ineffective)? Then do not change the contexts.
  if (!trackAssumption(Kind, Set, Loc, Sign, BB))
    return;

  if (Sign == AS_ASSUMPTION)
    AssumedContext = AssumedContext.intersect(Set).coalesce();
  else
    InvalidContext = InvalidContext.unite(Set).coalesce();
}
2024 
2025 void Scop::intersectDefinedBehavior(isl::set Set, AssumptionSign Sign) {
2026   if (DefinedBehaviorContext.is_null())
2027     return;
2028 
2029   if (Sign == AS_ASSUMPTION)
2030     DefinedBehaviorContext = DefinedBehaviorContext.intersect(Set);
2031   else
2032     DefinedBehaviorContext = DefinedBehaviorContext.subtract(Set);
2033 
2034   // Limit the complexity of the context. If complexity is exceeded, simplify
2035   // the set and check again.
2036   if (DefinedBehaviorContext.n_basic_set().release() >
2037       MaxDisjunktsInDefinedBehaviourContext) {
2038     simplify(DefinedBehaviorContext);
2039     if (DefinedBehaviorContext.n_basic_set().release() >
2040         MaxDisjunktsInDefinedBehaviourContext)
2041       DefinedBehaviorContext = {};
2042   }
2043 }
2044 
// Mark the whole SCoP as invalid by assuming the empty parameter set, which
// makes the run-time context infeasible.
void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
  LLVM_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
  addAssumption(Kind, isl::set::empty(getParamSpace()), Loc, AS_ASSUMPTION, BB);
}
2049 
2050 isl::set Scop::getInvalidContext() const { return InvalidContext; }
2051 
/// Print the context sets and the parameter list of this Scop to @p OS.
void Scop::printContext(raw_ostream &OS) const {
  OS << "Context:\n";
  OS.indent(4) << Context << "\n";

  OS.indent(4) << "Assumed Context:\n";
  OS.indent(4) << AssumedContext << "\n";

  OS.indent(4) << "Invalid Context:\n";
  OS.indent(4) << InvalidContext << "\n";

  OS.indent(4) << "Defined Behavior Context:\n";
  // The defined-behavior context may have been dropped for complexity.
  if (!DefinedBehaviorContext.is_null())
    OS.indent(4) << DefinedBehaviorContext << "\n";
  else
    OS.indent(4) << "<unavailable>\n";

  // List the parameters with their p<i> names.
  unsigned Dim = 0;
  for (const SCEV *Parameter : Parameters)
    OS.indent(4) << "p" << Dim++ << ": " << *Parameter << "\n";
}
2072 
/// Print the min/max alias groups used for run-time alias checks to @p OS.
void Scop::printAliasAssumptions(raw_ostream &OS) const {
  // Count one group per read-only access, or one for a group without any
  // read-only accesses.
  int noOfGroups = 0;
  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
    if (Pair.second.size() == 0)
      noOfGroups += 1;
    else
      noOfGroups += Pair.second.size();
  }

  OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
  if (MinMaxAliasGroups.empty()) {
    OS.indent(8) << "n/a\n";
    return;
  }

  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {

    // If the group has no read only accesses print the write accesses.
    if (Pair.second.empty()) {
      OS.indent(8) << "[[";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }

    // Otherwise print one line per read-only access, each paired with all
    // non-read-only accesses of the group.
    for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
      OS.indent(8) << "[[";
      OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }
  }
}
2111 
/// Print all statements of this Scop to @p OS; @p PrintInstructions controls
/// whether the contained LLVM-IR instructions are listed as well.
void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
  OS << "Statements {\n";

  for (const ScopStmt &Stmt : *this) {
    OS.indent(4);
    Stmt.print(OS, PrintInstructions);
  }

  OS.indent(4) << "}\n";
}
2122 
/// Print all arrays of this Scop, once with SCEV sizes and once with the
/// sizes rendered as piecewise affine expressions.
void Scop::printArrayInfo(raw_ostream &OS) const {
  OS << "Arrays {\n";

  for (auto &Array : arrays())
    Array->print(OS);

  OS.indent(4) << "}\n";

  OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";

  for (auto &Array : arrays())
    Array->print(OS, /* SizeAsPwAff */ true);

  OS.indent(4) << "}\n";
}
2138 
/// Print a full description of the Scop: header, invariant accesses,
/// contexts, arrays, alias groups and statements.
void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
  OS.indent(4) << "Function: " << getFunction().getName() << "\n";
  OS.indent(4) << "Region: " << getNameStr() << "\n";
  OS.indent(4) << "Max Loop Depth:  " << getMaxLoopDepth() << "\n";
  OS.indent(4) << "Invariant Accesses: {\n";
  for (const auto &IAClass : InvariantEquivClasses) {
    const auto &MAs = IAClass.InvariantAccesses;
    // Classes without accesses only have an identifying pointer to show.
    if (MAs.empty()) {
      OS.indent(12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
    } else {
      MAs.front()->print(OS);
      OS.indent(12) << "Execution Context: " << IAClass.ExecutionContext
                    << "\n";
    }
  }
  OS.indent(4) << "}\n";
  printContext(OS.indent(4));
  printArrayInfo(OS.indent(4));
  printAliasAssumptions(OS);
  printStatements(OS.indent(4), PrintInstructions);
}
2160 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger aid: print the full Scop (including instructions) to debug output.
LLVM_DUMP_METHOD void Scop::dump() const { print(dbgs(), true); }
#endif
2164 
2165 isl::ctx Scop::getIslCtx() const { return IslCtx.get(); }
2166 
/// Translate the SCEV @p E (in the context of @p BB) into a piecewise affine
/// expression. On failure the SCoP is invalidated and a zero expression is
/// returned so callers need no error handling.
__isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
                                 bool NonNegative,
                                 RecordedAssumptionsTy *RecordedAssumptions) {
  // First try to use the SCEVAffinator to generate a piecewise defined
  // affine function from @p E in the context of @p BB. If that tasks becomes to
  // complex the affinator might return a nullptr. In such a case we invalidate
  // the SCoP and return a dummy value. This way we do not need to add error
  // handling code to all users of this function.
  auto PWAC = Affinator.getPwAff(E, BB, RecordedAssumptions);
  if (!PWAC.first.is_null()) {
    // TODO: We could use a heuristic and either use:
    //         SCEVAffinator::takeNonNegativeAssumption
    //       or
    //         SCEVAffinator::interpretAsUnsigned
    //       to deal with unsigned or "NonNegative" SCEVs.
    if (NonNegative)
      Affinator.takeNonNegativeAssumption(PWAC, RecordedAssumptions);
    return PWAC;
  }

  // Fallback: invalidate the SCoP (too complex) and return pwaff(0).
  auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
  invalidate(COMPLEXITY, DL, BB);
  return Affinator.getPwAff(SE->getZero(E->getType()), BB, RecordedAssumptions);
}
2191 
2192 isl::union_set Scop::getDomains() const {
2193   isl_space *EmptySpace = isl_space_params_alloc(getIslCtx().get(), 0);
2194   isl_union_set *Domain = isl_union_set_empty(EmptySpace);
2195 
2196   for (const ScopStmt &Stmt : *this)
2197     Domain = isl_union_set_add_set(Domain, Stmt.getDomain().release());
2198 
2199   return isl::manage(Domain);
2200 }
2201 
2202 isl::pw_aff Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB,
2203                                RecordedAssumptionsTy *RecordedAssumptions) {
2204   PWACtx PWAC = getPwAff(E, BB, RecordedAssumptions);
2205   return PWAC.first;
2206 }
2207 
2208 isl::union_map
2209 Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
2210   isl::union_map Accesses = isl::union_map::empty(getIslCtx());
2211 
2212   for (ScopStmt &Stmt : *this) {
2213     for (MemoryAccess *MA : Stmt) {
2214       if (!Predicate(*MA))
2215         continue;
2216 
2217       isl::set Domain = Stmt.getDomain();
2218       isl::map AccessDomain = MA->getAccessRelation();
2219       AccessDomain = AccessDomain.intersect_domain(Domain);
2220       Accesses = Accesses.unite(AccessDomain);
2221     }
2222   }
2223 
2224   return Accesses.coalesce();
2225 }
2226 
2227 isl::union_map Scop::getMustWrites() {
2228   return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
2229 }
2230 
2231 isl::union_map Scop::getMayWrites() {
2232   return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
2233 }
2234 
2235 isl::union_map Scop::getWrites() {
2236   return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
2237 }
2238 
2239 isl::union_map Scop::getReads() {
2240   return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
2241 }
2242 
2243 isl::union_map Scop::getAccesses() {
2244   return getAccessesOfType([](MemoryAccess &MA) { return true; });
2245 }
2246 
2247 isl::union_map Scop::getAccesses(ScopArrayInfo *Array) {
2248   return getAccessesOfType(
2249       [Array](MemoryAccess &MA) { return MA.getScopArrayInfo() == Array; });
2250 }
2251 
2252 isl::union_map Scop::getSchedule() const {
2253   auto Tree = getScheduleTree();
2254   return Tree.get_map();
2255 }
2256 
// Return the schedule tree restricted to the current statement domains.
isl::schedule Scop::getScheduleTree() const {
  return Schedule.intersect_domain(getDomains());
}
2260 
2261 void Scop::setSchedule(isl::union_map NewSchedule) {
2262   auto S = isl::schedule::from_domain(getDomains());
2263   Schedule = S.insert_partial_schedule(
2264       isl::multi_union_pw_aff::from_union_map(NewSchedule));
2265   ScheduleModified = true;
2266 }
2267 
// Install a new schedule tree and remember that the schedule changed.
void Scop::setScheduleTree(isl::schedule NewSchedule) {
  Schedule = NewSchedule;
  ScheduleModified = true;
}
2272 
/// Intersect every statement domain with @p Domain.
///
/// @returns True if at least one statement domain actually shrank.
bool Scop::restrictDomains(isl::union_set Domain) {
  bool Changed = false;
  for (ScopStmt &Stmt : *this) {
    isl::union_set StmtDomain = isl::union_set(Stmt.getDomain());
    isl::union_set NewStmtDomain = StmtDomain.intersect(Domain);

    // Nothing removed from this statement's domain.
    if (StmtDomain.is_subset(NewStmtDomain))
      continue;

    Changed = true;

    NewStmtDomain = NewStmtDomain.coalesce();

    // An empty union_set cannot be converted to an isl::set directly; build
    // an empty set in the statement's own space instead.
    if (NewStmtDomain.is_empty())
      Stmt.restrictDomain(isl::set::empty(Stmt.getDomainSpace()));
    else
      Stmt.restrictDomain(isl::set(NewStmtDomain));
  }
  return Changed;
}
2293 
2294 ScalarEvolution *Scop::getSE() const { return SE; }
2295 
/// Create a new block statement for @p BB and register it in the statement
/// lookup maps. Each instruction must not already belong to a statement.
void Scop::addScopStmt(BasicBlock *BB, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(BB && "Unexpected nullptr!");
  Stmts.emplace_back(*this, *BB, Name, SurroundingLoop, Instructions);
  auto *Stmt = &Stmts.back();
  StmtMap[BB].push_back(Stmt);
  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }
}
2308 
/// Create a new region statement for @p R and register it in the statement
/// lookup maps. @p Instructions covers the entry block; instructions of the
/// remaining blocks are registered by walking the region.
void Scop::addScopStmt(Region *R, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(R && "Unexpected nullptr!");
  Stmts.emplace_back(*this, *R, Name, SurroundingLoop, Instructions);
  auto *Stmt = &Stmts.back();

  // Register the explicitly listed (entry block) instructions.
  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }

  for (BasicBlock *BB : R->blocks()) {
    StmtMap[BB].push_back(Stmt);
    // Entry block instructions were handled above via @p Instructions.
    if (BB == R->getEntry())
      continue;
    for (Instruction &Inst : *BB) {
      assert(!InstStmtMap.count(&Inst) &&
             "Unexpected statement corresponding to the instruction.");
      InstStmtMap[&Inst] = Stmt;
    }
  }
}
2332 
/// Create a copy statement that reads via @p SourceRel and writes via
/// @p TargetRel over @p Domain. Copy statements are not registered in the
/// block/instruction maps; they only exist in the statement list.
ScopStmt *Scop::addScopStmt(isl::map SourceRel, isl::map TargetRel,
                            isl::set Domain) {
#ifndef NDEBUG
  // Both relations must be defined on the whole statement domain.
  isl::set SourceDomain = SourceRel.domain();
  isl::set TargetDomain = TargetRel.domain();
  assert(Domain.is_subset(TargetDomain) &&
         "Target access not defined for complete statement domain");
  assert(Domain.is_subset(SourceDomain) &&
         "Source access not defined for complete statement domain");
#endif
  Stmts.emplace_back(*this, SourceRel, TargetRel, Domain);
  CopyStmtsNum++;
  return &(Stmts.back());
}
2347 
2348 ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
2349   auto StmtMapIt = StmtMap.find(BB);
2350   if (StmtMapIt == StmtMap.end())
2351     return {};
2352   return StmtMapIt->second;
2353 }
2354 
/// Return the statement that provides the incoming value of the PHI use @p U.
///
/// @param U A use that is an incoming operand of a PHINode.
ScopStmt *Scop::getIncomingStmtFor(const Use &U) const {
  auto *PHI = cast<PHINode>(U.getUser());
  BasicBlock *IncomingBB = PHI->getIncomingBlock(U);

  // If the value is a non-synthesizable from the incoming block, use the
  // statement that contains it as user statement.
  if (auto *IncomingInst = dyn_cast<Instruction>(U.get())) {
    if (IncomingInst->getParent() == IncomingBB) {
      if (ScopStmt *IncomingStmt = getStmtFor(IncomingInst))
        return IncomingStmt;
    }
  }

  // Otherwise, use the epilogue/last statement.
  return getLastStmtFor(IncomingBB);
}
2371 
2372 ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
2373   ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
2374   if (!StmtList.empty())
2375     return StmtList.back();
2376   return nullptr;
2377 }
2378 
2379 ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
2380   if (RN->isSubRegion())
2381     return getStmtListFor(RN->getNodeAs<Region>());
2382   return getStmtListFor(RN->getNodeAs<BasicBlock>());
2383 }
2384 
// A region is represented by the statements of its entry block.
ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
  return getStmtListFor(R->getEntry());
}
2388 
2389 int Scop::getRelativeLoopDepth(const Loop *L) const {
2390   if (!L || !R.contains(L))
2391     return -1;
2392   // outermostLoopInRegion always returns nullptr for top level regions
2393   if (R.isTopLevelRegion()) {
2394     // LoopInfo's depths start at 1, we start at 0
2395     return L->getLoopDepth() - 1;
2396   } else {
2397     Loop *OuterLoop = R.outermostLoopInRegion(const_cast<Loop *>(L));
2398     assert(OuterLoop);
2399     return L->getLoopDepth() - OuterLoop->getLoopDepth();
2400   }
2401 }
2402 
2403 ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
2404   for (auto &SAI : arrays()) {
2405     if (SAI->getName() == BaseName)
2406       return SAI;
2407   }
2408   return nullptr;
2409 }
2410 
/// Register @p Access in the per-array lookup tables: value reads go into
/// ValueUseAccs, PHI incoming writes into PHIIncomingAccs.
void Scop::addAccessData(MemoryAccess *Access) {
  const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
  assert(SAI && "can only use after access relations have been constructed");

  if (Access->isOriginalValueKind() && Access->isRead())
    ValueUseAccs[SAI].push_back(Access);
  else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
    PHIIncomingAccs[SAI].push_back(Access);
}
2420 
2421 void Scop::removeAccessData(MemoryAccess *Access) {
2422   if (Access->isOriginalValueKind() && Access->isWrite()) {
2423     ValueDefAccs.erase(Access->getAccessValue());
2424   } else if (Access->isOriginalValueKind() && Access->isRead()) {
2425     auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
2426     auto NewEnd = std::remove(Uses.begin(), Uses.end(), Access);
2427     Uses.erase(NewEnd, Uses.end());
2428   } else if (Access->isOriginalPHIKind() && Access->isRead()) {
2429     PHINode *PHI = cast<PHINode>(Access->getAccessInstruction());
2430     PHIReadAccs.erase(PHI);
2431   } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
2432     auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
2433     auto NewEnd = std::remove(Incomings.begin(), Incomings.end(), Access);
2434     Incomings.erase(NewEnd, Incomings.end());
2435   }
2436 }
2437 
2438 MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
2439   assert(SAI->isValueKind());
2440 
2441   Instruction *Val = dyn_cast<Instruction>(SAI->getBasePtr());
2442   if (!Val)
2443     return nullptr;
2444 
2445   return ValueDefAccs.lookup(Val);
2446 }
2447 
2448 ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
2449   assert(SAI->isValueKind());
2450   auto It = ValueUseAccs.find(SAI);
2451   if (It == ValueUseAccs.end())
2452     return {};
2453   return It->second;
2454 }
2455 
2456 MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
2457   assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2458 
2459   if (SAI->isExitPHIKind())
2460     return nullptr;
2461 
2462   PHINode *PHI = cast<PHINode>(SAI->getBasePtr());
2463   return PHIReadAccs.lookup(PHI);
2464 }
2465 
2466 ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
2467   assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2468   auto It = PHIIncomingAccs.find(SAI);
2469   if (It == PHIIncomingAccs.end())
2470     return {};
2471   return It->second;
2472 }
2473 
2474 bool Scop::isEscaping(Instruction *Inst) {
2475   assert(contains(Inst) && "The concept of escaping makes only sense for "
2476                            "values defined inside the SCoP");
2477 
2478   for (Use &Use : Inst->uses()) {
2479     BasicBlock *UserBB = getUseBlock(Use);
2480     if (!contains(UserBB))
2481       return true;
2482 
2483     // When the SCoP region exit needs to be simplified, PHIs in the region exit
2484     // move to a new basic block such that its incoming blocks are not in the
2485     // SCoP anymore.
2486     if (hasSingleExitEdge() && isa<PHINode>(Use.getUser()) &&
2487         isExit(cast<PHINode>(Use.getUser())->getParent()))
2488       return true;
2489   }
2490   return false;
2491 }
2492 
2493 void Scop::incrementNumberOfAliasingAssumptions(unsigned step) {
2494   AssumptionsAliasing += step;
2495 }
2496 
2497 Scop::ScopStatistics Scop::getStatistics() const {
2498   ScopStatistics Result;
2499 #if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2500   auto LoopStat = ScopDetection::countBeneficialLoops(&R, *SE, *getLI(), 0);
2501 
2502   int NumTotalLoops = LoopStat.NumLoops;
2503   Result.NumBoxedLoops = getBoxedLoops().size();
2504   Result.NumAffineLoops = NumTotalLoops - Result.NumBoxedLoops;
2505 
2506   for (const ScopStmt &Stmt : *this) {
2507     isl::set Domain = Stmt.getDomain().intersect_params(getContext());
2508     bool IsInLoop = Stmt.getNumIterators() >= 1;
2509     for (MemoryAccess *MA : Stmt) {
2510       if (!MA->isWrite())
2511         continue;
2512 
2513       if (MA->isLatestValueKind()) {
2514         Result.NumValueWrites += 1;
2515         if (IsInLoop)
2516           Result.NumValueWritesInLoops += 1;
2517       }
2518 
2519       if (MA->isLatestAnyPHIKind()) {
2520         Result.NumPHIWrites += 1;
2521         if (IsInLoop)
2522           Result.NumPHIWritesInLoops += 1;
2523       }
2524 
2525       isl::set AccSet =
2526           MA->getAccessRelation().intersect_domain(Domain).range();
2527       if (AccSet.is_singleton()) {
2528         Result.NumSingletonWrites += 1;
2529         if (IsInLoop)
2530           Result.NumSingletonWritesInLoops += 1;
2531       }
2532     }
2533   }
2534 #endif
2535   return Result;
2536 }
2537 
2538 raw_ostream &polly::operator<<(raw_ostream &OS, const Scop &scop) {
2539   scop.print(OS, PollyPrintInstructions);
2540   return OS;
2541 }
2542 
2543 //===----------------------------------------------------------------------===//
2544 void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
2545   AU.addRequired<LoopInfoWrapperPass>();
2546   AU.addRequired<RegionInfoPass>();
2547   AU.addRequired<DominatorTreeWrapperPass>();
2548   AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
2549   AU.addRequiredTransitive<ScopDetectionWrapperPass>();
2550   AU.addRequired<AAResultsWrapperPass>();
2551   AU.addRequired<AssumptionCacheTracker>();
2552   AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2553   AU.setPreservesAll();
2554 }
2555 
2556 void updateLoopCountStatistic(ScopDetection::LoopStats Stats,
2557                               Scop::ScopStatistics ScopStats) {
2558   assert(Stats.NumLoops == ScopStats.NumAffineLoops + ScopStats.NumBoxedLoops);
2559 
2560   NumScops++;
2561   NumLoopsInScop += Stats.NumLoops;
2562   MaxNumLoopsInScop =
2563       std::max(MaxNumLoopsInScop.getValue(), (unsigned)Stats.NumLoops);
2564 
2565   if (Stats.MaxDepth == 0)
2566     NumScopsDepthZero++;
2567   else if (Stats.MaxDepth == 1)
2568     NumScopsDepthOne++;
2569   else if (Stats.MaxDepth == 2)
2570     NumScopsDepthTwo++;
2571   else if (Stats.MaxDepth == 3)
2572     NumScopsDepthThree++;
2573   else if (Stats.MaxDepth == 4)
2574     NumScopsDepthFour++;
2575   else if (Stats.MaxDepth == 5)
2576     NumScopsDepthFive++;
2577   else
2578     NumScopsDepthLarger++;
2579 
2580   NumAffineLoops += ScopStats.NumAffineLoops;
2581   NumBoxedLoops += ScopStats.NumBoxedLoops;
2582 
2583   NumValueWrites += ScopStats.NumValueWrites;
2584   NumValueWritesInLoops += ScopStats.NumValueWritesInLoops;
2585   NumPHIWrites += ScopStats.NumPHIWrites;
2586   NumPHIWritesInLoops += ScopStats.NumPHIWritesInLoops;
2587   NumSingletonWrites += ScopStats.NumSingletonWrites;
2588   NumSingletonWritesInLoops += ScopStats.NumSingletonWritesInLoops;
2589 }
2590 
2591 bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
2592   auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2593 
2594   if (!SD.isMaxRegionInScop(*R))
2595     return false;
2596 
2597   Function *F = R->getEntry()->getParent();
2598   auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2599   auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2600   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2601   auto const &DL = F->getParent()->getDataLayout();
2602   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2603   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);
2604   auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2605 
2606   ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2607   S = SB.getScop(); // take ownership of scop object
2608 
2609 #if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2610   if (S) {
2611     ScopDetection::LoopStats Stats =
2612         ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
2613     updateLoopCountStatistic(Stats, S->getStatistics());
2614   }
2615 #endif
2616 
2617   return false;
2618 }
2619 
2620 void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
2621   if (S)
2622     S->print(OS, PollyPrintInstructions);
2623   else
2624     OS << "Invalid Scop!\n";
2625 }
2626 
// Unique pass identifier used by the legacy pass manager.
char ScopInfoRegionPass::ID = 0;

// Factory for the legacy region pass; callers take ownership of the result.
Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }

// Legacy pass-manager registration: declare the pass, its command-line name
// (-polly-scops), and the analyses it depends on.
INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)
2644 
2645 //===----------------------------------------------------------------------===//
// Store references to all analyses ScopInfo depends on and eagerly build the
// Scops for the function via recompute().
ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
                   LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
                   AssumptionCache &AC, OptimizationRemarkEmitter &ORE)
    : DL(DL), SD(SD), SE(SE), LI(LI), AA(AA), DT(DT), AC(AC), ORE(ORE) {
  recompute();
}
2652 
2653 void ScopInfo::recompute() {
2654   RegionToScopMap.clear();
2655   /// Create polyhedral description of scops for all the valid regions of a
2656   /// function.
2657   for (auto &It : SD) {
2658     Region *R = const_cast<Region *>(It);
2659     if (!SD.isMaxRegionInScop(*R))
2660       continue;
2661 
2662     ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2663     std::unique_ptr<Scop> S = SB.getScop();
2664     if (!S)
2665       continue;
2666 #if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2667     ScopDetection::LoopStats Stats =
2668         ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
2669     updateLoopCountStatistic(Stats, S->getStatistics());
2670 #endif
2671     bool Inserted = RegionToScopMap.insert({R, std::move(S)}).second;
2672     assert(Inserted && "Building Scop for the same region twice!");
2673     (void)Inserted;
2674   }
2675 }
2676 
2677 bool ScopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
2678                           FunctionAnalysisManager::Invalidator &Inv) {
2679   // Check whether the analysis, all analyses on functions have been preserved
2680   // or anything we're holding references to is being invalidated
2681   auto PAC = PA.getChecker<ScopInfoAnalysis>();
2682   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2683          Inv.invalidate<ScopAnalysis>(F, PA) ||
2684          Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
2685          Inv.invalidate<LoopAnalysis>(F, PA) ||
2686          Inv.invalidate<AAManager>(F, PA) ||
2687          Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
2688          Inv.invalidate<AssumptionAnalysis>(F, PA);
2689 }
2690 
// Unique key identifying this analysis to the new pass manager.
AnalysisKey ScopInfoAnalysis::Key;
2692 
2693 ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
2694                                                FunctionAnalysisManager &FAM) {
2695   auto &SD = FAM.getResult<ScopAnalysis>(F);
2696   auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
2697   auto &LI = FAM.getResult<LoopAnalysis>(F);
2698   auto &AA = FAM.getResult<AAManager>(F);
2699   auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
2700   auto &AC = FAM.getResult<AssumptionAnalysis>(F);
2701   auto &DL = F.getParent()->getDataLayout();
2702   auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2703   return {DL, SD, SE, LI, AA, DT, AC, ORE};
2704 }
2705 
2706 PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
2707                                            FunctionAnalysisManager &FAM) {
2708   auto &SI = FAM.getResult<ScopInfoAnalysis>(F);
2709   // Since the legacy PM processes Scops in bottom up, we print them in reverse
2710   // order here to keep the output persistent
2711   for (auto &It : reverse(SI)) {
2712     if (It.second)
2713       It.second->print(Stream, PollyPrintInstructions);
2714     else
2715       Stream << "Invalid Scop!\n";
2716   }
2717   return PreservedAnalyses::all();
2718 }
2719 
2720 void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2721   AU.addRequired<LoopInfoWrapperPass>();
2722   AU.addRequired<RegionInfoPass>();
2723   AU.addRequired<DominatorTreeWrapperPass>();
2724   AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
2725   AU.addRequiredTransitive<ScopDetectionWrapperPass>();
2726   AU.addRequired<AAResultsWrapperPass>();
2727   AU.addRequired<AssumptionCacheTracker>();
2728   AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2729   AU.setPreservesAll();
2730 }
2731 
2732 bool ScopInfoWrapperPass::runOnFunction(Function &F) {
2733   auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2734   auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2735   auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2736   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2737   auto const &DL = F.getParent()->getDataLayout();
2738   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2739   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2740   auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2741 
2742   Result.reset(new ScopInfo{DL, SD, SE, LI, AA, DT, AC, ORE});
2743   return false;
2744 }
2745 
2746 void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
2747   for (auto &It : *Result) {
2748     if (It.second)
2749       It.second->print(OS, PollyPrintInstructions);
2750     else
2751       OS << "Invalid Scop!\n";
2752   }
2753 }
2754 
// Unique pass identifier used by the legacy pass manager.
char ScopInfoWrapperPass::ID = 0;

// Factory for the legacy function pass; callers take ownership of the result.
Pass *polly::createScopInfoWrapperPassPass() {
  return new ScopInfoWrapperPass();
}

// Legacy pass-manager registration: declare the pass, its command-line name
// (-polly-function-scops), and the analyses it depends on.
INITIALIZE_PASS_BEGIN(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false)
2776