1 //===- ScopInfo.cpp -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Create a polyhedral description for a static control flow region.
10 //
11 // The pass creates a polyhedral description of the Scops detected by the Scop
12 // detection derived from their LLVM-IR code.
13 //
14 // This representation is shared among several tools in the polyhedral
15 // community, which are e.g. Cloog, Pluto, Loopo, Graphite.
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "polly/ScopInfo.h"
20 #include "polly/LinkAllPasses.h"
21 #include "polly/Options.h"
22 #include "polly/ScopBuilder.h"
23 #include "polly/ScopDetection.h"
24 #include "polly/Support/GICHelper.h"
25 #include "polly/Support/ISLOStream.h"
26 #include "polly/Support/ISLTools.h"
27 #include "polly/Support/SCEVAffinator.h"
28 #include "polly/Support/SCEVValidator.h"
29 #include "polly/Support/ScopHelper.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/PostOrderIterator.h"
33 #include "llvm/ADT/Sequence.h"
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallSet.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/Analysis/AliasAnalysis.h"
38 #include "llvm/Analysis/AssumptionCache.h"
39 #include "llvm/Analysis/Loads.h"
40 #include "llvm/Analysis/LoopInfo.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/RegionInfo.h"
43 #include "llvm/Analysis/RegionIterator.h"
44 #include "llvm/Analysis/ScalarEvolution.h"
45 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/ConstantRange.h"
48 #include "llvm/IR/DataLayout.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/InstrTypes.h"
53 #include "llvm/IR/Instruction.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/Module.h"
56 #include "llvm/IR/PassManager.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/InitializePasses.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "isl/aff.h"
65 #include "isl/local_space.h"
66 #include "isl/map.h"
67 #include "isl/options.h"
68 #include "isl/set.h"
69 #include <cassert>
70 
71 using namespace llvm;
72 using namespace polly;
73 
74 #define DEBUG_TYPE "polly-scops"
75 
// Counters for the different kinds of assumptions recorded while modeling
// SCoPs (bumped when the corresponding assumption is taken).
STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
STATISTIC(AssumptionsInvariantLoad,
          "Number of invariant loads assumptions taken.");
STATISTIC(AssumptionsDelinearization,
          "Number of delinearization assumptions taken.");

// Counters describing the size/shape of the SCoPs that remain feasible after
// ScopInfo ran.
STATISTIC(NumScops, "Number of feasible SCoPs after ScopInfo");
STATISTIC(NumLoopsInScop, "Number of loops in scops");
STATISTIC(NumBoxedLoops, "Number of boxed loops in SCoPs after ScopInfo");
STATISTIC(NumAffineLoops, "Number of affine loops in SCoPs after ScopInfo");

// Histogram of maximal loop depths over all modeled SCoPs.
STATISTIC(NumScopsDepthZero, "Number of scops with maximal loop depth 0");
STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
STATISTIC(NumScopsDepthLarger,
          "Number of scops with maximal loop depth 6 and larger");
STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");

// Counters for scalar (value/PHI) and singleton writes produced by modeling.
STATISTIC(NumValueWrites, "Number of scalar value writes after ScopInfo");
STATISTIC(
    NumValueWritesInLoops,
    "Number of scalar value writes nested in affine loops after ScopInfo");
STATISTIC(NumPHIWrites, "Number of scalar phi writes after ScopInfo");
STATISTIC(NumPHIWritesInLoops,
          "Number of scalar phi writes nested in affine loops after ScopInfo");
STATISTIC(NumSingletonWrites, "Number of singleton writes after ScopInfo");
STATISTIC(NumSingletonWritesInLoops,
          "Number of singleton writes nested in affine loops after ScopInfo");
114 
// Upper bound on the number of disjuncts tolerated in a statement's domain;
// used elsewhere in this file to limit set complexity.
int const polly::MaxDisjunctsInDomain = 20;

// The number of disjuncts in the context after which we stop to add more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
static int const MaxDisjunctsInContext = 4;

// Be a bit more generous for the defined behavior context which is used less
// often.
// NOTE(review): identifier misspells "Disjuncts" and uses a different spelling
// of "Behaviour" than the rest of the file; kept as-is since later parts of
// this file (outside this view) may reference it.
static int const MaxDisjunktsInDefinedBehaviourContext = 8;
125 
// -polly-remarks-minimal: suppress remarks about assumptions that are
// statically known to hold.
static cl::opt<bool> PollyRemarksMinimal(
    "polly-remarks-minimal",
    cl::desc("Do not emit remarks about assumptions that are known"),
    cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));

// -polly-on-isl-error-abort: turn isl errors into hard aborts (on by default).
static cl::opt<bool>
    IslOnErrorAbort("polly-on-isl-error-abort",
                    cl::desc("Abort if an isl error is encountered"),
                    cl::init(true), cl::cat(PollyCategory));

// -polly-precise-inbounds: keep inbounds assumptions precise instead of
// gisting them against the statement domain (see assumeNoOutOfBound()).
static cl::opt<bool> PollyPreciseInbounds(
    "polly-precise-inbounds",
    cl::desc("Take more precise inbounds assumptions (do not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// -polly-ignore-parameter-bounds: skip adding parameter range constraints.
static cl::opt<bool> PollyIgnoreParamBounds(
    "polly-ignore-parameter-bounds",
    cl::desc(
        "Do not add parameter bounds and do no gist simplify sets accordingly"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// -polly-precise-fold-accesses: keep folded access relations even when the
// fold increases the number of disjuncts (see foldAccessRelation()).
static cl::opt<bool> PollyPreciseFoldAccesses(
    "polly-precise-fold-accesses",
    cl::desc("Fold memory accesses to model more possible delinearizations "
             "(does not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

bool polly::UseInstructionNames;

// -polly-use-llvm-names: derive statement/array names from LLVM-IR value
// names; writes through to polly::UseInstructionNames above.
static cl::opt<bool, true> XUseInstructionNames(
    "polly-use-llvm-names",
    cl::desc("Use LLVM-IR names when deriving statement names"),
    cl::location(UseInstructionNames), cl::Hidden, cl::init(false),
    cl::ZeroOrMore, cl::cat(PollyCategory));

// -polly-print-instructions: include per-statement instruction lists when
// printing a SCoP.
static cl::opt<bool> PollyPrintInstructions(
    "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
    cl::Hidden, cl::Optional, cl::init(false), cl::cat(PollyCategory));

// -polly-isl-arg: raw options forwarded to the isl library.
static cl::list<std::string> IslArgs("polly-isl-arg",
                                     cl::value_desc("argument"),
                                     cl::desc("Option passed to ISL"),
                                     cl::ZeroOrMore, cl::cat(PollyCategory));
169 
170 //===----------------------------------------------------------------------===//
171 
172 static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
173                                     int dim, isl::dim type) {
174   isl::val V;
175   isl::ctx Ctx = S.get_ctx();
176 
177   // The upper and lower bound for a parameter value is derived either from
178   // the data type of the parameter or from the - possibly more restrictive -
179   // range metadata.
180   V = valFromAPInt(Ctx.get(), Range.getSignedMin(), true);
181   S = S.lower_bound_val(type, dim, V);
182   V = valFromAPInt(Ctx.get(), Range.getSignedMax(), true);
183   S = S.upper_bound_val(type, dim, V);
184 
185   if (Range.isFullSet())
186     return S;
187 
188   if (S.n_basic_set() > MaxDisjunctsInContext)
189     return S;
190 
191   // In case of signed wrapping, we can refine the set of valid values by
192   // excluding the part not covered by the wrapping range.
193   if (Range.isSignWrappedSet()) {
194     V = valFromAPInt(Ctx.get(), Range.getLower(), true);
195     isl::set SLB = S.lower_bound_val(type, dim, V);
196 
197     V = valFromAPInt(Ctx.get(), Range.getUpper(), true);
198     V = V.sub_ui(1);
199     isl::set SUB = S.upper_bound_val(type, dim, V);
200     S = SLB.unite(SUB);
201   }
202 
203   return S;
204 }
205 
206 static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
207   LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
208   if (!BasePtrLI)
209     return nullptr;
210 
211   if (!S->contains(BasePtrLI))
212     return nullptr;
213 
214   ScalarEvolution &SE = *S->getSE();
215 
216   auto *OriginBaseSCEV =
217       SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
218   if (!OriginBaseSCEV)
219     return nullptr;
220 
221   auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
222   if (!OriginBaseSCEVUnknown)
223     return nullptr;
224 
225   return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
226                              MemoryKind::Array);
227 }
228 
// Construct a ScopArrayInfo describing the array rooted at 'BasePtr'.
//
// The isl id allocated here carries 'this' as its user pointer so the SAI can
// later be recovered from isl objects (see getFromId()). For arrays of kind
// MemoryKind::Array we also check whether the base pointer is itself loaded
// inside the SCoP and, if so, link this SAI to the SAI the base pointer
// originates from.
ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl::ctx Ctx,
                             ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
                             const DataLayout &DL, Scop *S,
                             const char *BaseName)
    : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
  // Derive an isl-compatible name unless an explicit one was provided.
  std::string BasePtrName =
      BaseName ? BaseName
               : getIslCompatibleName("MemRef", BasePtr, S->getNextArrayIdx(),
                                      Kind == MemoryKind::PHI ? "__phi" : "",
                                      UseInstructionNames);
  Id = isl::id::alloc(Ctx, BasePtrName, this);

  updateSizes(Sizes);

  // Only real arrays can have an indirect (loaded) base pointer.
  if (!BasePtr || Kind != MemoryKind::Array) {
    BasePtrOriginSAI = nullptr;
    return;
  }

  BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
  if (BasePtrOriginSAI)
    const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
}

// Out-of-line defaulted destructor.
ScopArrayInfo::~ScopArrayInfo() = default;
254 
255 isl::space ScopArrayInfo::getSpace() const {
256   auto Space = isl::space(Id.get_ctx(), 0, getNumberOfDimensions());
257   Space = Space.set_tuple_id(isl::dim::set, Id);
258   return Space;
259 }
260 
261 bool ScopArrayInfo::isReadOnly() {
262   isl::union_set WriteSet = S.getWrites().range();
263   isl::space Space = getSpace();
264   WriteSet = WriteSet.extract_set(Space);
265 
266   return bool(WriteSet.is_empty());
267 }
268 
269 bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
270   if (Array->getElementType() != getElementType())
271     return false;
272 
273   if (Array->getNumberOfDimensions() != getNumberOfDimensions())
274     return false;
275 
276   for (unsigned i = 0; i < getNumberOfDimensions(); i++)
277     if (Array->getDimensionSize(i) != getDimensionSize(i))
278       return false;
279 
280   return true;
281 }
282 
// Merge 'NewElementType' into the canonical element type of this array. When
// the allocation sizes differ, the canonical element type is replaced by an
// integer type whose width is the GCD of both sizes, so all accesses can be
// expressed in whole multiples of the canonical element.
void ScopArrayInfo::updateElementType(Type *NewElementType) {
  if (NewElementType == ElementType)
    return;

  auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
  auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);

  if (NewElementSize == OldElementSize || NewElementSize == 0)
    return;

  // NOTE(review): for positive sizes, NewElementSize % OldElementSize == 0
  // implies NewElementSize >= OldElementSize, so this condition appears
  // unsatisfiable and the GCD fallback below always runs (the GCD yields the
  // same width whenever one size divides the other). Confirm the intended
  // condition; left unchanged to avoid altering which Type* is stored.
  if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
    ElementType = NewElementType;
  } else {
    auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
    ElementType = IntegerType::get(ElementType->getContext(), GCD);
  }
}
300 
/// Make the ScopArrayInfo model a Fortran Array.
///
/// Fortran arrays are described by a descriptor value 'FAD' and their
/// outermost dimension size is not available as a SCEV. We introduce a fresh
/// isl parameter ("<name>_fortranarr_size", with this SAI as its user
/// pointer) and install it as the outermost dimension size.
void ScopArrayInfo::applyAndSetFAD(Value *FAD) {
  assert(FAD && "got invalid Fortran array descriptor");
  if (this->FAD) {
    // A descriptor was attached earlier; it must be the same one.
    assert(this->FAD == FAD &&
           "receiving different array descriptors for same array");
    return;
  }

  // The outermost dimension must currently have no size expression.
  assert(DimensionSizesPw.size() > 0 && !DimensionSizesPw[0]);
  assert(!this->FAD);
  this->FAD = FAD;

  // Space with a single parameter dimension holding the symbolic size.
  isl::space Space(S.getIslCtx(), 1, 0);

  std::string param_name = getName();
  param_name += "_fortranarr_size";
  isl::id IdPwAff = isl::id::alloc(S.getIslCtx(), param_name, this);

  Space = Space.set_dim_id(isl::dim::param, 0, IdPwAff);
  isl::pw_aff PwAff =
      isl::aff::var_on_domain(isl::local_space(Space), isl::dim::param, 0);

  DimensionSizesPw[0] = PwAff;
}
326 
327 bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
328                                 bool CheckConsistency) {
329   int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
330   int ExtraDimsNew = NewSizes.size() - SharedDims;
331   int ExtraDimsOld = DimensionSizes.size() - SharedDims;
332 
333   if (CheckConsistency) {
334     for (int i = 0; i < SharedDims; i++) {
335       auto *NewSize = NewSizes[i + ExtraDimsNew];
336       auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
337       if (NewSize && KnownSize && NewSize != KnownSize)
338         return false;
339     }
340 
341     if (DimensionSizes.size() >= NewSizes.size())
342       return true;
343   }
344 
345   DimensionSizes.clear();
346   DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
347                         NewSizes.end());
348   DimensionSizesPw.clear();
349   for (const SCEV *Expr : DimensionSizes) {
350     if (!Expr) {
351       DimensionSizesPw.push_back(nullptr);
352       continue;
353     }
354     isl::pw_aff Size = S.getPwAffOnly(Expr);
355     DimensionSizesPw.push_back(Size);
356   }
357   return true;
358 }
359 
// Name of this array as used in its isl id.
std::string ScopArrayInfo::getName() const { return Id.get_name(); }

// Allocation size of one canonical array element in bytes.
int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(ElementType);
}

// The isl id for this array; its user pointer refers back to this
// ScopArrayInfo (see getFromId()).
isl::id ScopArrayInfo::getBasePtrId() const { return Id; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(errs()); }
#endif
371 
// Print this array in human-readable form: element type and name, one
// "[...]" per dimension (the SCEV size, or with 'SizeAsPwAff' the isl size
// expression), then the base-pointer origin (if any) and the element size.
void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
  OS.indent(8) << *getElementType() << " " << getName();
  unsigned u = 0;
  // If this is a Fortran array, then we can print the outermost dimension
  // as a isl_pw_aff even though there is no SCEV information.
  bool IsOutermostSizeKnown = SizeAsPwAff && FAD;

  // An unknown outermost size is printed as "[*]".
  if (!IsOutermostSizeKnown && getNumberOfDimensions() > 0 &&
      !getDimensionSize(0)) {
    OS << "[*]";
    u++;
  }
  for (; u < getNumberOfDimensions(); u++) {
    OS << "[";

    if (SizeAsPwAff) {
      isl::pw_aff Size = getDimensionSizePw(u);
      OS << " " << Size << " ";
    } else {
      OS << *getDimensionSize(u);
    }

    OS << "]";
  }

  OS << ";";

  if (BasePtrOriginSAI)
    OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";

  OS << " // Element size " << getElemSizeInBytes() << "\n";
}
404 
405 const ScopArrayInfo *
406 ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
407   isl::id Id = PMA.get_tuple_id(isl::dim::out);
408   assert(!Id.is_null() && "Output dimension didn't have an ID");
409   return getFromId(Id);
410 }
411 
412 const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
413   void *User = Id.get_user();
414   const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
415   return SAI;
416 }
417 
// Normalize the subscripts of this access so that every constant-sized inner
// dimension stays within [0, size): each subscript becomes 'index mod size'
// and the carry 'floor(index / size)' is added to the next outer dimension.
// Dimensions with dynamic or zero size are skipped; the outermost dimension
// (i == 0) absorbs the remaining carry and is never wrapped itself.
void MemoryAccess::wrapConstantDimensions() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::ctx Ctx = ArraySpace.get_ctx();
  unsigned DimsArray = SAI->getNumberOfDimensions();

  // Start from the identity on the array space and successively install the
  // mod/div pair for each constant-sized dimension.
  isl::multi_aff DivModAff = isl::multi_aff::identity(
      ArraySpace.map_from_domain_and_range(ArraySpace));
  isl::local_space LArraySpace = isl::local_space(ArraySpace);

  // Begin with last dimension, to iteratively carry into higher dimensions.
  for (int i = DimsArray - 1; i > 0; i--) {
    auto *DimSize = SAI->getDimensionSize(i);
    auto *DimSizeCst = dyn_cast<SCEVConstant>(DimSize);

    // This transformation is not applicable to dimensions with dynamic size.
    if (!DimSizeCst)
      continue;

    // This transformation is not applicable to dimensions of size zero.
    if (DimSize->isZero())
      continue;

    isl::val DimSizeVal =
        valFromAPInt(Ctx.get(), DimSizeCst->getAPInt(), false);
    isl::aff Var = isl::aff::var_on_domain(LArraySpace, isl::dim::set, i);
    isl::aff PrevVar =
        isl::aff::var_on_domain(LArraySpace, isl::dim::set, i - 1);

    // Compute: index % size
    // Modulo must apply in the divide of the previous iteration, if any.
    isl::aff Modulo = Var.mod(DimSizeVal);
    Modulo = Modulo.pullback(DivModAff);

    // Compute: floor(index / size)
    isl::aff Divide = Var.div(isl::aff(LArraySpace, DimSizeVal));
    Divide = Divide.floor();
    Divide = Divide.add(PrevVar);
    Divide = Divide.pullback(DivModAff);

    // Apply Modulo and Divide.
    DivModAff = DivModAff.set_aff(i, Modulo);
    DivModAff = DivModAff.set_aff(i - 1, Divide);
  }

  // Apply all modulo/divides on the accesses.
  isl::map Relation = AccessRelation;
  Relation = Relation.apply_range(isl::map::from_multi_aff(DivModAff));
  Relation = Relation.detect_equalities();
  AccessRelation = Relation;
}
469 
// Bring the access relation into the shape of its canonical array:
//  * prepend zero-fixed dimensions if the access has fewer subscripts than
//    the array has dimensions,
//  * for single-subscript accesses, divide the subscript by the canonical
//    element size (LLVM-IR addresses memory in bytes),
//  * wrap constant-sized dimensions when dimensions were added,
//  * bound non-affine access relations via ScalarEvolution ranges, and
//  * widen the accessed range when the type accessed here is larger than the
//    array's canonical element type.
void MemoryAccess::updateDimensionality() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::space AccessSpace = AccessRelation.get_space().range();
  isl::ctx Ctx = ArraySpace.get_ctx();

  auto DimsArray = ArraySpace.dim(isl::dim::set);
  auto DimsAccess = AccessSpace.dim(isl::dim::set);
  auto DimsMissing = DimsArray - DimsAccess;

  auto *BB = getStatement()->getEntryBlock();
  auto &DL = BB->getModule()->getDataLayout();
  unsigned ArrayElemSize = SAI->getElemSizeInBytes();
  unsigned ElemBytes = DL.getTypeAllocSize(getElementType());

  // Map the access range into the array space: missing outer dimensions are
  // fixed to zero, the remaining ones are equated.
  isl::map Map = isl::map::from_domain_and_range(
      isl::set::universe(AccessSpace), isl::set::universe(ArraySpace));

  for (auto i : seq<isl_size>(0, DimsMissing))
    Map = Map.fix_si(isl::dim::out, i, 0);

  for (auto i : seq<isl_size>(DimsMissing, DimsArray))
    Map = Map.equate(isl::dim::in, i - DimsMissing, isl::dim::out, i);

  AccessRelation = AccessRelation.apply_range(Map);

  // For the non delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not divisible
  // by the accesses element size, we will have chosen a smaller ArrayElemSize
  // that divides the offsets of all accesses to this base pointer.
  if (DimsAccess == 1) {
    isl::val V = isl::val(Ctx, ArrayElemSize);
    AccessRelation = AccessRelation.floordiv_val(V);
  }

  // We currently do this only if we added at least one dimension, which means
  // some dimension's indices have not been specified, an indicator that some
  // index values have been added together.
  // TODO: Investigate general usefulness; Effect on unit tests is to make index
  // expressions more complicated.
  if (DimsMissing)
    wrapConstantDimensions();

  if (!isAffine())
    computeBoundsOnAccessRelation(ArrayElemSize);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // {[i] -> A[o] : 4 i <= o <= 4 i + 3
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be multiple of canonical element size");
    isl::map Map = isl::map::from_domain_and_range(
        isl::set::universe(ArraySpace), isl::set::universe(ArraySpace));
    for (auto i : seq<isl_size>(0, DimsArray - 1))
      Map = Map.equate(isl::dim::in, i, isl::dim::out, i);

    isl::constraint C;
    isl::local_space LS;

    LS = isl::local_space(Map.get_space());
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    // in - out + (Num - 1) >= 0  <=>  out <= in + Num - 1.
    C = isl::constraint::alloc_inequality(LS);
    C = C.set_constant_val(isl::val(Ctx, Num - 1));
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, 1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, -1);
    Map = Map.add_constraint(C);

    // out - in >= 0  <=>  out >= in.
    C = isl::constraint::alloc_inequality(LS);
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, -1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, 1);
    C = C.set_constant_val(isl::val(Ctx, 0));
    Map = Map.add_constraint(C);
    AccessRelation = AccessRelation.apply_range(Map);
  }
}
555 
556 const std::string
557 MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
558   switch (RT) {
559   case MemoryAccess::RT_NONE:
560     llvm_unreachable("Requested a reduction operator string for a memory "
561                      "access which isn't a reduction");
562   case MemoryAccess::RT_ADD:
563     return "+";
564   case MemoryAccess::RT_MUL:
565     return "*";
566   case MemoryAccess::RT_BOR:
567     return "|";
568   case MemoryAccess::RT_BXOR:
569     return "^";
570   case MemoryAccess::RT_BAND:
571     return "&";
572   }
573   llvm_unreachable("Unknown reduction type");
574 }
575 
576 const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
577   isl::id ArrayId = getArrayId();
578   void *User = ArrayId.get_user();
579   const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
580   return SAI;
581 }
582 
583 const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
584   isl::id ArrayId = getLatestArrayId();
585   void *User = ArrayId.get_user();
586   const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
587   return SAI;
588 }
589 
// Id of the array tuple of the original (unmodified) access relation.
isl::id MemoryAccess::getOriginalArrayId() const {
  return AccessRelation.get_tuple_id(isl::dim::out);
}

// Id of the array tuple after any access-relation update; falls back to the
// original id when no new access relation was set.
isl::id MemoryAccess::getLatestArrayId() const {
  if (!hasNewAccessRelation())
    return getOriginalArrayId();
  return NewAccessRelation.get_tuple_id(isl::dim::out);
}

// A single-valued version of the access relation, picking the
// lexicographically minimal accessed element per statement instance.
isl::map MemoryAccess::getAddressFunction() const {
  return getAccessRelation().lexmin();
}
603 
604 isl::pw_multi_aff
605 MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
606   isl::map Schedule, ScheduledAccRel;
607   isl::union_set UDomain;
608 
609   UDomain = getStatement()->getDomain();
610   USchedule = USchedule.intersect_domain(UDomain);
611   Schedule = isl::map::from_union_map(USchedule);
612   ScheduledAccRel = getAddressFunction().apply_domain(Schedule);
613   return isl::pw_multi_aff::from_map(ScheduledAccRel);
614 }
615 
// The access relation as originally derived from the IR.
isl::map MemoryAccess::getOriginalAccessRelation() const {
  return AccessRelation;
}

// String form of the original access relation (for printing/debugging).
std::string MemoryAccess::getOriginalAccessRelationStr() const {
  return AccessRelation.to_str();
}

// Space of the original access relation.
isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
  return AccessRelation.get_space();
}

// The updated access relation, if one was installed (may be null otherwise).
isl::map MemoryAccess::getNewAccessRelation() const {
  return NewAccessRelation;
}

// String form of the updated access relation.
std::string MemoryAccess::getNewAccessRelationStr() const {
  return NewAccessRelation.to_str();
}

// String form of the effective (latest) access relation.
std::string MemoryAccess::getAccessRelationStr() const {
  return getAccessRelation().to_str();
}
639 
640 isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
641   isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
642   Space = Space.align_params(Statement->getDomainSpace());
643 
644   return isl::basic_map::from_domain_and_range(
645       isl::basic_set::universe(Statement->getDomainSpace()),
646       isl::basic_set::universe(Space));
647 }
648 
// Formalize no out-of-bound access assumption
//
// When delinearizing array accesses we optimistically assume that the
// delinearized accesses do not access out of bound locations (the subscript
// expression of each array evaluates for each statement instance that is
// executed to a value that is larger than zero and strictly smaller than the
// size of the corresponding dimension). The only exception is the outermost
// dimension for which we do not need to assume any upper bound.  At this point
// we formalize this assumption to ensure that at code generation time the
// relevant run-time checks can be generated.
//
// To find the set of constraints necessary to avoid out of bound accesses, we
// first build the set of data locations that are not within array bounds. We
// then apply the reverse access relation to obtain the set of iterations that
// may contain invalid accesses and reduce this set of iterations to the ones
// that are actually executed by intersecting them with the domain of the
// statement. If we now project out all loop dimensions, we obtain a set of
// parameters that may cause statement instances to be executed that may
// possibly yield out of bound memory accesses. The complement of these
// constraints is the set of constraints that needs to be assumed to ensure such
// statement instances are never executed.
isl::set MemoryAccess::assumeNoOutOfBound() {
  auto *SAI = getScopArrayInfo();
  isl::space Space = getOriginalAccessRelationSpace().range();
  isl::set Outside = isl::set::empty(Space);
  // Collect, per inner dimension (the outermost needs no upper bound), the
  // data locations below zero or at/above the dimension size.
  for (int i = 1, Size = Space.dim(isl::dim::set); i < Size; ++i) {
    isl::local_space LS(Space);
    isl::pw_aff Var = isl::pw_aff::var_on_domain(LS, isl::dim::set, i);
    isl::pw_aff Zero = isl::pw_aff(LS);

    isl::set DimOutside = Var.lt_set(Zero);
    isl::pw_aff SizeE = SAI->getDimensionSizePw(i);
    SizeE = SizeE.add_dims(isl::dim::in, Space.dim(isl::dim::set));
    SizeE = SizeE.set_tuple_id(isl::dim::in, Space.get_tuple_id(isl::dim::set));
    DimOutside = DimOutside.unite(SizeE.le_set(Var));

    Outside = Outside.unite(DimOutside);
  }

  // Translate the invalid data locations back to the parameter combinations
  // under which an executed statement instance could reach them.
  Outside = Outside.apply(getAccessRelation().reverse());
  Outside = Outside.intersect(Statement->getDomain());
  Outside = Outside.params();

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time check
  // bail out more often than strictly necessary.
  Outside = Outside.remove_divs();
  Outside = Outside.complement();

  if (!PollyPreciseInbounds)
    Outside = Outside.gist_params(Statement->getDomain().params());
  return Outside;
}
703 
// Build the access relation for a memory-intrinsic access.
//
// Subscripts[0] is the start offset and Subscripts[1] the length of the
// intrinsic; the modeled access range is [start, start + length). A nullptr
// length means the length is unknown, in which case everything from the start
// offset onward is considered accessed.
void MemoryAccess::buildMemIntrinsicAccessRelation() {
  assert(isMemoryIntrinsic());
  assert(Subscripts.size() == 2 && Sizes.size() == 1);

  isl::pw_aff SubscriptPWA = getPwAff(Subscripts[0]);
  isl::map SubscriptMap = isl::map::from_pw_aff(SubscriptPWA);

  isl::map LengthMap;
  if (Subscripts[1] == nullptr) {
    // Unknown length: no upper bound on the accessed offsets.
    LengthMap = isl::map::universe(SubscriptMap.get_space());
  } else {
    isl::pw_aff LengthPWA = getPwAff(Subscripts[1]);
    LengthMap = isl::map::from_pw_aff(LengthPWA);
    isl::space RangeSpace = LengthMap.get_space().range();
    // Map the length to all strictly smaller offsets: [0, length).
    LengthMap = LengthMap.apply_range(isl::map::lex_gt(RangeSpace));
  }
  LengthMap = LengthMap.lower_bound_si(isl::dim::out, 0, 0);
  LengthMap = LengthMap.align_params(SubscriptMap.get_space());
  SubscriptMap = SubscriptMap.align_params(LengthMap.get_space());
  // Shift the offset range by the start subscript.
  LengthMap = LengthMap.sum(SubscriptMap);
  AccessRelation =
      LengthMap.set_tuple_id(isl::dim::in, getStatement()->getDomainId());
}
727 
728 void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
729   ScalarEvolution *SE = Statement->getParent()->getSE();
730 
731   auto MAI = MemAccInst(getAccessInstruction());
732   if (isa<MemIntrinsic>(MAI))
733     return;
734 
735   Value *Ptr = MAI.getPointerOperand();
736   if (!Ptr || !SE->isSCEVable(Ptr->getType()))
737     return;
738 
739   auto *PtrSCEV = SE->getSCEV(Ptr);
740   if (isa<SCEVCouldNotCompute>(PtrSCEV))
741     return;
742 
743   auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
744   if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
745     PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);
746 
747   const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
748   if (Range.isFullSet())
749     return;
750 
751   if (Range.isUpperWrapped() || Range.isSignWrappedSet())
752     return;
753 
754   bool isWrapping = Range.isSignWrappedSet();
755 
756   unsigned BW = Range.getBitWidth();
757   const auto One = APInt(BW, 1);
758   const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
759   const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();
760 
761   auto Min = LB.sdiv(APInt(BW, ElementSize));
762   auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;
763 
764   assert(Min.sle(Max) && "Minimum expected to be less or equal than max");
765 
766   isl::map Relation = AccessRelation;
767   isl::set AccessRange = Relation.range();
768   AccessRange = addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0,
769                                     isl::dim::set);
770   AccessRelation = Relation.intersect_range(AccessRange);
771 }
772 
773 void MemoryAccess::foldAccessRelation() {
774   if (Sizes.size() < 2 || isa<SCEVConstant>(Sizes[1]))
775     return;
776 
777   int Size = Subscripts.size();
778 
779   isl::map NewAccessRelation = AccessRelation;
780 
781   for (int i = Size - 2; i >= 0; --i) {
782     isl::space Space;
783     isl::map MapOne, MapTwo;
784     isl::pw_aff DimSize = getPwAff(Sizes[i + 1]);
785 
786     isl::space SpaceSize = DimSize.get_space();
787     isl::id ParamId = SpaceSize.get_dim_id(isl::dim::param, 0);
788 
789     Space = AccessRelation.get_space();
790     Space = Space.range().map_from_set();
791     Space = Space.align_params(SpaceSize);
792 
793     int ParamLocation = Space.find_dim_by_id(isl::dim::param, ParamId);
794 
795     MapOne = isl::map::universe(Space);
796     for (int j = 0; j < Size; ++j)
797       MapOne = MapOne.equate(isl::dim::in, j, isl::dim::out, j);
798     MapOne = MapOne.lower_bound_si(isl::dim::in, i + 1, 0);
799 
800     MapTwo = isl::map::universe(Space);
801     for (int j = 0; j < Size; ++j)
802       if (j < i || j > i + 1)
803         MapTwo = MapTwo.equate(isl::dim::in, j, isl::dim::out, j);
804 
805     isl::local_space LS(Space);
806     isl::constraint C;
807     C = isl::constraint::alloc_equality(LS);
808     C = C.set_constant_si(-1);
809     C = C.set_coefficient_si(isl::dim::in, i, 1);
810     C = C.set_coefficient_si(isl::dim::out, i, -1);
811     MapTwo = MapTwo.add_constraint(C);
812     C = isl::constraint::alloc_equality(LS);
813     C = C.set_coefficient_si(isl::dim::in, i + 1, 1);
814     C = C.set_coefficient_si(isl::dim::out, i + 1, -1);
815     C = C.set_coefficient_si(isl::dim::param, ParamLocation, 1);
816     MapTwo = MapTwo.add_constraint(C);
817     MapTwo = MapTwo.upper_bound_si(isl::dim::in, i + 1, -1);
818 
819     MapOne = MapOne.unite(MapTwo);
820     NewAccessRelation = NewAccessRelation.apply_range(MapOne);
821   }
822 
823   isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
824   isl::space Space = Statement->getDomainSpace();
825   NewAccessRelation = NewAccessRelation.set_tuple_id(
826       isl::dim::in, Space.get_tuple_id(isl::dim::set));
827   NewAccessRelation = NewAccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
828   NewAccessRelation = NewAccessRelation.gist_domain(Statement->getDomain());
829 
830   // Access dimension folding might in certain cases increase the number of
831   // disjuncts in the memory access, which can possibly complicate the generated
832   // run-time checks and can lead to costly compilation.
833   if (!PollyPreciseFoldAccesses &&
834       NewAccessRelation.n_basic_map() > AccessRelation.n_basic_map()) {
835   } else {
836     AccessRelation = NewAccessRelation;
837   }
838 }
839 
// Build the polyhedral access relation { Stmt[iters] -> Array[subscripts] }
// for this access and attach the array's base-pointer id from \p SAI.
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(AccessRelation.is_null() && "AccessRelation already built");

  // Initialize the invalid domain which describes all iterations for which the
  // access relation is not modeled correctly.
  isl::set StmtInvalidDomain = getStatement()->getInvalidDomain();
  InvalidDomain = isl::set::empty(StmtInvalidDomain.get_space());

  isl::ctx Ctx = Id.get_ctx();
  isl::id BaseAddrId = SAI->getBasePtrId();

  // Memory intrinsics get a dedicated construction; only the output tuple id
  // needs to be attached here.
  if (getAccessInstruction() && isa<MemIntrinsic>(getAccessInstruction())) {
    buildMemIntrinsicAccessRelation();
    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference, if an
    // access must or may happen. However, for write accesses it is important to
    // differentiate between writes that must happen and writes that may happen.
    if (AccessRelation.is_null())
      AccessRelation = createBasicAccessMap(Statement);

    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  // Affine case: start from a universe map over the iteration space and
  // concatenate one piecewise-affine map per subscript expression.
  isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl::map::universe(Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl::pw_aff Affine = getPwAff(Subscripts[i]);
    isl::map SubscriptMap = isl::map::from_pw_aff(Affine);
    AccessRelation = AccessRelation.flat_range_product(SubscriptMap);
  }

  // Attach the statement tuple id on the input side, the array id on the
  // output side, and simplify with respect to the statement domain.
  Space = Statement->getDomainSpace();
  AccessRelation = AccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);

  AccessRelation = AccessRelation.gist_domain(Statement->getDomain());
}
885 
// Construct a MemoryAccess for an instruction inside the SCoP. The actual
// access relation is built later via buildAccessRelation(); this only records
// the metadata (kind, access type, subscripts, sizes) and allocates a unique
// isl id for the access.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
    : Kind(Kind), AccType(AccType), Statement(Stmt), InvalidDomain(nullptr),
      BaseAddr(BaseAddress), ElementType(ElementType),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(nullptr),
      NewAccessRelation(nullptr), FAD(nullptr) {
  // Name suffixes indexed by AccType; together with the statement's current
  // access count this yields a unique, readable id name.
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}
904 
// Construct a MemoryAccess from an explicit access relation (used for copy
// statements). Element type, base pointer and dimension sizes are recovered
// from the ScopArrayInfo referenced by the relation's output tuple id.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
    : Kind(MemoryKind::Array), AccType(AccType), Statement(Stmt),
      InvalidDomain(nullptr), AccessRelation(nullptr),
      NewAccessRelation(AccRel), FAD(nullptr) {
  isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(isl::dim::out);
  auto *SAI = ScopArrayInfo::getFromId(ArrayInfoId);
  // The outermost dimension carries no size expression (nullptr placeholder);
  // all inner dimension sizes are copied from the array info.
  Sizes.push_back(nullptr);
  for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
    Sizes.push_back(SAI->getDimensionSize(i));
  ElementType = SAI->getElementType();
  BaseAddr = SAI->getBasePtr();
  // Name suffixes indexed by AccType (same scheme as the main constructor).
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}
922 
923 MemoryAccess::~MemoryAccess() = default;
924 
925 void MemoryAccess::realignParams() {
926   isl::set Ctx = Statement->getParent()->getContext();
927   InvalidDomain = InvalidDomain.gist_params(Ctx);
928   AccessRelation = AccessRelation.gist_params(Ctx);
929 
930   // Predictable parameter order is required for JSON imports. Ensure alignment
931   // by explicitly calling align_params.
932   isl::space CtxSpace = Ctx.get_space();
933   InvalidDomain = InvalidDomain.align_params(CtxSpace);
934   AccessRelation = AccessRelation.align_params(CtxSpace);
935 }
936 
// Return the printable name of this access's reduction operator.
const std::string MemoryAccess::getReductionOperatorStr() const {
  return MemoryAccess::getReductionOperatorStr(getReductionType());
}
940 
941 isl::id MemoryAccess::getId() const { return Id; }
942 
943 raw_ostream &polly::operator<<(raw_ostream &OS,
944                                MemoryAccess::ReductionType RT) {
945   if (RT == MemoryAccess::RT_NONE)
946     OS << "NONE";
947   else
948     OS << MemoryAccess::getReductionOperatorStr(RT);
949   return OS;
950 }
951 
952 void MemoryAccess::setFortranArrayDescriptor(Value *FAD) { this->FAD = FAD; }
953 
954 void MemoryAccess::print(raw_ostream &OS) const {
955   switch (AccType) {
956   case READ:
957     OS.indent(12) << "ReadAccess :=\t";
958     break;
959   case MUST_WRITE:
960     OS.indent(12) << "MustWriteAccess :=\t";
961     break;
962   case MAY_WRITE:
963     OS.indent(12) << "MayWriteAccess :=\t";
964     break;
965   }
966 
967   OS << "[Reduction Type: " << getReductionType() << "] ";
968 
969   if (FAD) {
970     OS << "[Fortran array descriptor: " << FAD->getName();
971     OS << "] ";
972   };
973 
974   OS << "[Scalar: " << isScalarKind() << "]\n";
975   OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
976   if (hasNewAccessRelation())
977     OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
978 }
979 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugging helper: print this access to stderr.
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(errs()); }
#endif
983 
// Translate the SCEV \p E into a piecewise-affine function relative to this
// access's statement. Side effect: iterations for which the translation is
// not valid (PWAC.second) are accumulated into InvalidDomain.
isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
  auto *Stmt = getStatement();
  PWACtx PWAC = Stmt->getParent()->getPwAff(E, Stmt->getEntryBlock());
  isl::set StmtDom = getStatement()->getDomain();
  // Drop the tuple id before intersecting with the translation's invalid set.
  StmtDom = StmtDom.reset_tuple_id();
  isl::set NewInvalidDom = StmtDom.intersect(PWAC.second);
  InvalidDomain = InvalidDomain.unite(NewInvalidDom);
  return PWAC.first;
}
993 
// Create a map in the size of the provided set domain, that maps from one
// element of the provided set domain to another element of the same set
// domain.
// The mapping is limited to all points that are equal in all but the last
// dimension and for which the last dimension of the input is strictly
// smaller than the last dimension of the output.
//
//   getEqualAndLarger(set[i0, i1, ..., iX]):
//
//   set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
//     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
//
1006 static isl::map getEqualAndLarger(isl::space SetDomain) {
1007   isl::space Space = SetDomain.map_from_set();
1008   isl::map Map = isl::map::universe(Space);
1009   unsigned lastDimension = Map.dim(isl::dim::in) - 1;
1010 
1011   // Set all but the last dimension to be equal for the input and output
1012   //
1013   //   input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
1014   //     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
1015   for (unsigned i = 0; i < lastDimension; ++i)
1016     Map = Map.equate(isl::dim::in, i, isl::dim::out, i);
1017 
1018   // Set the last dimension of the input to be strict smaller than the
1019   // last dimension of the output.
1020   //
1021   //   input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
1022   Map = Map.order_lt(isl::dim::in, lastDimension, isl::dim::out, lastDimension);
1023   return Map;
1024 }
1025 
// Compute the set of distance vectors between array elements accessed at
// consecutive points of \p Schedule.
isl::set MemoryAccess::getStride(isl::map Schedule) const {
  isl::map AccessRelation = getAccessRelation();
  isl::space Space = Schedule.get_space().range();
  // Relate each schedule point to all lexicographically later points that
  // agree in every dimension but the innermost.
  isl::map NextScatt = getEqualAndLarger(Space);

  Schedule = Schedule.reverse();
  // Keep only the immediately next schedule point for each point.
  NextScatt = NextScatt.lexmin();

  // Compose with the reversed schedule and the access relation on both sides
  // to obtain { AccessedElement -> NextAccessedElement }.
  NextScatt = NextScatt.apply_range(Schedule);
  NextScatt = NextScatt.apply_range(AccessRelation);
  NextScatt = NextScatt.apply_domain(Schedule);
  NextScatt = NextScatt.apply_domain(AccessRelation);

  // The deltas of that relation are the strides.
  isl::set Deltas = NextScatt.deltas();
  return Deltas;
}
1042 
1043 bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
1044   isl::set Stride, StrideX;
1045   bool IsStrideX;
1046 
1047   Stride = getStride(Schedule);
1048   StrideX = isl::set::universe(Stride.get_space());
1049   for (auto i : seq<isl_size>(0, StrideX.dim(isl::dim::set) - 1))
1050     StrideX = StrideX.fix_si(isl::dim::set, i, 0);
1051   StrideX = StrideX.fix_si(isl::dim::set, StrideX.dim(isl::dim::set) - 1,
1052                            StrideWidth);
1053   IsStrideX = Stride.is_subset(StrideX);
1054 
1055   return IsStrideX;
1056 }
1057 
// True iff consecutive schedule points access the same element (stride 0).
bool MemoryAccess::isStrideZero(isl::map Schedule) const {
  return isStrideX(Schedule, 0);
}
1061 
// True iff consecutive schedule points access adjacent elements (stride 1).
bool MemoryAccess::isStrideOne(isl::map Schedule) const {
  return isStrideX(Schedule, 1);
}
1065 
// Overwrite the (original) access relation of this access.
void MemoryAccess::setAccessRelation(isl::map NewAccess) {
  AccessRelation = NewAccess;
}
1069 
// Install a changed ("new") access relation. In asserts builds a series of
// consistency checks runs first; the relation is then simplified with respect
// to the SCoP context and the statement domain before being stored.
void MemoryAccess::setNewAccessRelation(isl::map NewAccess) {
  assert(NewAccess);

#ifndef NDEBUG
  // Check domain space compatibility.
  isl::space NewSpace = NewAccess.get_space();
  isl::space NewDomainSpace = NewSpace.domain();
  isl::space OriginalDomainSpace = getStatement()->getDomainSpace();
  assert(OriginalDomainSpace.has_equal_tuples(NewDomainSpace));

  // Reads must be executed unconditionally. Writes might be executed in a
  // subdomain only.
  if (isRead()) {
    // Check whether there is an access for every statement instance.
    isl::set StmtDomain = getStatement()->getDomain();
    isl::set DefinedContext =
        getStatement()->getParent()->getBestKnownDefinedBehaviorContext();
    StmtDomain = StmtDomain.intersect_params(DefinedContext);
    isl::set NewDomain = NewAccess.domain();
    assert(!StmtDomain.is_subset(NewDomain).is_false() &&
           "Partial READ accesses not supported");
  }

  // The output tuple must name the accessed array via its ScopArrayInfo.
  isl::space NewAccessSpace = NewAccess.get_space();
  assert(NewAccessSpace.has_tuple_id(isl::dim::set) &&
         "Must specify the array that is accessed");
  isl::id NewArrayId = NewAccessSpace.get_tuple_id(isl::dim::set);
  auto *SAI = static_cast<ScopArrayInfo *>(NewArrayId.get_user());
  assert(SAI && "Must set a ScopArrayInfo");

  if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
    InvariantEquivClassTy *EqClass =
        getStatement()->getParent()->lookupInvariantEquivClass(
            SAI->getBasePtr());
    assert(EqClass &&
           "Access functions to indirect arrays must have an invariant and "
           "hoisted base pointer");
  }

  // Check whether access dimensions correspond to number of dimensions of the
  // accesses array.
  isl_size Dims = SAI->getNumberOfDimensions();
  assert(NewAccessSpace.dim(isl::dim::set) == Dims &&
         "Access dims must match array dims");
#endif

  // Simplify the relation before storing it.
  NewAccess = NewAccess.gist_params(getStatement()->getParent()->getContext());
  NewAccess = NewAccess.gist_domain(getStatement()->getDomain());
  NewAccessRelation = NewAccess;
}
1120 
// A partial access does not cover every statement instance, i.e. the
// statement domain is not a subset of the latest access relation's domain.
bool MemoryAccess::isLatestPartialAccess() const {
  isl::set StmtDom = getStatement()->getDomain();
  isl::set AccDom = getLatestAccessRelation().domain();

  return !StmtDom.is_subset(AccDom);
}
1127 
1128 //===----------------------------------------------------------------------===//
1129 
1130 isl::map ScopStmt::getSchedule() const {
1131   isl::set Domain = getDomain();
1132   if (Domain.is_empty())
1133     return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
1134   auto Schedule = getParent()->getSchedule();
1135   if (!Schedule)
1136     return nullptr;
1137   Schedule = Schedule.intersect_domain(isl::union_set(Domain));
1138   if (Schedule.is_empty())
1139     return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
1140   isl::map M = M.from_union_map(Schedule);
1141   M = M.coalesce();
1142   M = M.gist_domain(Domain);
1143   M = M.coalesce();
1144   return M;
1145 }
1146 
// Replace the iteration domain with a (checked) subset of the current one.
void ScopStmt::restrictDomain(isl::set NewDomain) {
  assert(NewDomain.is_subset(Domain) &&
         "New domain is not a subset of old domain!");
  Domain = NewDomain;
}
1152 
1153 void ScopStmt::addAccess(MemoryAccess *Access, bool Prepend) {
1154   Instruction *AccessInst = Access->getAccessInstruction();
1155 
1156   if (Access->isArrayKind()) {
1157     MemoryAccessList &MAL = InstructionToAccess[AccessInst];
1158     MAL.emplace_front(Access);
1159   } else if (Access->isValueKind() && Access->isWrite()) {
1160     Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
1161     assert(!ValueWrites.lookup(AccessVal));
1162 
1163     ValueWrites[AccessVal] = Access;
1164   } else if (Access->isValueKind() && Access->isRead()) {
1165     Value *AccessVal = Access->getAccessValue();
1166     assert(!ValueReads.lookup(AccessVal));
1167 
1168     ValueReads[AccessVal] = Access;
1169   } else if (Access->isAnyPHIKind() && Access->isWrite()) {
1170     PHINode *PHI = cast<PHINode>(Access->getAccessValue());
1171     assert(!PHIWrites.lookup(PHI));
1172 
1173     PHIWrites[PHI] = Access;
1174   } else if (Access->isAnyPHIKind() && Access->isRead()) {
1175     PHINode *PHI = cast<PHINode>(Access->getAccessValue());
1176     assert(!PHIReads.lookup(PHI));
1177 
1178     PHIReads[PHI] = Access;
1179   }
1180 
1181   if (Prepend) {
1182     MemAccs.insert(MemAccs.begin(), Access);
1183     return;
1184   }
1185   MemAccs.push_back(Access);
1186 }
1187 
1188 void ScopStmt::realignParams() {
1189   for (MemoryAccess *MA : *this)
1190     MA->realignParams();
1191 
1192   isl::set Ctx = Parent.getContext();
1193   InvalidDomain = InvalidDomain.gist_params(Ctx);
1194   Domain = Domain.gist_params(Ctx);
1195 
1196   // Predictable parameter order is required for JSON imports. Ensure alignment
1197   // by explicitly calling align_params.
1198   isl::space CtxSpace = Ctx.get_space();
1199   InvalidDomain = InvalidDomain.align_params(CtxSpace);
1200   Domain = Domain.align_params(CtxSpace);
1201 }
1202 
// Create a statement modeling a whole (non-affine) region of the CFG.
ScopStmt::ScopStmt(Scop &parent, Region &R, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> EntryBlockInstructions)
    : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), R(&R),
      Build(nullptr), BaseName(Name), SurroundingLoop(SurroundingLoop),
      Instructions(EntryBlockInstructions) {}
1209 
// Create a statement modeling a single basic block.
ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> Instructions)
    : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), BB(&bb),
      Build(nullptr), BaseName(Name), SurroundingLoop(SurroundingLoop),
      Instructions(Instructions) {}
1216 
// Create a copy statement that reads via \p SourceRel and writes via
// \p TargetRel over \p NewDomain. The statement gets a fresh unique name/id,
// which is attached to the domain and to the input tuples of both relations.
ScopStmt::ScopStmt(Scop &parent, isl::map SourceRel, isl::map TargetRel,
                   isl::set NewDomain)
    : Parent(parent), InvalidDomain(nullptr), Domain(NewDomain),
      Build(nullptr) {
  BaseName = getIslCompatibleName("CopyStmt_", "",
                                  std::to_string(parent.getCopyStmtsNum()));
  isl::id Id = isl::id::alloc(getIslCtx(), getBaseName(), this);
  Domain = Domain.set_tuple_id(Id);
  TargetRel = TargetRel.set_tuple_id(isl::dim::in, Id);
  // Register the write access first, then the read access.
  auto *Access =
      new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE, TargetRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
  SourceRel = SourceRel.set_tuple_id(isl::dim::in, Id);
  Access = new MemoryAccess(this, MemoryAccess::AccessType::READ, SourceRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
}
1235 
1236 ScopStmt::~ScopStmt() = default;
1237 
1238 std::string ScopStmt::getDomainStr() const { return Domain.to_str(); }
1239 
1240 std::string ScopStmt::getScheduleStr() const {
1241   auto *S = getSchedule().release();
1242   if (!S)
1243     return {};
1244   auto Str = stringFromIslObj(S);
1245   isl_map_free(S);
1246   return Str;
1247 }
1248 
1249 void ScopStmt::setInvalidDomain(isl::set ID) { InvalidDomain = ID; }
1250 
1251 BasicBlock *ScopStmt::getEntryBlock() const {
1252   if (isBlockStmt())
1253     return getBasicBlock();
1254   return getRegion()->getEntry();
1255 }
1256 
1257 unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }
1258 
1259 const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }
1260 
// Return the loop that induces the given domain dimension.
Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
  return NestLoops[Dimension];
}
1264 
1265 isl::ctx ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }
1266 
1267 isl::set ScopStmt::getDomain() const { return Domain; }
1268 
1269 isl::space ScopStmt::getDomainSpace() const { return Domain.get_space(); }
1270 
1271 isl::id ScopStmt::getDomainId() const { return Domain.get_tuple_id(); }
1272 
1273 void ScopStmt::printInstructions(raw_ostream &OS) const {
1274   OS << "Instructions {\n";
1275 
1276   for (Instruction *Inst : Instructions)
1277     OS.indent(16) << *Inst << "\n";
1278 
1279   OS.indent(12) << "}\n";
1280 }
1281 
1282 void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
1283   OS << "\t" << getBaseName() << "\n";
1284   OS.indent(12) << "Domain :=\n";
1285 
1286   if (Domain) {
1287     OS.indent(16) << getDomainStr() << ";\n";
1288   } else
1289     OS.indent(16) << "n/a\n";
1290 
1291   OS.indent(12) << "Schedule :=\n";
1292 
1293   if (Domain) {
1294     OS.indent(16) << getScheduleStr() << ";\n";
1295   } else
1296     OS.indent(16) << "n/a\n";
1297 
1298   for (MemoryAccess *Access : MemAccs)
1299     Access->print(OS);
1300 
1301   if (PrintInstructions)
1302     printInstructions(OS.indent(12));
1303 }
1304 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugging helper: print this statement (including instructions).
LLVM_DUMP_METHOD void ScopStmt::dump() const { print(dbgs(), true); }
#endif
1308 
1309 void ScopStmt::removeAccessData(MemoryAccess *MA) {
1310   if (MA->isRead() && MA->isOriginalValueKind()) {
1311     bool Found = ValueReads.erase(MA->getAccessValue());
1312     (void)Found;
1313     assert(Found && "Expected access data not found");
1314   }
1315   if (MA->isWrite() && MA->isOriginalValueKind()) {
1316     bool Found = ValueWrites.erase(cast<Instruction>(MA->getAccessValue()));
1317     (void)Found;
1318     assert(Found && "Expected access data not found");
1319   }
1320   if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
1321     bool Found = PHIWrites.erase(cast<PHINode>(MA->getAccessInstruction()));
1322     (void)Found;
1323     assert(Found && "Expected access data not found");
1324   }
1325   if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
1326     bool Found = PHIReads.erase(cast<PHINode>(MA->getAccessInstruction()));
1327     (void)Found;
1328     assert(Found && "Expected access data not found");
1329   }
1330 }
1331 
1332 void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
1333   // Remove the memory accesses from this statement together with all scalar
1334   // accesses that were caused by it. MemoryKind::Value READs have no access
1335   // instruction, hence would not be removed by this function. However, it is
1336   // only used for invariant LoadInst accesses, its arguments are always affine,
1337   // hence synthesizable, and therefore there are no MemoryKind::Value READ
1338   // accesses to be removed.
1339   auto Predicate = [&](MemoryAccess *Acc) {
1340     return Acc->getAccessInstruction() == MA->getAccessInstruction();
1341   };
1342   for (auto *MA : MemAccs) {
1343     if (Predicate(MA)) {
1344       removeAccessData(MA);
1345       Parent.removeAccessData(MA);
1346     }
1347   }
1348   MemAccs.erase(std::remove_if(MemAccs.begin(), MemAccs.end(), Predicate),
1349                 MemAccs.end());
1350   InstructionToAccess.erase(MA->getAccessInstruction());
1351 }
1352 
// Remove exactly one memory access from this statement.
//
// When \p AfterHoisting is set, the access is also removed from the ordered
// access list and the kind-specific lookup tables; otherwise only the
// instruction-to-access mapping is updated.
void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA, bool AfterHoisting) {
  if (AfterHoisting) {
    auto MAIt = std::find(MemAccs.begin(), MemAccs.end(), MA);
    assert(MAIt != MemAccs.end());
    MemAccs.erase(MAIt);

    removeAccessData(MA);
    Parent.removeAccessData(MA);
  }

  // Drop MA from the per-instruction access list; erase the whole entry when
  // it becomes empty.
  auto It = InstructionToAccess.find(MA->getAccessInstruction());
  if (It != InstructionToAccess.end()) {
    It->second.remove(MA);
    if (It->second.empty())
      InstructionToAccess.erase(MA->getAccessInstruction());
  }
}
1370 
// Return the MemoryKind::Value READ access for \p V in this statement,
// creating and registering one if it does not exist yet.
MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
  MemoryAccess *Access = lookupInputAccessOf(V);
  if (Access)
    return Access;

  ScopArrayInfo *SAI =
      Parent.getOrCreateScopArrayInfo(V, V->getType(), {}, MemoryKind::Value);
  Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
                            true, {}, {}, V, MemoryKind::Value);
  Parent.addAccessFunction(Access);
  Access->buildAccessRelation(SAI);
  addAccess(Access);
  Parent.addAccessData(Access);
  return Access;
}
1386 
// Stream a statement; instruction printing follows the command-line option.
raw_ostream &polly::operator<<(raw_ostream &OS, const ScopStmt &S) {
  S.print(OS, PollyPrintInstructions);
  return OS;
}
1391 
1392 //===----------------------------------------------------------------------===//
/// Scop class implementation
1394 
// Replace the context, keeping the parameter order of the previous context.
void Scop::setContext(isl::set NewContext) {
  Context = NewContext.align_params(Context.get_space());
}
1398 
1399 namespace {
1400 
1401 /// Remap parameter values but keep AddRecs valid wrt. invariant loads.
/// Remap parameter values but keep AddRecs valid wrt. invariant loads.
///
/// SCEVUnknown leaves are replaced according to \p VMap; AddRec expressions
/// are rebuilt as "visited start + {0,+,visited step}" so the rewritten start
/// is added outside the AddRec rather than folded into it.
struct SCEVSensitiveParameterRewriter
    : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
  const ValueToValueMap &VMap;

public:
  SCEVSensitiveParameterRewriter(const ValueToValueMap &VMap,
                                 ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), VMap(VMap) {}

  /// Convenience entry point: rewrite \p E using \p VMap.
  static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
                             const ValueToValueMap &VMap) {
    SCEVSensitiveParameterRewriter SSPR(VMap, SE);
    return SSPR.visit(E);
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    // Split {Start,+,Step} into Start + {0,+,Step}, rewriting only the
    // operands and keeping the AddRec's loop with a conservative wrap flag.
    auto *Start = visit(E->getStart());
    auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
                                    visit(E->getStepRecurrence(SE)),
                                    E->getLoop(), SCEV::FlagAnyWrap);
    return SE.getAddExpr(Start, AddRec);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {
    // Substitute the underlying value if the map has a replacement for it.
    if (auto *NewValue = VMap.lookup(E->getValue()))
      return SE.getUnknown(NewValue);
    return E;
  }
};
1431 
1432 /// Check whether we should remap a SCEV expression.
/// Check whether we should remap a SCEV expression.
///
/// Traverses \p E and reports whether it depends on something defined inside
/// the SCoP region that is not covered by \p VMap: an AddRec over a loop in
/// the region, or an instruction in the region without a mapping.
struct SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
  const ValueToValueMap &VMap;
  bool FoundInside = false;
  const Scop *S;

public:
  SCEVFindInsideScop(const ValueToValueMap &VMap, ScalarEvolution &SE,
                     const Scop *S)
      : SCEVTraversal(*this), VMap(VMap), S(S) {}

  /// Convenience entry point: return true if \p E varies inside \p S.
  static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
                         const ValueToValueMap &VMap, const Scop *S) {
    SCEVFindInsideScop SFIS(VMap, SE, S);
    SFIS.visitAll(E);
    return SFIS.FoundInside;
  }

  bool follow(const SCEV *E) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(E)) {
      // AddRecs over loops inside the region vary within the SCoP.
      FoundInside |= S->getRegion().contains(AddRec->getLoop());
    } else if (auto *Unknown = dyn_cast<SCEVUnknown>(E)) {
      // Instructions inside the region vary unless VMap remaps them.
      if (Instruction *I = dyn_cast<Instruction>(Unknown->getValue()))
        FoundInside |= S->getRegion().contains(I) && !VMap.count(I);
    }
    // Stop descending as soon as a variant part was found.
    return !FoundInside;
  }

  bool isDone() { return FoundInside; }
};
1462 } // end anonymous namespace
1463 
// Map \p E to the representing SCEV of its invariant-load equivalence class,
// when doing so is safe.
const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) const {
  // Check whether it makes sense to rewrite the SCEV.  (ScalarEvolution
  // doesn't like addition between an AddRec and an expression that
  // doesn't have a dominance relationship with it.)
  if (SCEVFindInsideScop::hasVariant(E, *SE, InvEquivClassVMap, this))
    return E;

  // Rewrite SCEV.
  return SCEVSensitiveParameterRewriter::rewrite(E, *SE, InvEquivClassVMap);
}
1474 
// This table of function names is used to translate parameter names into more
// human-readable names. This makes it easier to interpret Polly analysis
// results.
1478 StringMap<std::string> KnownNames = {
1479     {"_Z13get_global_idj", "global_id"},
1480     {"_Z12get_local_idj", "local_id"},
1481     {"_Z15get_global_sizej", "global_size"},
1482     {"_Z14get_local_sizej", "local_size"},
1483     {"_Z12get_work_dimv", "work_dim"},
1484     {"_Z17get_global_offsetj", "global_offset"},
1485     {"_Z12get_group_idj", "group_id"},
1486     {"_Z14get_num_groupsj", "num_groups"},
1487 };
1488 
// Build a parameter name for a call (e.g. OpenCL work-item queries): the
// callee name -- replaced by a friendlier alias when present in KnownNames --
// followed by the constant argument values, joined with '_'.
static std::string getCallParamName(CallInst *Call) {
  std::string Result;
  raw_string_ostream OS(Result);
  std::string Name = Call->getCalledFunction()->getName().str();

  auto Iterator = KnownNames.find(Name);
  if (Iterator != KnownNames.end())
    Name = "__" + Iterator->getValue();
  OS << Name;
  // The caller guarantees constant arguments (see isConstCall in
  // createParameterId), so the cast cannot fail.
  for (auto &Operand : Call->arg_operands()) {
    ConstantInt *Op = cast<ConstantInt>(&Operand);
    OS << "_" << Op->getValue();
  }
  OS.flush();
  return Result;
}
1505 
// Create and cache an isl id for \p Parameter. The name defaults to
// "p_<index>", but is made more descriptive when the parameter is a call
// with constant arguments, a named value, or a load from a named pointer.
void Scop::createParameterId(const SCEV *Parameter) {
  assert(Parameters.count(Parameter));
  assert(!ParameterIds.count(Parameter));

  std::string ParameterName = "p_" + std::to_string(getNumParams() - 1);

  if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
    Value *Val = ValueParameter->getValue();
    CallInst *Call = dyn_cast<CallInst>(Val);

    if (Call && isConstCall(Call)) {
      // Calls with only constant arguments get a name derived from the callee
      // and its arguments.
      ParameterName = getCallParamName(Call);
    } else if (UseInstructionNames) {
      // If this parameter references a specific Value and this value has a name
      // we use this name as it is likely to be unique and more useful than just
      // a number.
      if (Val->hasName())
        ParameterName = Val->getName().str();
      else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
        auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
        if (LoadOrigin->hasName()) {
          ParameterName += "_loaded_from_";
          ParameterName +=
              LI->getPointerOperand()->stripInBoundsOffsets()->getName();
        }
      }
    }

    // Sanitize the chosen name so isl accepts it.
    ParameterName = getIslCompatibleName("", ParameterName, "");
  }

  isl::id Id = isl::id::alloc(getIslCtx(), ParameterName,
                              const_cast<void *>((const void *)Parameter));
  ParameterIds[Parameter] = Id;
}
1541 
1542 void Scop::addParams(const ParameterSetTy &NewParameters) {
1543   for (const SCEV *Parameter : NewParameters) {
1544     // Normalize the SCEV to get the representing element for an invariant load.
1545     Parameter = extractConstantFactor(Parameter, *SE).second;
1546     Parameter = getRepresentingInvariantLoadSCEV(Parameter);
1547 
1548     if (Parameters.insert(Parameter))
1549       createParameterId(Parameter);
1550   }
1551 }
1552 
// Return the isl id for \p Parameter, or a null id if it is unknown.
isl::id Scop::getIdForParam(const SCEV *Parameter) const {
  // Normalize the SCEV to get the representing element for an invariant load.
  Parameter = getRepresentingInvariantLoadSCEV(Parameter);
  return ParameterIds.lookup(Parameter);
}
1558 
// Return true if \p BB dominates the SCoP's entry block.
bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
  return DT.dominates(BB, getEntry());
}
1562 
1563 void Scop::buildContext() {
1564   isl::space Space = isl::space::params_alloc(getIslCtx(), 0);
1565   Context = isl::set::universe(Space);
1566   InvalidContext = isl::set::empty(Space);
1567   AssumedContext = isl::set::universe(Space);
1568   DefinedBehaviorContext = isl::set::universe(Space);
1569 }
1570 
// Constrain each parameter dimension of the context by the signed range
// ScalarEvolution derives for it; dimension indices follow the insertion
// order of Parameters. The resulting context is treated as an assumption on
// defined behavior.
void Scop::addParameterBounds() {
  unsigned PDim = 0;
  for (auto *Parameter : Parameters) {
    ConstantRange SRange = SE->getSignedRange(Parameter);
    Context = addRangeBoundsToSet(Context, SRange, PDim++, isl::dim::param);
  }
  intersectDefinedBehavior(Context, AS_ASSUMPTION);
}
1579 
1580 static std::vector<isl::id> getFortranArrayIds(Scop::array_range Arrays) {
1581   std::vector<isl::id> OutermostSizeIds;
1582   for (auto Array : Arrays) {
1583     // To check if an array is a Fortran array, we check if it has a isl_pw_aff
1584     // for its outermost dimension. Fortran arrays will have this since the
1585     // outermost dimension size can be picked up from their runtime description.
1586     // TODO: actually need to check if it has a FAD, but for now this works.
1587     if (Array->getNumberOfDimensions() > 0) {
1588       isl::pw_aff PwAff = Array->getDimensionSizePw(0);
1589       if (!PwAff)
1590         continue;
1591 
1592       isl::id Id = PwAff.get_dim_id(isl::dim::param, 0);
1593       assert(!Id.is_null() &&
1594              "Invalid Id for PwAff expression in Fortran array");
1595       OutermostSizeIds.push_back(Id);
1596     }
1597   }
1598   return OutermostSizeIds;
1599 }
1600 
1601 // The FORTRAN array size parameters are known to be non-negative.
1602 static isl::set boundFortranArrayParams(isl::set Context,
1603                                         Scop::array_range Arrays) {
1604   std::vector<isl::id> OutermostSizeIds;
1605   OutermostSizeIds = getFortranArrayIds(Arrays);
1606 
1607   for (isl::id Id : OutermostSizeIds) {
1608     int dim = Context.find_dim_by_id(isl::dim::param, Id);
1609     Context = Context.lower_bound_si(isl::dim::param, dim, 0);
1610   }
1611 
1612   return Context;
1613 }
1614 
// Align all parameter spaces used in this SCoP to one common model and add
// the known bounds for every parameter dimension.
void Scop::realignParams() {
  // NOTE(review): this early exit skips the parameter-space alignment below in
  // addition to skipping the bound computation; assumes consumers tolerate
  // unaligned parameter spaces when this option is set -- TODO confirm.
  if (PollyIgnoreParamBounds)
    return;

  // Add all parameters into a common model.
  isl::space Space = getFullParamSpace();

  // Align the parameters of all data structures to the model.
  Context = Context.align_params(Space);
  AssumedContext = AssumedContext.align_params(Space);
  InvalidContext = InvalidContext.align_params(Space);

  // Bound the size of the fortran array dimensions.
  Context = boundFortranArrayParams(Context, arrays());

  // As all parameters are known add bounds to them.
  addParameterBounds();

  for (ScopStmt &Stmt : *this)
    Stmt.realignParams();
  // Simplify the schedule according to the context too.
  Schedule = Schedule.gist_domain_params(getContext());

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  Schedule = Schedule.align_params(Space);
}
1642 
1643 static isl::set simplifyAssumptionContext(isl::set AssumptionContext,
1644                                           const Scop &S) {
1645   // If we have modeled all blocks in the SCoP that have side effects we can
1646   // simplify the context with the constraints that are needed for anything to
1647   // be executed at all. However, if we have error blocks in the SCoP we already
1648   // assumed some parameter combinations cannot occur and removed them from the
1649   // domains, thus we cannot use the remaining domain to simplify the
1650   // assumptions.
1651   if (!S.hasErrorBlock()) {
1652     auto DomainParameters = S.getDomains().params();
1653     AssumptionContext = AssumptionContext.gist_params(DomainParameters);
1654   }
1655 
1656   AssumptionContext = AssumptionContext.gist_params(S.getContext());
1657   return AssumptionContext;
1658 }
1659 
void Scop::simplifyContexts() {
  // The parameter constraints of the iteration domains give us a set of
  // constraints that need to hold for all cases where at least a single
  // statement iteration is executed in the whole scop. We now simplify the
  // assumed context under the assumption that such constraints hold and at
  // least a single statement iteration is executed. For cases where no
  // statement instances are executed, the assumptions we have taken about
  // the executed code do not matter and can be changed.
  //
  // WARNING: This only holds if the assumptions we have taken do not reduce
  //          the set of statement instances that are executed. Otherwise we
  //          may run into a case where the iteration domains suggest that
  //          for a certain set of parameter constraints no code is executed,
  //          but in the original program some computation would have been
  //          performed. In such a case, modifying the run-time conditions and
  //          possibly influencing the run-time check may cause certain scops
  //          to not be executed.
  //
  // Example:
  //
  //   When delinearizing the following code:
  //
  //     for (long i = 0; i < 100; i++)
  //       for (long j = 0; j < m; j++)
  //         A[i+p][j] = 1.0;
  //
  //   we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
  //   otherwise we would access out of bound data. Now, knowing that code is
  //   only executed for the case m >= 0, it is sufficient to assume p >= 0.
  AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
  InvalidContext = InvalidContext.align_params(getParamSpace());
  // NOTE(review): intersectDefinedBehavior may have reset
  // DefinedBehaviorContext to null when it became too complex; assumes
  // simplify() and align_params() tolerate a null set here -- TODO confirm.
  simplify(DefinedBehaviorContext);
  DefinedBehaviorContext = DefinedBehaviorContext.align_params(getParamSpace());
}
1694 
// Return the domain conditions of \p Stmt by forwarding to the basic-block
// based lookup using the statement's entry block.
isl::set Scop::getDomainConditions(const ScopStmt *Stmt) const {
  return getDomainConditions(Stmt->getEntryBlock());
}
1698 
1699 isl::set Scop::getDomainConditions(BasicBlock *BB) const {
1700   auto DIt = DomainMap.find(BB);
1701   if (DIt != DomainMap.end())
1702     return DIt->getSecond();
1703 
1704   auto &RI = *R.getRegionInfo();
1705   auto *BBR = RI.getRegionFor(BB);
1706   while (BBR->getEntry() == BB)
1707     BBR = BBR->getParent();
1708   return getDomainConditions(BBR->getEntry());
1709 }
1710 
// Construct a SCoP for region \p R. A private isl_ctx is created and
// configured from the user-provided -polly-isl-arg options before the initial
// contexts are built.
Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
           DominatorTree &DT, ScopDetection::DetectionContext &DC,
           OptimizationRemarkEmitter &ORE, int ID)
    : IslCtx(isl_ctx_alloc(), isl_ctx_free), SE(&ScalarEvolution), DT(&DT),
      R(R), name(None), HasSingleExitEdge(R.getExitingBlock()), DC(DC),
      ORE(ORE), Affinator(this, LI), ID(ID) {
  // Build an argv-style vector for isl's option parser.
  SmallVector<char *, 8> IslArgv;
  IslArgv.reserve(1 + IslArgs.size());

  // Substitute for program name.
  IslArgv.push_back(const_cast<char *>("-polly-isl-arg"));

  for (std::string &Arg : IslArgs)
    IslArgv.push_back(const_cast<char *>(Arg.c_str()));

  // Abort if unknown argument is passed.
  // Note that "-V" (print isl version) will always call exit(0), so we cannot
  // avoid ISL aborting the program at this point.
  unsigned IslParseFlags = ISL_ARG_ALL;

  isl_ctx_parse_options(IslCtx.get(), IslArgv.size(), IslArgv.data(),
                        IslParseFlags);

  if (IslOnErrorAbort)
    isl_options_set_on_error(getIslCtx().get(), ISL_ON_ERROR_ABORT);
  // Initialize the universe/empty parameter contexts.
  buildContext();
}
1738 
1739 Scop::~Scop() = default;
1740 
1741 void Scop::removeFromStmtMap(ScopStmt &Stmt) {
1742   for (Instruction *Inst : Stmt.getInstructions())
1743     InstStmtMap.erase(Inst);
1744 
1745   if (Stmt.isRegionStmt()) {
1746     for (BasicBlock *BB : Stmt.getRegion()->blocks()) {
1747       StmtMap.erase(BB);
1748       // Skip entry basic block, as its instructions are already deleted as
1749       // part of the statement's instruction list.
1750       if (BB == Stmt.getEntryBlock())
1751         continue;
1752       for (Instruction &Inst : *BB)
1753         InstStmtMap.erase(&Inst);
1754     }
1755   } else {
1756     auto StmtMapIt = StmtMap.find(Stmt.getBasicBlock());
1757     if (StmtMapIt != StmtMap.end())
1758       StmtMapIt->second.erase(std::remove(StmtMapIt->second.begin(),
1759                                           StmtMapIt->second.end(), &Stmt),
1760                               StmtMapIt->second.end());
1761     for (Instruction *Inst : Stmt.getInstructions())
1762       InstStmtMap.erase(Inst);
1763   }
1764 }
1765 
1766 void Scop::removeStmts(function_ref<bool(ScopStmt &)> ShouldDelete,
1767                        bool AfterHoisting) {
1768   for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
1769     if (!ShouldDelete(*StmtIt)) {
1770       StmtIt++;
1771       continue;
1772     }
1773 
1774     // Start with removing all of the statement's accesses including erasing it
1775     // from all maps that are pointing to them.
1776     // Make a temporary copy because removing MAs invalidates the iterator.
1777     SmallVector<MemoryAccess *, 16> MAList(StmtIt->begin(), StmtIt->end());
1778     for (MemoryAccess *MA : MAList)
1779       StmtIt->removeSingleMemoryAccess(MA, AfterHoisting);
1780 
1781     removeFromStmtMap(*StmtIt);
1782     StmtIt = Stmts.erase(StmtIt);
1783   }
1784 }
1785 
1786 void Scop::removeStmtNotInDomainMap() {
1787   removeStmts([this](ScopStmt &Stmt) -> bool {
1788     isl::set Domain = DomainMap.lookup(Stmt.getEntryBlock());
1789     if (!Domain)
1790       return true;
1791     return Domain.is_empty();
1792   });
1793 }
1794 
1795 void Scop::simplifySCoP(bool AfterHoisting) {
1796   removeStmts(
1797       [AfterHoisting](ScopStmt &Stmt) -> bool {
1798         // Never delete statements that contain calls to debug functions.
1799         if (hasDebugCall(&Stmt))
1800           return false;
1801 
1802         bool RemoveStmt = Stmt.isEmpty();
1803 
1804         // Remove read only statements only after invariant load hoisting.
1805         if (!RemoveStmt && AfterHoisting) {
1806           bool OnlyRead = true;
1807           for (MemoryAccess *MA : Stmt) {
1808             if (MA->isRead())
1809               continue;
1810 
1811             OnlyRead = false;
1812             break;
1813           }
1814 
1815           RemoveStmt = OnlyRead;
1816         }
1817         return RemoveStmt;
1818       },
1819       AfterHoisting);
1820 }
1821 
1822 InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
1823   LoadInst *LInst = dyn_cast<LoadInst>(Val);
1824   if (!LInst)
1825     return nullptr;
1826 
1827   if (Value *Rep = InvEquivClassVMap.lookup(LInst))
1828     LInst = cast<LoadInst>(Rep);
1829 
1830   Type *Ty = LInst->getType();
1831   const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
1832   for (auto &IAClass : InvariantEquivClasses) {
1833     if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
1834       continue;
1835 
1836     auto &MAs = IAClass.InvariantAccesses;
1837     for (auto *MA : MAs)
1838       if (MA->getAccessInstruction() == Val)
1839         return &IAClass;
1840   }
1841 
1842   return nullptr;
1843 }
1844 
// Return the ScopArrayInfo for \p BasePtr (or, for base-pointer-less virtual
// arrays, for \p BaseName), creating it on first use. Exactly one of BasePtr
// and BaseName must be non-null.
ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
                                              ArrayRef<const SCEV *> Sizes,
                                              MemoryKind Kind,
                                              const char *BaseName) {
  assert((BasePtr || BaseName) &&
         "BasePtr and BaseName can not be nullptr at the same time.");
  assert(!(BasePtr && BaseName) && "BaseName is redundant.");
  // operator[] default-constructs the entry, so SAI is a reference to a
  // (possibly still null) owning pointer stored inside one of the two maps.
  auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(BasePtr, Kind)]
                      : ScopArrayNameMap[BaseName];
  if (!SAI) {
    // First request for this array: create and register it.
    auto &DL = getFunction().getParent()->getDataLayout();
    SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
                                DL, this, BaseName));
    ScopArrayInfoSet.insert(SAI.get());
  } else {
    SAI->updateElementType(ElementType);
    // In case of mismatching array sizes, we bail out by setting the run-time
    // context to false.
    if (!SAI->updateSizes(Sizes))
      invalidate(DELINEARIZATION, DebugLoc());
  }
  return SAI.get();
}
1868 
1869 ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
1870                                          const std::string &BaseName,
1871                                          const std::vector<unsigned> &Sizes) {
1872   auto *DimSizeType = Type::getInt64Ty(getSE()->getContext());
1873   std::vector<const SCEV *> SCEVSizes;
1874 
1875   for (auto size : Sizes)
1876     if (size)
1877       SCEVSizes.push_back(getSE()->getConstant(DimSizeType, size, false));
1878     else
1879       SCEVSizes.push_back(nullptr);
1880 
1881   auto *SAI = getOrCreateScopArrayInfo(nullptr, ElementType, SCEVSizes,
1882                                        MemoryKind::Array, BaseName.c_str());
1883   return SAI;
1884 }
1885 
1886 ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr, MemoryKind Kind) {
1887   auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
1888   return SAI;
1889 }
1890 
1891 ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
1892   auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
1893   assert(SAI && "No ScopArrayInfo available for this base pointer");
1894   return SAI;
1895 }
1896 
// Return a string representation of the validity context.
std::string Scop::getContextStr() const { return getContext().to_str(); }

// Return a string representation of the assumed context; it must have been
// built already.
std::string Scop::getAssumedContextStr() const {
  assert(AssumedContext && "Assumed context not yet built");
  return AssumedContext.to_str();
}

// Return a string representation of the invalid context.
std::string Scop::getInvalidContextStr() const {
  return InvalidContext.to_str();
}
1907 
1908 std::string Scop::getNameStr() const {
1909   std::string ExitName, EntryName;
1910   std::tie(EntryName, ExitName) = getEntryExitStr();
1911   return EntryName + "---" + ExitName;
1912 }
1913 
1914 std::pair<std::string, std::string> Scop::getEntryExitStr() const {
1915   std::string ExitName, EntryName;
1916   raw_string_ostream ExitStr(ExitName);
1917   raw_string_ostream EntryStr(EntryName);
1918 
1919   R.getEntry()->printAsOperand(EntryStr, false);
1920   EntryStr.str();
1921 
1922   if (R.getExit()) {
1923     R.getExit()->printAsOperand(ExitStr, false);
1924     ExitStr.str();
1925   } else
1926     ExitName = "FunctionExit";
1927 
1928   return std::make_pair(EntryName, ExitName);
1929 }
1930 
// Return the validity context of this SCoP.
isl::set Scop::getContext() const { return Context; }

// Return the parameter space of the current validity context.
isl::space Scop::getParamSpace() const { return getContext().get_space(); }
1934 
1935 isl::space Scop::getFullParamSpace() const {
1936   std::vector<isl::id> FortranIDs;
1937   FortranIDs = getFortranArrayIds(arrays());
1938 
1939   isl::space Space = isl::space::params_alloc(
1940       getIslCtx(), ParameterIds.size() + FortranIDs.size());
1941 
1942   unsigned PDim = 0;
1943   for (const SCEV *Parameter : Parameters) {
1944     isl::id Id = getIdForParam(Parameter);
1945     Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
1946   }
1947 
1948   for (isl::id Id : FortranIDs)
1949     Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
1950 
1951   return Space;
1952 }
1953 
// Return the assumed context; it must have been built already.
isl::set Scop::getAssumedContext() const {
  assert(AssumedContext && "Assumed context not yet built");
  return AssumedContext;
}
1958 
1959 bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
1960   if (PollyProcessUnprofitable)
1961     return true;
1962 
1963   if (isEmpty())
1964     return false;
1965 
1966   unsigned OptimizableStmtsOrLoops = 0;
1967   for (auto &Stmt : *this) {
1968     if (Stmt.getNumIterators() == 0)
1969       continue;
1970 
1971     bool ContainsArrayAccs = false;
1972     bool ContainsScalarAccs = false;
1973     for (auto *MA : Stmt) {
1974       if (MA->isRead())
1975         continue;
1976       ContainsArrayAccs |= MA->isLatestArrayKind();
1977       ContainsScalarAccs |= MA->isLatestScalarKind();
1978     }
1979 
1980     if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
1981       OptimizableStmtsOrLoops += Stmt.getNumIterators();
1982   }
1983 
1984   return OptimizableStmtsOrLoops > 1;
1985 }
1986 
// Return true if a run-time check can possibly succeed: the assumed context,
// restricted to the validity context and the domain parameters, must be
// non-empty and must not be fully contained in the invalid context.
bool Scop::hasFeasibleRuntimeContext() const {
  // A SCoP without statements executes nothing.
  if (Stmts.empty())
    return false;

  isl::set PositiveContext = getAssumedContext();
  isl::set NegativeContext = getInvalidContext();
  PositiveContext = PositiveContext.intersect_params(Context);
  PositiveContext = PositiveContext.intersect_params(getDomains().params());
  // is_false() is used so that an indeterminate isl answer is conservatively
  // treated as infeasible.
  return PositiveContext.is_empty().is_false() &&
         PositiveContext.is_subset(NegativeContext).is_false();
}
1998 
1999 MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
2000   Value *PointerBase = MA->getOriginalBaseAddr();
2001 
2002   auto *PointerBaseInst = dyn_cast<Instruction>(PointerBase);
2003   if (!PointerBaseInst)
2004     return nullptr;
2005 
2006   auto *BasePtrStmt = getStmtFor(PointerBaseInst);
2007   if (!BasePtrStmt)
2008     return nullptr;
2009 
2010   return BasePtrStmt->getArrayAccessOrNULLFor(PointerBaseInst);
2011 }
2012 
// Map an AssumptionKind to the human-readable name used in remark messages.
static std::string toString(AssumptionKind Kind) {
  switch (Kind) {
  case ALIASING:
    return "No-aliasing";
  case INBOUNDS:
    return "Inbounds";
  case WRAPPING:
    return "No-overflows";
  case UNSIGNED:
    return "Signed-unsigned";
  case COMPLEXITY:
    return "Low complexity";
  case PROFITABLE:
    return "Profitable";
  case ERRORBLOCK:
    return "No-error";
  case INFINITELOOP:
    return "Finite loop";
  case INVARIANTLOAD:
    return "Invariant load";
  case DELINEARIZATION:
    return "Delinearization";
  }
  llvm_unreachable("Unknown AssumptionKind!");
}
2038 
2039 bool Scop::isEffectiveAssumption(isl::set Set, AssumptionSign Sign) {
2040   if (Sign == AS_ASSUMPTION) {
2041     if (Context.is_subset(Set))
2042       return false;
2043 
2044     if (AssumedContext.is_subset(Set))
2045       return false;
2046   } else {
2047     if (Set.is_disjoint(Context))
2048       return false;
2049 
2050     if (Set.is_subset(InvalidContext))
2051       return false;
2052   }
2053   return true;
2054 }
2055 
// Bookkeeping for a new assumption/restriction: decide whether it is worth
// reporting, update the per-kind statistics and emit an optimization remark.
// Returns true if the assumption should also be added to the contexts.
bool Scop::trackAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                           AssumptionSign Sign, BasicBlock *BB) {
  // In minimal-remark mode only context-changing assumptions are kept.
  if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
    return false;

  // Do never emit trivial assumptions as they only clutter the output.
  if (!PollyRemarksMinimal) {
    isl::set Univ;
    if (Sign == AS_ASSUMPTION)
      Univ = isl::set::universe(Set.get_space());

    // A universe assumption or an empty restriction changes nothing.
    bool IsTrivial = (Sign == AS_RESTRICTION && Set.is_empty()) ||
                     (Sign == AS_ASSUMPTION && Univ.is_equal(Set));

    if (IsTrivial)
      return false;
  }

  // Update the per-kind statistics counters.
  switch (Kind) {
  case ALIASING:
    AssumptionsAliasing++;
    break;
  case INBOUNDS:
    AssumptionsInbounds++;
    break;
  case WRAPPING:
    AssumptionsWrapping++;
    break;
  case UNSIGNED:
    AssumptionsUnsigned++;
    break;
  case COMPLEXITY:
    AssumptionsComplexity++;
    break;
  case PROFITABLE:
    AssumptionsUnprofitable++;
    break;
  case ERRORBLOCK:
    AssumptionsErrorBlock++;
    break;
  case INFINITELOOP:
    AssumptionsInfiniteLoop++;
    break;
  case INVARIANTLOAD:
    AssumptionsInvariantLoad++;
    break;
  case DELINEARIZATION:
    AssumptionsDelinearization++;
    break;
  }

  // Emit a remark at the given block or, if none was provided, at the
  // region's entry block.
  auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
  std::string Msg = toString(Kind) + Suffix + Set.to_str();
  if (BB)
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
             << Msg);
  else
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
                                        R.getEntry())
             << Msg);
  return true;
}
2118 
2119 void Scop::addAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
2120                          AssumptionSign Sign, BasicBlock *BB,
2121                          bool RequiresRTC) {
2122   // Simplify the assumptions/restrictions first.
2123   Set = Set.gist_params(getContext());
2124   intersectDefinedBehavior(Set, Sign);
2125 
2126   if (!RequiresRTC)
2127     return;
2128 
2129   if (!trackAssumption(Kind, Set, Loc, Sign, BB))
2130     return;
2131 
2132   if (Sign == AS_ASSUMPTION)
2133     AssumedContext = AssumedContext.intersect(Set).coalesce();
2134   else
2135     InvalidContext = InvalidContext.unite(Set).coalesce();
2136 }
2137 
2138 void Scop::intersectDefinedBehavior(isl::set Set, AssumptionSign Sign) {
2139   if (!DefinedBehaviorContext)
2140     return;
2141 
2142   if (Sign == AS_ASSUMPTION)
2143     DefinedBehaviorContext = DefinedBehaviorContext.intersect(Set);
2144   else
2145     DefinedBehaviorContext = DefinedBehaviorContext.subtract(Set);
2146 
2147   // Limit the complexity of the context. If complexity is exceeded, simplify
2148   // the set and check again.
2149   if (DefinedBehaviorContext.n_basic_set() >
2150       MaxDisjunktsInDefinedBehaviourContext) {
2151     simplify(DefinedBehaviorContext);
2152     if (DefinedBehaviorContext.n_basic_set() >
2153         MaxDisjunktsInDefinedBehaviourContext)
2154       DefinedBehaviorContext = nullptr;
2155   }
2156 }
2157 
2158 void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
2159   LLVM_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
2160   addAssumption(Kind, isl::set::empty(getParamSpace()), Loc, AS_ASSUMPTION, BB);
2161 }
2162 
2163 isl::set Scop::getInvalidContext() const { return InvalidContext; }
2164 
// Print all contexts and the parameter list of this SCoP.
// NOTE(review): the exact output format is presumably matched by printer
// regression tests -- do not reformat casually; confirm before changing.
void Scop::printContext(raw_ostream &OS) const {
  OS << "Context:\n";
  OS.indent(4) << Context << "\n";

  OS.indent(4) << "Assumed Context:\n";
  OS.indent(4) << AssumedContext << "\n";

  OS.indent(4) << "Invalid Context:\n";
  OS.indent(4) << InvalidContext << "\n";

  OS.indent(4) << "Defined Behavior Context:\n";
  // The defined behavior context may have been dropped for being too complex.
  if (DefinedBehaviorContext)
    OS.indent(4) << DefinedBehaviorContext << "\n";
  else
    OS.indent(4) << "<unavailable>\n";

  // List every parameter with its dimension index.
  unsigned Dim = 0;
  for (const SCEV *Parameter : Parameters)
    OS.indent(4) << "p" << Dim++ << ": " << *Parameter << "\n";
}
2185 
// Print the min/max alias groups that a run-time alias check would compare.
// Groups with read-only accesses are expanded into one line per read-only
// access; groups without read-only accesses are printed as a single line.
void Scop::printAliasAssumptions(raw_ostream &OS) const {
  // Count one group per read-only access, or one for a write-only group.
  int noOfGroups = 0;
  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
    if (Pair.second.size() == 0)
      noOfGroups += 1;
    else
      noOfGroups += Pair.second.size();
  }

  OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
  if (MinMaxAliasGroups.empty()) {
    OS.indent(8) << "n/a\n";
    return;
  }

  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {

    // If the group has no read only accesses print the write accesses.
    if (Pair.second.empty()) {
      OS.indent(8) << "[[";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }

    // Otherwise print one line per read-only access, each followed by all
    // non-read-only accesses of the group.
    for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
      OS.indent(8) << "[[";
      OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }
  }
}
2224 
// Print all statements of the SCoP, optionally including their instructions.
void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
  OS << "Statements {\n";

  for (const ScopStmt &Stmt : *this) {
    OS.indent(4);
    Stmt.print(OS, PrintInstructions);
  }

  OS.indent(4) << "}\n";
}
2235 
// Print all arrays of the SCoP twice: once with plain sizes and once with
// the sizes represented as piecewise affine expressions.
void Scop::printArrayInfo(raw_ostream &OS) const {
  OS << "Arrays {\n";

  for (auto &Array : arrays())
    Array->print(OS);

  OS.indent(4) << "}\n";

  OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";

  for (auto &Array : arrays())
    Array->print(OS, /* SizeAsPwAff */ true);

  OS.indent(4) << "}\n";
}
2251 
// Print a full description of the SCoP: function/region header, invariant
// accesses, contexts, arrays, alias groups and statements.
void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
  OS.indent(4) << "Function: " << getFunction().getName() << "\n";
  OS.indent(4) << "Region: " << getNameStr() << "\n";
  OS.indent(4) << "Max Loop Depth:  " << getMaxLoopDepth() << "\n";
  OS.indent(4) << "Invariant Accesses: {\n";
  for (const auto &IAClass : InvariantEquivClasses) {
    const auto &MAs = IAClass.InvariantAccesses;
    // Classes without accesses are printed by their identifying pointer only.
    if (MAs.empty()) {
      OS.indent(12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
    } else {
      MAs.front()->print(OS);
      OS.indent(12) << "Execution Context: " << IAClass.ExecutionContext
                    << "\n";
    }
  }
  OS.indent(4) << "}\n";
  printContext(OS.indent(4));
  printArrayInfo(OS.indent(4));
  printAliasAssumptions(OS);
  printStatements(OS.indent(4), PrintInstructions);
}
2273 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger aid: print the full SCoP including instructions to dbgs().
LLVM_DUMP_METHOD void Scop::dump() const { print(dbgs(), true); }
#endif
2277 
2278 isl::ctx Scop::getIslCtx() const { return IslCtx.get(); }
2279 
2280 __isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
2281                                  bool NonNegative,
2282                                  RecordedAssumptionsTy *RecordedAssumptions) {
2283   // First try to use the SCEVAffinator to generate a piecewise defined
2284   // affine function from @p E in the context of @p BB. If that tasks becomes to
2285   // complex the affinator might return a nullptr. In such a case we invalidate
2286   // the SCoP and return a dummy value. This way we do not need to add error
2287   // handling code to all users of this function.
2288   auto PWAC = Affinator.getPwAff(E, BB, RecordedAssumptions);
2289   if (PWAC.first) {
2290     // TODO: We could use a heuristic and either use:
2291     //         SCEVAffinator::takeNonNegativeAssumption
2292     //       or
2293     //         SCEVAffinator::interpretAsUnsigned
2294     //       to deal with unsigned or "NonNegative" SCEVs.
2295     if (NonNegative)
2296       Affinator.takeNonNegativeAssumption(PWAC, RecordedAssumptions);
2297     return PWAC;
2298   }
2299 
2300   auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
2301   invalidate(COMPLEXITY, DL, BB);
2302   return Affinator.getPwAff(SE->getZero(E->getType()), BB, RecordedAssumptions);
2303 }
2304 
2305 isl::union_set Scop::getDomains() const {
2306   isl_space *EmptySpace = isl_space_params_alloc(getIslCtx().get(), 0);
2307   isl_union_set *Domain = isl_union_set_empty(EmptySpace);
2308 
2309   for (const ScopStmt &Stmt : *this)
2310     Domain = isl_union_set_add_set(Domain, Stmt.getDomain().release());
2311 
2312   return isl::manage(Domain);
2313 }
2314 
2315 isl::pw_aff Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB,
2316                                RecordedAssumptionsTy *RecordedAssumptions) {
2317   PWACtx PWAC = getPwAff(E, BB, RecordedAssumptions);
2318   return PWAC.first;
2319 }
2320 
2321 isl::union_map
2322 Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
2323   isl::union_map Accesses = isl::union_map::empty(getParamSpace());
2324 
2325   for (ScopStmt &Stmt : *this) {
2326     for (MemoryAccess *MA : Stmt) {
2327       if (!Predicate(*MA))
2328         continue;
2329 
2330       isl::set Domain = Stmt.getDomain();
2331       isl::map AccessDomain = MA->getAccessRelation();
2332       AccessDomain = AccessDomain.intersect_domain(Domain);
2333       Accesses = Accesses.add_map(AccessDomain);
2334     }
2335   }
2336 
2337   return Accesses.coalesce();
2338 }
2339 
// Union of all must-write access relations.
isl::union_map Scop::getMustWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
}

// Union of all may-write access relations.
isl::union_map Scop::getMayWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
}

// Union of all write (must or may) access relations.
isl::union_map Scop::getWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
}

// Union of all read access relations.
isl::union_map Scop::getReads() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
}

// Union of all access relations, reads and writes alike.
isl::union_map Scop::getAccesses() {
  return getAccessesOfType([](MemoryAccess &MA) { return true; });
}

// Union of all access relations touching \p Array.
isl::union_map Scop::getAccesses(ScopArrayInfo *Array) {
  return getAccessesOfType(
      [Array](MemoryAccess &MA) { return MA.getScopArrayInfo() == Array; });
}
2364 
2365 isl::union_map Scop::getSchedule() const {
2366   auto Tree = getScheduleTree();
2367   return Tree.get_map();
2368 }
2369 
// Return the schedule tree restricted to the current statement domains.
isl::schedule Scop::getScheduleTree() const {
  return Schedule.intersect_domain(getDomains());
}
2373 
2374 void Scop::setSchedule(isl::union_map NewSchedule) {
2375   auto S = isl::schedule::from_domain(getDomains());
2376   Schedule = S.insert_partial_schedule(
2377       isl::multi_union_pw_aff::from_union_map(NewSchedule));
2378   ScheduleModified = true;
2379 }
2380 
// Replace the schedule tree wholesale and remember that it was modified.
void Scop::setScheduleTree(isl::schedule NewSchedule) {
  Schedule = NewSchedule;
  ScheduleModified = true;
}
2385 
2386 bool Scop::restrictDomains(isl::union_set Domain) {
2387   bool Changed = false;
2388   for (ScopStmt &Stmt : *this) {
2389     isl::union_set StmtDomain = isl::union_set(Stmt.getDomain());
2390     isl::union_set NewStmtDomain = StmtDomain.intersect(Domain);
2391 
2392     if (StmtDomain.is_subset(NewStmtDomain))
2393       continue;
2394 
2395     Changed = true;
2396 
2397     NewStmtDomain = NewStmtDomain.coalesce();
2398 
2399     if (NewStmtDomain.is_empty())
2400       Stmt.restrictDomain(isl::set::empty(Stmt.getDomainSpace()));
2401     else
2402       Stmt.restrictDomain(isl::set(NewStmtDomain));
2403   }
2404   return Changed;
2405 }
2406 
2407 ScalarEvolution *Scop::getSE() const { return SE; }
2408 
2409 void Scop::addScopStmt(BasicBlock *BB, StringRef Name, Loop *SurroundingLoop,
2410                        std::vector<Instruction *> Instructions) {
2411   assert(BB && "Unexpected nullptr!");
2412   Stmts.emplace_back(*this, *BB, Name, SurroundingLoop, Instructions);
2413   auto *Stmt = &Stmts.back();
2414   StmtMap[BB].push_back(Stmt);
2415   for (Instruction *Inst : Instructions) {
2416     assert(!InstStmtMap.count(Inst) &&
2417            "Unexpected statement corresponding to the instruction.");
2418     InstStmtMap[Inst] = Stmt;
2419   }
2420 }
2421 
2422 void Scop::addScopStmt(Region *R, StringRef Name, Loop *SurroundingLoop,
2423                        std::vector<Instruction *> Instructions) {
2424   assert(R && "Unexpected nullptr!");
2425   Stmts.emplace_back(*this, *R, Name, SurroundingLoop, Instructions);
2426   auto *Stmt = &Stmts.back();
2427 
2428   for (Instruction *Inst : Instructions) {
2429     assert(!InstStmtMap.count(Inst) &&
2430            "Unexpected statement corresponding to the instruction.");
2431     InstStmtMap[Inst] = Stmt;
2432   }
2433 
2434   for (BasicBlock *BB : R->blocks()) {
2435     StmtMap[BB].push_back(Stmt);
2436     if (BB == R->getEntry())
2437       continue;
2438     for (Instruction &Inst : *BB) {
2439       assert(!InstStmtMap.count(&Inst) &&
2440              "Unexpected statement corresponding to the instruction.");
2441       InstStmtMap[&Inst] = Stmt;
2442     }
2443   }
2444 }
2445 
2446 ScopStmt *Scop::addScopStmt(isl::map SourceRel, isl::map TargetRel,
2447                             isl::set Domain) {
2448 #ifndef NDEBUG
2449   isl::set SourceDomain = SourceRel.domain();
2450   isl::set TargetDomain = TargetRel.domain();
2451   assert(Domain.is_subset(TargetDomain) &&
2452          "Target access not defined for complete statement domain");
2453   assert(Domain.is_subset(SourceDomain) &&
2454          "Source access not defined for complete statement domain");
2455 #endif
2456   Stmts.emplace_back(*this, SourceRel, TargetRel, Domain);
2457   CopyStmtsNum++;
2458   return &(Stmts.back());
2459 }
2460 
2461 ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
2462   auto StmtMapIt = StmtMap.find(BB);
2463   if (StmtMapIt == StmtMap.end())
2464     return {};
2465   return StmtMapIt->second;
2466 }
2467 
2468 ScopStmt *Scop::getIncomingStmtFor(const Use &U) const {
2469   auto *PHI = cast<PHINode>(U.getUser());
2470   BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
2471 
2472   // If the value is a non-synthesizable from the incoming block, use the
2473   // statement that contains it as user statement.
2474   if (auto *IncomingInst = dyn_cast<Instruction>(U.get())) {
2475     if (IncomingInst->getParent() == IncomingBB) {
2476       if (ScopStmt *IncomingStmt = getStmtFor(IncomingInst))
2477         return IncomingStmt;
2478     }
2479   }
2480 
2481   // Otherwise, use the epilogue/last statement.
2482   return getLastStmtFor(IncomingBB);
2483 }
2484 
2485 ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
2486   ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
2487   if (!StmtList.empty())
2488     return StmtList.back();
2489   return nullptr;
2490 }
2491 
2492 ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
2493   if (RN->isSubRegion())
2494     return getStmtListFor(RN->getNodeAs<Region>());
2495   return getStmtListFor(RN->getNodeAs<BasicBlock>());
2496 }
2497 
2498 ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
2499   return getStmtListFor(R->getEntry());
2500 }
2501 
2502 int Scop::getRelativeLoopDepth(const Loop *L) const {
2503   if (!L || !R.contains(L))
2504     return -1;
2505   // outermostLoopInRegion always returns nullptr for top level regions
2506   if (R.isTopLevelRegion()) {
2507     // LoopInfo's depths start at 1, we start at 0
2508     return L->getLoopDepth() - 1;
2509   } else {
2510     Loop *OuterLoop = R.outermostLoopInRegion(const_cast<Loop *>(L));
2511     assert(OuterLoop);
2512     return L->getLoopDepth() - OuterLoop->getLoopDepth();
2513   }
2514 }
2515 
2516 ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
2517   for (auto &SAI : arrays()) {
2518     if (SAI->getName() == BaseName)
2519       return SAI;
2520   }
2521   return nullptr;
2522 }
2523 
2524 void Scop::addAccessData(MemoryAccess *Access) {
2525   const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
2526   assert(SAI && "can only use after access relations have been constructed");
2527 
2528   if (Access->isOriginalValueKind() && Access->isRead())
2529     ValueUseAccs[SAI].push_back(Access);
2530   else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
2531     PHIIncomingAccs[SAI].push_back(Access);
2532 }
2533 
2534 void Scop::removeAccessData(MemoryAccess *Access) {
2535   if (Access->isOriginalValueKind() && Access->isWrite()) {
2536     ValueDefAccs.erase(Access->getAccessValue());
2537   } else if (Access->isOriginalValueKind() && Access->isRead()) {
2538     auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
2539     auto NewEnd = std::remove(Uses.begin(), Uses.end(), Access);
2540     Uses.erase(NewEnd, Uses.end());
2541   } else if (Access->isOriginalPHIKind() && Access->isRead()) {
2542     PHINode *PHI = cast<PHINode>(Access->getAccessInstruction());
2543     PHIReadAccs.erase(PHI);
2544   } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
2545     auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
2546     auto NewEnd = std::remove(Incomings.begin(), Incomings.end(), Access);
2547     Incomings.erase(NewEnd, Incomings.end());
2548   }
2549 }
2550 
2551 MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
2552   assert(SAI->isValueKind());
2553 
2554   Instruction *Val = dyn_cast<Instruction>(SAI->getBasePtr());
2555   if (!Val)
2556     return nullptr;
2557 
2558   return ValueDefAccs.lookup(Val);
2559 }
2560 
2561 ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
2562   assert(SAI->isValueKind());
2563   auto It = ValueUseAccs.find(SAI);
2564   if (It == ValueUseAccs.end())
2565     return {};
2566   return It->second;
2567 }
2568 
2569 MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
2570   assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2571 
2572   if (SAI->isExitPHIKind())
2573     return nullptr;
2574 
2575   PHINode *PHI = cast<PHINode>(SAI->getBasePtr());
2576   return PHIReadAccs.lookup(PHI);
2577 }
2578 
2579 ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
2580   assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2581   auto It = PHIIncomingAccs.find(SAI);
2582   if (It == PHIIncomingAccs.end())
2583     return {};
2584   return It->second;
2585 }
2586 
2587 bool Scop::isEscaping(Instruction *Inst) {
2588   assert(contains(Inst) && "The concept of escaping makes only sense for "
2589                            "values defined inside the SCoP");
2590 
2591   for (Use &Use : Inst->uses()) {
2592     BasicBlock *UserBB = getUseBlock(Use);
2593     if (!contains(UserBB))
2594       return true;
2595 
2596     // When the SCoP region exit needs to be simplified, PHIs in the region exit
2597     // move to a new basic block such that its incoming blocks are not in the
2598     // SCoP anymore.
2599     if (hasSingleExitEdge() && isa<PHINode>(Use.getUser()) &&
2600         isExit(cast<PHINode>(Use.getUser())->getParent()))
2601       return true;
2602   }
2603   return false;
2604 }
2605 
// Bump the counter of aliasing assumptions taken for this SCoP by step.
void Scop::incrementNumberOfAliasingAssumptions(unsigned step) {
  AssumptionsAliasing += step;
}
2609 
// Collect loop and write-access statistics for this SCoP. The counters are
// only computed in asserts builds or when LLVM statistics are enabled;
// otherwise an all-zero result is returned.
Scop::ScopStatistics Scop::getStatistics() const {
  ScopStatistics Result;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  auto LoopStat = ScopDetection::countBeneficialLoops(&R, *SE, *getLI(), 0);

  // Loops not modeled polyhedrally ("boxed" loops) are subtracted from the
  // total to obtain the affine loop count.
  int NumTotalLoops = LoopStat.NumLoops;
  Result.NumBoxedLoops = getBoxedLoops().size();
  Result.NumAffineLoops = NumTotalLoops - Result.NumBoxedLoops;

  for (const ScopStmt &Stmt : *this) {
    isl::set Domain = Stmt.getDomain().intersect_params(getContext());
    bool IsInLoop = Stmt.getNumIterators() >= 1;
    for (MemoryAccess *MA : Stmt) {
      // Only write accesses contribute to the statistics below.
      if (!MA->isWrite())
        continue;

      if (MA->isLatestValueKind()) {
        Result.NumValueWrites += 1;
        if (IsInLoop)
          Result.NumValueWritesInLoops += 1;
      }

      if (MA->isLatestAnyPHIKind()) {
        Result.NumPHIWrites += 1;
        if (IsInLoop)
          Result.NumPHIWritesInLoops += 1;
      }

      // A write is a "singleton" if, over the whole statement domain, it
      // touches exactly one array element.
      isl::set AccSet =
          MA->getAccessRelation().intersect_domain(Domain).range();
      if (AccSet.is_singleton()) {
        Result.NumSingletonWrites += 1;
        if (IsInLoop)
          Result.NumSingletonWritesInLoops += 1;
      }
    }
  }
#endif
  return Result;
}
2650 
// Stream a textual dump of the SCoP, honoring the -polly-print-instructions
// option.
raw_ostream &polly::operator<<(raw_ostream &OS, const Scop &scop) {
  scop.print(OS, PollyPrintInstructions);
  return OS;
}
2655 
2656 //===----------------------------------------------------------------------===//
// Declare the analyses this pass depends on. All analyses are preserved
// because Scop construction does not modify the IR.
void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}
2668 
2669 void updateLoopCountStatistic(ScopDetection::LoopStats Stats,
2670                               Scop::ScopStatistics ScopStats) {
2671   assert(Stats.NumLoops == ScopStats.NumAffineLoops + ScopStats.NumBoxedLoops);
2672 
2673   NumScops++;
2674   NumLoopsInScop += Stats.NumLoops;
2675   MaxNumLoopsInScop =
2676       std::max(MaxNumLoopsInScop.getValue(), (unsigned)Stats.NumLoops);
2677 
2678   if (Stats.MaxDepth == 0)
2679     NumScopsDepthZero++;
2680   else if (Stats.MaxDepth == 1)
2681     NumScopsDepthOne++;
2682   else if (Stats.MaxDepth == 2)
2683     NumScopsDepthTwo++;
2684   else if (Stats.MaxDepth == 3)
2685     NumScopsDepthThree++;
2686   else if (Stats.MaxDepth == 4)
2687     NumScopsDepthFour++;
2688   else if (Stats.MaxDepth == 5)
2689     NumScopsDepthFive++;
2690   else
2691     NumScopsDepthLarger++;
2692 
2693   NumAffineLoops += ScopStats.NumAffineLoops;
2694   NumBoxedLoops += ScopStats.NumBoxedLoops;
2695 
2696   NumValueWrites += ScopStats.NumValueWrites;
2697   NumValueWritesInLoops += ScopStats.NumValueWritesInLoops;
2698   NumPHIWrites += ScopStats.NumPHIWrites;
2699   NumPHIWritesInLoops += ScopStats.NumPHIWritesInLoops;
2700   NumSingletonWrites += ScopStats.NumSingletonWrites;
2701   NumSingletonWritesInLoops += ScopStats.NumSingletonWritesInLoops;
2702 }
2703 
// Build the polyhedral representation for region R if the detection marked it
// as a maximal valid region. Returns false because the IR is never modified.
bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
  auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();

  // Only maximal regions accepted by the detection get a Scop.
  if (!SD.isMaxRegionInScop(*R))
    return false;

  Function *F = R->getEntry()->getParent();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto const &DL = F->getParent()->getDataLayout();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
  S = SB.getScop(); // take ownership of scop object

// Scop construction may still have bailed out (S is null); only count
// statistics for successfully built Scops, and only in builds that track them.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  if (S) {
    ScopDetection::LoopStats Stats =
        ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
    updateLoopCountStatistic(Stats, S->getStatistics());
  }
#endif

  return false;
}
2732 
2733 void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
2734   if (S)
2735     S->print(OS, PollyPrintInstructions);
2736   else
2737     OS << "Invalid Scop!\n";
2738 }
2739 
// Pass identification, replacement for typeid.
char ScopInfoRegionPass::ID = 0;

// Factory used by the legacy pass manager to instantiate this pass.
Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }
2743 
// Register the legacy region pass and its analysis dependencies with the
// global pass registry.
INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)
2757 
2758 //===----------------------------------------------------------------------===//
// Construct the ScopInfo result object and eagerly build the Scops for all
// valid regions of the function (see recompute()).
ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
                   LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
                   AssumptionCache &AC, OptimizationRemarkEmitter &ORE)
    : DL(DL), SD(SD), SE(SE), LI(LI), AA(AA), DT(DT), AC(AC), ORE(ORE) {
  recompute();
}
2765 
// (Re)build the polyhedral description for every maximal valid region of the
// function and cache the resulting Scops in RegionToScopMap.
void ScopInfo::recompute() {
  RegionToScopMap.clear();
  // Create polyhedral description of scops for all the valid regions of a
  // function.
  for (auto &It : SD) {
    Region *R = const_cast<Region *>(It);
    if (!SD.isMaxRegionInScop(*R))
      continue;

    ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
    std::unique_ptr<Scop> S = SB.getScop();
    // Construction may bail out; such regions simply get no Scop.
    if (!S)
      continue;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
    ScopDetection::LoopStats Stats =
        ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
    updateLoopCountStatistic(Stats, S->getStatistics());
#endif
    bool Inserted = RegionToScopMap.insert({R, std::move(S)}).second;
    assert(Inserted && "Building Scop for the same region twice!");
    (void)Inserted;
  }
}
2789 
2790 bool ScopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
2791                           FunctionAnalysisManager::Invalidator &Inv) {
2792   // Check whether the analysis, all analyses on functions have been preserved
2793   // or anything we're holding references to is being invalidated
2794   auto PAC = PA.getChecker<ScopInfoAnalysis>();
2795   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2796          Inv.invalidate<ScopAnalysis>(F, PA) ||
2797          Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
2798          Inv.invalidate<LoopAnalysis>(F, PA) ||
2799          Inv.invalidate<AAManager>(F, PA) ||
2800          Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
2801          Inv.invalidate<AssumptionAnalysis>(F, PA);
2802 }
2803 
// Unique identifier used by the new pass manager's analysis caching.
AnalysisKey ScopInfoAnalysis::Key;
2805 
// New pass manager entry point: gather all required analysis results and
// construct a ScopInfo, which builds the Scops for all valid regions of F.
ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
                                               FunctionAnalysisManager &FAM) {
  auto &SD = FAM.getResult<ScopAnalysis>(F);
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  auto &DL = F.getParent()->getDataLayout();
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  return {DL, SD, SE, LI, AA, DT, AC, ORE};
}
2818 
2819 PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
2820                                            FunctionAnalysisManager &FAM) {
2821   auto &SI = FAM.getResult<ScopInfoAnalysis>(F);
2822   // Since the legacy PM processes Scops in bottom up, we print them in reverse
2823   // order here to keep the output persistent
2824   for (auto &It : reverse(SI)) {
2825     if (It.second)
2826       It.second->print(Stream, PollyPrintInstructions);
2827     else
2828       Stream << "Invalid Scop!\n";
2829   }
2830   return PreservedAnalyses::all();
2831 }
2832 
// Declare the analyses this pass depends on. Mirrors
// ScopInfoRegionPass::getAnalysisUsage; all analyses are preserved because
// Scop construction does not modify the IR.
void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}
2844 
2845 bool ScopInfoWrapperPass::runOnFunction(Function &F) {
2846   auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2847   auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2848   auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2849   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2850   auto const &DL = F.getParent()->getDataLayout();
2851   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2852   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2853   auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2854 
2855   Result.reset(new ScopInfo{DL, SD, SE, LI, AA, DT, AC, ORE});
2856   return false;
2857 }
2858 
2859 void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
2860   for (auto &It : *Result) {
2861     if (It.second)
2862       It.second->print(OS, PollyPrintInstructions);
2863     else
2864       OS << "Invalid Scop!\n";
2865   }
2866 }
2867 
// Pass identification, replacement for typeid.
char ScopInfoWrapperPass::ID = 0;

// Factory used by the legacy pass manager to instantiate this pass.
Pass *polly::createScopInfoWrapperPassPass() {
  return new ScopInfoWrapperPass();
}
2873 
// Register the legacy function pass and its analysis dependencies with the
// global pass registry.
INITIALIZE_PASS_BEGIN(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false)
2889