//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// \brief The maximum number of comparisons allowed while merging memory
/// checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// \brief We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// \brief Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

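/// Emit an analysis remark through \p ORE, attached to the instruction's
/// debug location if the report carries one, and to the loop header otherwise.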
void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Loop *TheLoop, const char *PassName,
                                    OptimizationRemarkEmitter &ORE) {
  DebugLoc DL = TheLoop->getStartLoc();
  const Value *V = TheLoop->getHeader();
  if (const Instruction *I = Message.getInstr()) {
    DL = I->getDebugLoc();
    V = I->getParent();
  }
  ORE.emitOptimizationRemarkAnalysis(PassName, DL, V, Message.str());
}

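/// If \p V is a cast of an integer-typed value, return the underlying value;
/// any other value is returned unchanged.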
Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    ScalarEvolution *SE = PSE.getSE();
    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    PSE.addPredicate(*SE->getEqualPredicate(U, CT));
    auto *Expr = PSE.getSCEV(Ptr);

    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
                 << "\n");
    return Expr;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}

void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride-replaced SCEV.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

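  // A loop-invariant address is the same on every iteration, so its access
  // range collapses to a single point.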
  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

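/// Build the list of runtime checks: one check for each pair of distinct
/// pointer-checking groups whose members may need to be compared at run time.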
SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

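/// Two checking groups need a runtime check if any member of one group needs
/// checking against any member of the other.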
bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

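/// Try to add the pointer at index \p Index to this checking group, widening
/// the group's Low/High bounds as needed. This succeeds only if the new
/// pointer's start and end can be compared against the group's current bounds
/// at compile time.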
bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups.
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

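  // Map each pointer value back to its index in Pointers so that equivalence
  // class members can be translated into pointer indices.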
  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order over an equivalence class's members depends only on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain number of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable.  If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {
/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run-time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
        PSE(PSE) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// \brief Goes over all memory accesses, checks whether an RT check is
  /// needed and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used.  Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks.  Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.  When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}

/// \brief Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  return Stride == 1;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
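  // The initial processing of accesses did not find anything that could
  // require a runtime check, so there is nothing to analyze.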
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
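    // Map the leader of each dependence equivalence class to the
    // dependence-set id assigned within this alias set.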
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckWrap || isNoWrap(PSE, StridesMap, Ptr, TheLoop))) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them.  But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer
    // for which we couldn't find the bounds but we don't actually need to emit
    // any checks so it does not matter.
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 &&
                                                 NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " (" <<
                (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write, check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr
                 << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
          *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and with unit stride would
  // have to access the pointer value "0", which is undefined behavior in
  // address space 0; therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec =
      PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
      isNoWrapAddRec(Ptr, AR, PSE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                   << "LAA:   Pointer: " << *Ptr << "\n"
                   << "LAA:   SCEV: " << *AR << "\n"
                   << "LAA:   Added an overflow assumption\n");
    } else {
      DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                   << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
          " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                   << "inbounds or in address space 0 may wrap:\n"
                   << "LAA:   Pointer: " << *Ptr << "\n"
                   << "LAA:   SCEV: " << *AR << "\n"
                   << "LAA:   Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}

/// Take the pointer operand from the Load/Store instruction.
/// Returns nullptr if this is not a valid Load/Store instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

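  // Strip and accumulate constant offsets from both pointers; if they share
  // the same base, the constant offsets alone decide whether the accesses are
  // consecutive.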
  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  // OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
  const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}

bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding, making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

/// \brief Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

1180 MemoryDepChecker::Dependence::DepType
1181 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1182                               const MemAccessInfo &B, unsigned BIdx,
1183                               const ValueToValueMap &Strides) {
1184   assert (AIdx < BIdx && "Must pass arguments in program order");
1185 
1186   Value *APtr = A.getPointer();
1187   Value *BPtr = B.getPointer();
1188   bool AIsWrite = A.getInt();
1189   bool BIsWrite = B.getInt();
1190 
1191   // Two reads are independent.
1192   if (!AIsWrite && !BIsWrite)
1193     return Dependence::NoDep;
1194 
1195   // We cannot check pointers in different address spaces.
1196   if (APtr->getType()->getPointerAddressSpace() !=
1197       BPtr->getType()->getPointerAddressSpace())
1198     return Dependence::Unknown;
1199 
1200   int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
1201   int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);
1202 
1203   const SCEV *Src = PSE.getSCEV(APtr);
1204   const SCEV *Sink = PSE.getSCEV(BPtr);
1205 
1206   // If the induction step is negative we have to invert source and sink of the
1207   // dependence.
1208   if (StrideAPtr < 0) {
1209     std::swap(APtr, BPtr);
1210     std::swap(Src, Sink);
1211     std::swap(AIsWrite, BIsWrite);
1212     std::swap(AIdx, BIdx);
1213     std::swap(StrideAPtr, StrideBPtr);
1214   }
1215 
1216   const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
1217 
1218   DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1219                << "(Induction step: " << StrideAPtr << ")\n");
1220   DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1221                << *InstMap[BIdx] << ": " << *Dist << "\n");
1222 
1223   // Need accesses with constant stride. We don't want to vectorize
1224   // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1225   // the address space.
1226   if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1227     DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1228     return Dependence::Unknown;
1229   }
1230 
1231   const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1232   if (!C) {
1233     DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1234     ShouldRetryWithRuntimeCheck = true;
1235     return Dependence::Unknown;
1236   }
1237 
1238   Type *ATy = APtr->getType()->getPointerElementType();
1239   Type *BTy = BPtr->getType()->getPointerElementType();
1240   auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1241   uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1242 
1243   const APInt &Val = C->getAPInt();
1244   int64_t Distance = Val.getSExtValue();
1245   uint64_t Stride = std::abs(StrideAPtr);
1246 
1247   // Attempt to prove strided accesses independent.
1248   if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
1249       areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1250     DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1251     return Dependence::NoDep;
1252   }
1253 
1254   // Negative distances are not plausible dependencies.
1255   if (Val.isNegative()) {
1256     bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1257     if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1258         (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1259          ATy != BTy)) {
1260       DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1261       return Dependence::ForwardButPreventsForwarding;
1262     }
1263 
1264     DEBUG(dbgs() << "LAA: Dependence is negative\n");
1265     return Dependence::Forward;
1266   }
1267 
1268   // Write to the same location with the same size.
1269   // Could be improved to assert type sizes are the same (i32 == float, etc).
1270   if (Val == 0) {
1271     if (ATy == BTy)
1272       return Dependence::Forward;
1273     DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
1274     return Dependence::Unknown;
1275   }
1276 
1277   assert(Val.isStrictlyPositive() && "Expect a positive value");
1278 
1279   if (ATy != BTy) {
1280     DEBUG(dbgs() <<
1281           "LAA: ReadWrite-Write positive dependency with different types\n");
1282     return Dependence::Unknown;
1283   }
1284 
1285   // Bail out early if passed-in parameters make vectorization not feasible.
1286   unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1287                            VectorizerParams::VectorizationFactor : 1);
1288   unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1289                            VectorizerParams::VectorizationInterleave : 1);
1290   // The minimum number of iterations for a vectorized/unrolled version.
1291   unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1292 
1293   // It's not vectorizable if the distance is smaller than the minimum distance
1294   // needed for a vectroized/unrolled version. Vectorizing one iteration in
1295   // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1296   // TypeByteSize (No need to plus the last gap distance).
1297   //
1298   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1299   //      foo(int *A) {
1300   //        int *B = (int *)((char *)A + 14);
1301   //        for (i = 0 ; i < 1024 ; i += 2)
1302   //          B[i] = A[i] + 1;
1303   //      }
1304   //
1305   // Two accesses in memory (stride is 2):
1306   //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
1307   //                              | B[0] |      | B[2] |      | B[4] |
1308   //
1309   // Distance needs for vectorizing iterations except the last iteration:
1310   // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4.
1311   // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1312   //
1313   // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1314   // 12, which is less than distance.
1315   //
1316   // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
1317   // the minimum distance needed is 28, which is greater than distance. It is
1318   // not safe to do vectorization.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than the max safe
  // distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " bytes\n");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use the max factor instead of the max distance in bytes,
  // which does not handle different types correctly.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, whose minimum distance
  // needed is 8, which exceeds the recorded limit of 2, so vectorization is
  // rejected. But in fact both A and B could be vectorized with a factor of 2.
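  //
  // (Worked numbers for the example above, with a factor of 2: the accesses on
  // A are 8 bytes apart and need 4 * 1 * (2 - 1) + 4 = 8 bytes; the accesses
  // on B are 2 bytes apart and need 1 * 1 * (2 - 1) + 1 = 2 bytes. Each array
  // is safe on its own; only the shared byte-distance limit rejects them.)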
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {
  MaxSafeDepDistBytes = -1;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
      AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated MaxDependences
            // dependences.  In that case return as soon as we find the first
            // unsafe dependence.  This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return SafeForVectorization;
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  std::transform(IndexVector.begin(), IndexVector.end(),
                 std::back_inserter(Insts),
                 [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop in "
               << TheLoop->getHeader()->getParent()->getName() << ": "
               << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
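  //
  // For example, a rotated (do-while style) loop is accepted:
  //   do {
  //     body;
  //   } while (++i < n);   // the latch block also performs the exit test
  // whereas a loop whose exit test sits in a block other than the latch is
  // rejected below.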
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores.
    for (Instruction &I : *BB) {
      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
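        // For example (assuming the target's TLI recognizes it), a call to
        // sqrt that maps to a vectorizable intrinsic falls into this category
        // and is skipped by the check below.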
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          emitAnalysis(LoopAccessReport(&I)
                       << "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioning)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (Stores.empty()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it into the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: The loop is annotated parallel, ignoring memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it into the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
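  // For example:
  //   for (i = 0; i < n; i++)
  //     A[i] = x;             // one write-only pointer and no loads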
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointers.
      if (!CanDoRTIfNeeded) {
        emitAnalysis(LoopAccessReport()
                     << "cannot check memory dependencies at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
                 << (PtrRtChecking->Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    emitAnalysis(
        LoopAccessReport()
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop");
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
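  // For example, in
  //   for (i = 0; i < n; i++)
  //     if (c[i])
  //       A[i] = 0;
  // the block containing the store does not dominate the latch, so the store
  // must be predicated (masked) if the loop body is later if-converted.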
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return (PSE->getSE()->isLoopInvariant(PSE->getSE()->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

namespace {
/// \brief IR Values for the lower and upper bounds of a pointer evolution.  We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values.  Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};
} // end anonymous namespace

/// \brief Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop.  \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
                 << "\n");
    return {Ptr, Ptr};
  } else {
    unsigned AS = Ptr->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = Loc->getContext();

    // Use this type for pointer arithmetic.
    Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
    Value *Start = nullptr, *End = nullptr;

    DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
    return {Start, End};
  }
}

/// \brief Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  std::transform(
      PointerChecks.begin(), PointerChecks.end(),
      std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
          First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
          Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  auto *SE = PSE->getSE();
  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) <= end(B) && start(B) <= end(A)
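    // For illustration, with hypothetical byte ranges A = [0x100, 0x13f] and
    // B = [0x120, 0x15f]: 0x100 <= 0x15f and 0x120 <= 0x13f both hold, so the
    // ranges conflict; for disjoint ranges one of the two comparisons fails.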
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 =   ChkBuilder.CreateBitCast(A.End,   PtrArithTy1, "bc");
    Value *End1 =   ChkBuilder.CreateBitCast(B.End,   PtrArithTy0, "bc");

    Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the block.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking->Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking->getChecks());
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  DEBUG(dbgs() << "LAA: Found a strided access that we can version\n");
  DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI.get();
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}

bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

char LoopAccessAnalysis::PassID;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, AnalysisManager<Loop> &AM) {
  const AnalysisManager<Function> &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
  Function &F = *L.getHeader()->getParent();
  auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(F);
  auto *AA = FAM.getCachedResult<AAManager>(F);
  auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  auto *LI = FAM.getCachedResult<LoopAnalysis>(F);
  if (!SE)
    report_fatal_error(
        "ScalarEvolution must have been cached at a higher level");
  if (!AA)
    report_fatal_error("AliasAnalysis must have been cached at a higher level");
  if (!DT)
    report_fatal_error("DominatorTree must have been cached at a higher level");
  if (!LI)
    report_fatal_error("LoopInfo must have been cached at a higher level");
  return LoopAccessInfo(&L, SE, TLI, AA, DT, LI);
}

PreservedAnalyses LoopAccessInfoPrinterPass::run(Loop &L,
                                                 AnalysisManager<Loop> &AM) {
  Function &F = *L.getHeader()->getParent();
  auto &LAI = AM.getResult<LoopAccessAnalysis>(L);
  OS << "Loop access info in function '" << F.getName() << "':\n";
  OS.indent(2) << L.getHeader()->getName() << ":\n";
  LAI.print(OS, 4);
  return PreservedAnalyses::all();
}

namespace llvm {
  Pass *createLAAPass() {
    return new LoopAccessLegacyAnalysis();
  }
}