1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The implementation for the loop memory dependence that was originally
10 // developed for the loop vectorizer.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/LoopAccessAnalysis.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/EquivalenceClasses.h"
19 #include "llvm/ADT/PointerIntPair.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AliasSetTracker.h"
28 #include "llvm/Analysis/LoopAnalysisManager.h"
29 #include "llvm/Analysis/LoopInfo.h"
30 #include "llvm/Analysis/MemoryLocation.h"
31 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
32 #include "llvm/Analysis/ScalarEvolution.h"
33 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/Analysis/VectorUtils.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DebugLoc.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/InstrTypes.h"
46 #include "llvm/IR/Instruction.h"
47 #include "llvm/IR/Instructions.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/PassManager.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/IR/ValueHandle.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Debug.h"
58 #include "llvm/Support/ErrorHandling.h"
59 #include "llvm/Support/raw_ostream.h"
60 #include <algorithm>
61 #include <cassert>
62 #include <cstdint>
63 #include <cstdlib>
64 #include <iterator>
65 #include <utility>
66 #include <vector>
67 
68 using namespace llvm;
69 
70 #define DEBUG_TYPE "loop-accesses"
71 
72 static cl::opt<unsigned, true>
73 VectorizationFactor("force-vector-width", cl::Hidden,
74                     cl::desc("Sets the SIMD width. Zero is autoselect."),
75                     cl::location(VectorizerParams::VectorizationFactor));
76 unsigned VectorizerParams::VectorizationFactor;
77 
78 static cl::opt<unsigned, true>
79 VectorizationInterleave("force-vector-interleave", cl::Hidden,
80                         cl::desc("Sets the vectorization interleave count. "
81                                  "Zero is autoselect."),
82                         cl::location(
83                             VectorizerParams::VectorizationInterleave));
84 unsigned VectorizerParams::VectorizationInterleave;
85 
86 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
87     "runtime-memory-check-threshold", cl::Hidden,
88     cl::desc("When performing memory disambiguation checks at runtime do not "
89              "generate more than this number of comparisons (default = 8)."),
90     cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
91 unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
92 
93 /// The maximum iterations used to merge memory checks
94 static cl::opt<unsigned> MemoryCheckMergeThreshold(
95     "memory-check-merge-threshold", cl::Hidden,
96     cl::desc("Maximum number of comparisons done when trying to merge "
97              "runtime memory checks. (default = 100)"),
98     cl::init(100));
99 
100 /// Maximum SIMD width.
101 const unsigned VectorizerParams::MaxVectorWidth = 64;
102 
103 /// We collect dependences up to this threshold.
104 static cl::opt<unsigned>
105     MaxDependences("max-dependences", cl::Hidden,
106                    cl::desc("Maximum number of dependences collected by "
107                             "loop-access analysis (default = 100)"),
108                    cl::init(100));
109 
110 /// This enables versioning on the strides of symbolically striding memory
111 /// accesses in code like the following.
112 ///   for (i = 0; i < N; ++i)
113 ///     A[i * Stride1] += B[i * Stride2] ...
114 ///
115 /// Will be roughly translated to
116 ///    if (Stride1 == 1 && Stride2 == 1) {
117 ///      for (i = 0; i < N; i+=4)
118 ///       A[i:i+3] += ...
119 ///    } else
120 ///      ...
121 static cl::opt<bool> EnableMemAccessVersioning(
122     "enable-mem-access-versioning", cl::init(true), cl::Hidden,
123     cl::desc("Enable symbolic stride memory access versioning"));
124 
125 /// Enable store-to-load forwarding conflict detection. This option can
126 /// be disabled for correctness testing.
127 static cl::opt<bool> EnableForwardingConflictDetection(
128     "store-to-load-forwarding-conflict-detection", cl::Hidden,
129     cl::desc("Enable conflict detection in loop-access analysis"),
130     cl::init(true));
131 
132 bool VectorizerParams::isInterleaveForced() {
133   return ::VectorizationInterleave.getNumOccurrences() > 0;
134 }
135 
136 Value *llvm::stripIntegerCast(Value *V) {
137   if (auto *CI = dyn_cast<CastInst>(V))
138     if (CI->getOperand(0)->getType()->isIntegerTy())
139       return CI->getOperand(0);
140   return V;
141 }
142 
143 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
144                                             const ValueToValueMap &PtrToStride,
145                                             Value *Ptr) {
146   const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
147 
148   // If there is an entry in the map return the SCEV of the pointer with the
149   // symbolic stride replaced by one.
150   ValueToValueMap::const_iterator SI = PtrToStride.find(Ptr);
151   if (SI == PtrToStride.end())
152     // For a non-symbolic stride, just return the original expression.
153     return OrigSCEV;
154 
155   Value *StrideVal = stripIntegerCast(SI->second);
156 
157   ScalarEvolution *SE = PSE.getSE();
158   const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
159   const auto *CT =
160     static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));
161 
162   PSE.addPredicate(*SE->getEqualPredicate(U, CT));
163   auto *Expr = PSE.getSCEV(Ptr);
164 
165   LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
166 	     << " by: " << *Expr << "\n");
167   return Expr;
168 }
169 
170 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
171     unsigned Index, RuntimePointerChecking &RtCheck)
172     : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
173       AddressSpace(RtCheck.Pointers[Index]
174                        .PointerValue->getType()
175                        ->getPointerAddressSpace()) {
176   Members.push_back(Index);
177 }
178 
179 /// Calculate Start and End points of memory access.
180 /// Let's assume A is the first access and B is a memory access on N-th loop
181 /// iteration. Then B is calculated as:
182 ///   B = A + Step*N .
183 /// Step value may be positive or negative.
184 /// N is a calculated back-edge taken count:
185 ///     N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0
186 /// Start and End points are calculated in the following way:
187 /// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
188 /// where SizeOfElt is the size of single memory access in bytes.
189 ///
190 /// There is no conflict when the intervals are disjoint:
191 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
192 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, Type *AccessTy,
193                                     bool WritePtr, unsigned DepSetId,
194                                     unsigned ASId,
195                                     const ValueToValueMap &Strides,
196                                     PredicatedScalarEvolution &PSE) {
197   // Get the stride replaced scev.
198   const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
199   ScalarEvolution *SE = PSE.getSE();
200 
201   const SCEV *ScStart;
202   const SCEV *ScEnd;
203 
204   if (SE->isLoopInvariant(Sc, Lp)) {
205     ScStart = ScEnd = Sc;
206   } else {
207     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
208     assert(AR && "Invalid addrec expression");
209     const SCEV *Ex = PSE.getBackedgeTakenCount();
210 
211     ScStart = AR->getStart();
212     ScEnd = AR->evaluateAtIteration(Ex, *SE);
213     const SCEV *Step = AR->getStepRecurrence(*SE);
214 
215     // For expressions with negative step, the upper bound is ScStart and the
216     // lower bound is ScEnd.
217     if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
218       if (CStep->getValue()->isNegative())
219         std::swap(ScStart, ScEnd);
220     } else {
221       // Fallback case: the step is not constant, but we can still
222       // get the upper and lower bounds of the interval by using min/max
223       // expressions.
224       ScStart = SE->getUMinExpr(ScStart, ScEnd);
225       ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
226     }
227   }
228   // Add the size of the pointed element to ScEnd.
229   auto &DL = Lp->getHeader()->getModule()->getDataLayout();
230   Type *IdxTy = DL.getIndexType(Ptr->getType());
231   const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
232   ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
233 
234   Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
235 }
236 
237 SmallVector<RuntimePointerCheck, 4>
238 RuntimePointerChecking::generateChecks() const {
239   SmallVector<RuntimePointerCheck, 4> Checks;
240 
241   for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
242     for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
243       const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
244       const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
245 
246       if (needsChecking(CGI, CGJ))
247         Checks.push_back(std::make_pair(&CGI, &CGJ));
248     }
249   }
250   return Checks;
251 }
252 
253 void RuntimePointerChecking::generateChecks(
254     MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
255   assert(Checks.empty() && "Checks is not empty");
256   groupChecks(DepCands, UseDependencies);
257   Checks = generateChecks();
258 }
259 
260 bool RuntimePointerChecking::needsChecking(
261     const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
262   for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
263     for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
264       if (needsChecking(M.Members[I], N.Members[J]))
265         return true;
266   return false;
267 }
268 
269 /// Compare \p I and \p J and return the minimum.
270 /// Return nullptr in case we couldn't find an answer.
271 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
272                                    ScalarEvolution *SE) {
273   const SCEV *Diff = SE->getMinusSCEV(J, I);
274   const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
275 
276   if (!C)
277     return nullptr;
278   if (C->getValue()->isNegative())
279     return J;
280   return I;
281 }
282 
283 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
284                                          RuntimePointerChecking &RtCheck) {
285   return addPointer(
286       Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
287       RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
288       *RtCheck.SE);
289 }
290 
291 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
292                                          const SCEV *End, unsigned AS,
293                                          ScalarEvolution &SE) {
294   assert(AddressSpace == AS &&
295          "all pointers in a checking group must be in the same address space");
296 
297   // Compare the starts and ends with the known minimum and maximum
298   // of this set. We need to know how we compare against the min/max
299   // of the set in order to be able to emit memchecks.
300   const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
301   if (!Min0)
302     return false;
303 
304   const SCEV *Min1 = getMinFromExprs(End, High, &SE);
305   if (!Min1)
306     return false;
307 
308   // Update the low bound  expression if we've found a new min value.
309   if (Min0 == Start)
310     Low = Start;
311 
312   // Update the high bound expression if we've found a new max value.
313   if (Min1 != End)
314     High = End;
315 
316   Members.push_back(Index);
317   return true;
318 }
319 
320 void RuntimePointerChecking::groupChecks(
321     MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
322   // We build the groups from dependency candidates equivalence classes
323   // because:
324   //    - We know that pointers in the same equivalence class share
325   //      the same underlying object and therefore there is a chance
326   //      that we can compare pointers
327   //    - We wouldn't be able to merge two pointers for which we need
328   //      to emit a memcheck. The classes in DepCands are already
329   //      conveniently built such that no two pointers in the same
330   //      class need checking against each other.
331 
332   // We use the following (greedy) algorithm to construct the groups
333   // For every pointer in the equivalence class:
334   //   For each existing group:
335   //   - if the difference between this pointer and the min/max bounds
336   //     of the group is a constant, then make the pointer part of the
337   //     group and update the min/max bounds of that group as required.
338 
339   CheckingGroups.clear();
340 
341   // If we need to check two pointers to the same underlying object
342   // with a non-constant difference, we shouldn't perform any pointer
343   // grouping with those pointers. This is because we can easily get
344   // into cases where the resulting check would return false, even when
345   // the accesses are safe.
346   //
347   // The following example shows this:
348   // for (i = 0; i < 1000; ++i)
349   //   a[5000 + i * m] = a[i] + a[i + 9000]
350   //
351   // Here grouping gives a check of (5000, 5000 + 1000 * m) against
352   // (0, 10000) which is always false. However, if m is 1, there is no
353   // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
354   // us to perform an accurate check in this case.
355   //
356   // The above case requires that we have an UnknownDependence between
357   // accesses to the same underlying object. This cannot happen unless
358   // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
359   // is also false. In this case we will use the fallback path and create
360   // separate checking groups for all pointers.
361 
362   // If we don't have the dependency partitions, construct a new
363   // checking pointer group for each pointer. This is also required
364   // for correctness, because in this case we can have checking between
365   // pointers to the same underlying object.
366   if (!UseDependencies) {
367     for (unsigned I = 0; I < Pointers.size(); ++I)
368       CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
369     return;
370   }
371 
372   unsigned TotalComparisons = 0;
373 
374   DenseMap<Value *, unsigned> PositionMap;
375   for (unsigned Index = 0; Index < Pointers.size(); ++Index)
376     PositionMap[Pointers[Index].PointerValue] = Index;
377 
378   // We need to keep track of what pointers we've already seen so we
379   // don't process them twice.
380   SmallSet<unsigned, 2> Seen;
381 
382   // Go through all equivalence classes, get the "pointer check groups"
383   // and add them to the overall solution. We use the order in which accesses
384   // appear in 'Pointers' to enforce determinism.
385   for (unsigned I = 0; I < Pointers.size(); ++I) {
386     // We've seen this pointer before, and therefore already processed
387     // its equivalence class.
388     if (Seen.count(I))
389       continue;
390 
391     MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
392                                            Pointers[I].IsWritePtr);
393 
394     SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
395     auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
396 
397     // Because DepCands is constructed by visiting accesses in the order in
398     // which they appear in alias sets (which is deterministic) and the
399     // iteration order within an equivalence class member is only dependent on
400     // the order in which unions and insertions are performed on the
401     // equivalence class, the iteration order is deterministic.
402     for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
403          MI != ME; ++MI) {
404       auto PointerI = PositionMap.find(MI->getPointer());
405       assert(PointerI != PositionMap.end() &&
406              "pointer in equivalence class not found in PositionMap");
407       unsigned Pointer = PointerI->second;
408       bool Merged = false;
409       // Mark this pointer as seen.
410       Seen.insert(Pointer);
411 
412       // Go through all the existing sets and see if we can find one
413       // which can include this pointer.
414       for (RuntimeCheckingPtrGroup &Group : Groups) {
415         // Don't perform more than a certain amount of comparisons.
416         // This should limit the cost of grouping the pointers to something
417         // reasonable.  If we do end up hitting this threshold, the algorithm
418         // will create separate groups for all remaining pointers.
419         if (TotalComparisons > MemoryCheckMergeThreshold)
420           break;
421 
422         TotalComparisons++;
423 
424         if (Group.addPointer(Pointer, *this)) {
425           Merged = true;
426           break;
427         }
428       }
429 
430       if (!Merged)
431         // We couldn't add this pointer to any existing set or the threshold
432         // for the number of comparisons has been reached. Create a new group
433         // to hold the current pointer.
434         Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
435     }
436 
437     // We've computed the grouped checks for this partition.
438     // Save the results and continue with the next one.
439     llvm::copy(Groups, std::back_inserter(CheckingGroups));
440   }
441 }
442 
443 bool RuntimePointerChecking::arePointersInSamePartition(
444     const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
445     unsigned PtrIdx2) {
446   return (PtrToPartition[PtrIdx1] != -1 &&
447           PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
448 }
449 
450 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
451   const PointerInfo &PointerI = Pointers[I];
452   const PointerInfo &PointerJ = Pointers[J];
453 
454   // No need to check if two readonly pointers intersect.
455   if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
456     return false;
457 
458   // Only need to check pointers between two different dependency sets.
459   if (PointerI.DependencySetId == PointerJ.DependencySetId)
460     return false;
461 
462   // Only need to check pointers in the same alias set.
463   if (PointerI.AliasSetId != PointerJ.AliasSetId)
464     return false;
465 
466   return true;
467 }
468 
469 void RuntimePointerChecking::printChecks(
470     raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
471     unsigned Depth) const {
472   unsigned N = 0;
473   for (const auto &Check : Checks) {
474     const auto &First = Check.first->Members, &Second = Check.second->Members;
475 
476     OS.indent(Depth) << "Check " << N++ << ":\n";
477 
478     OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
479     for (unsigned K = 0; K < First.size(); ++K)
480       OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
481 
482     OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
483     for (unsigned K = 0; K < Second.size(); ++K)
484       OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
485   }
486 }
487 
488 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
489 
490   OS.indent(Depth) << "Run-time memory checks:\n";
491   printChecks(OS, Checks, Depth);
492 
493   OS.indent(Depth) << "Grouped accesses:\n";
494   for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
495     const auto &CG = CheckingGroups[I];
496 
497     OS.indent(Depth + 2) << "Group " << &CG << ":\n";
498     OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
499                          << ")\n";
500     for (unsigned J = 0; J < CG.Members.size(); ++J) {
501       OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
502                            << "\n";
503     }
504   }
505 }
506 
507 namespace {
508 
509 /// Analyses memory accesses in a loop.
510 ///
511 /// Checks whether run time pointer checks are needed and builds sets for data
512 /// dependence checking.
513 class AccessAnalysis {
514 public:
515   /// Read or write access location.
516   typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
517   typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
518 
519   AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
520                  MemoryDepChecker::DepCandidates &DA,
521                  PredicatedScalarEvolution &PSE)
522       : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA), PSE(PSE) {}
523 
524   /// Register a load  and whether it is only read from.
525   void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
526     Value *Ptr = const_cast<Value*>(Loc.Ptr);
527     AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
528     Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
529     if (IsReadOnly)
530       ReadOnlyPtr.insert(Ptr);
531   }
532 
533   /// Register a store.
534   void addStore(MemoryLocation &Loc, Type *AccessTy) {
535     Value *Ptr = const_cast<Value*>(Loc.Ptr);
536     AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
537     Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
538   }
539 
540   /// Check if we can emit a run-time no-alias check for \p Access.
541   ///
542   /// Returns true if we can emit a run-time no alias check for \p Access.
543   /// If we can check this access, this also adds it to a dependence set and
544   /// adds a run-time to check for it to \p RtCheck. If \p Assume is true,
545   /// we will attempt to use additional run-time checks in order to get
546   /// the bounds of the pointer.
547   bool createCheckForAccess(RuntimePointerChecking &RtCheck,
548                             MemAccessInfo Access, Type *AccessTy,
549                             const ValueToValueMap &Strides,
550                             DenseMap<Value *, unsigned> &DepSetId,
551                             Loop *TheLoop, unsigned &RunningDepId,
552                             unsigned ASId, bool ShouldCheckStride, bool Assume);
553 
554   /// Check whether we can check the pointers at runtime for
555   /// non-intersection.
556   ///
557   /// Returns true if we need no check or if we do and we can generate them
558   /// (i.e. the pointers have computable bounds).
559   bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
560                        Loop *TheLoop, const ValueToValueMap &Strides,
561                        Value *&UncomputablePtr, bool ShouldCheckWrap = false);
562 
563   /// Goes over all memory accesses, checks whether a RT check is needed
564   /// and builds sets of dependent accesses.
565   void buildDependenceSets() {
566     processMemAccesses();
567   }
568 
569   /// Initial processing of memory accesses determined that we need to
570   /// perform dependency checking.
571   ///
572   /// Note that this can later be cleared if we retry memcheck analysis without
573   /// dependency checking (i.e. FoundNonConstantDistanceDependence).
574   bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
575 
576   /// We decided that no dependence analysis would be used.  Reset the state.
577   void resetDepChecks(MemoryDepChecker &DepChecker) {
578     CheckDeps.clear();
579     DepChecker.clearDependences();
580   }
581 
582   MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
583 
584 private:
585   typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
586 
587   /// Go over all memory access and check whether runtime pointer checks
588   /// are needed and build sets of dependency check candidates.
589   void processMemAccesses();
590 
591   /// Map of all accesses. Values are the types used to access memory pointed to
592   /// by the pointer.
593   PtrAccessMap Accesses;
594 
595   /// The loop being checked.
596   const Loop *TheLoop;
597 
598   /// List of accesses that need a further dependence check.
599   MemAccessInfoList CheckDeps;
600 
601   /// Set of pointers that are read only.
602   SmallPtrSet<Value*, 16> ReadOnlyPtr;
603 
604   /// An alias set tracker to partition the access set by underlying object and
605   //intrinsic property (such as TBAA metadata).
606   AliasSetTracker AST;
607 
608   LoopInfo *LI;
609 
610   /// Sets of potentially dependent accesses - members of one set share an
611   /// underlying pointer. The set "CheckDeps" identfies which sets really need a
612   /// dependence check.
613   MemoryDepChecker::DepCandidates &DepCands;
614 
615   /// Initial processing of memory accesses determined that we may need
616   /// to add memchecks.  Perform the analysis to determine the necessary checks.
617   ///
618   /// Note that, this is different from isDependencyCheckNeeded.  When we retry
619   /// memcheck analysis without dependency checking
620   /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
621   /// cleared while this remains set if we have potentially dependent accesses.
622   bool IsRTCheckAnalysisNeeded = false;
623 
624   /// The SCEV predicate containing all the SCEV-related assumptions.
625   PredicatedScalarEvolution &PSE;
626 };
627 
628 } // end anonymous namespace
629 
630 /// Check whether a pointer can participate in a runtime bounds check.
631 /// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
632 /// by adding run-time checks (overflow checks) if necessary.
633 static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
634                                 const ValueToValueMap &Strides, Value *Ptr,
635                                 Loop *L, bool Assume) {
636   const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
637 
638   // The bounds for loop-invariant pointer is trivial.
639   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
640     return true;
641 
642   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
643 
644   if (!AR && Assume)
645     AR = PSE.getAsAddRec(Ptr);
646 
647   if (!AR)
648     return false;
649 
650   return AR->isAffine();
651 }
652 
653 /// Check whether a pointer address cannot wrap.
654 static bool isNoWrap(PredicatedScalarEvolution &PSE,
655                      const ValueToValueMap &Strides, Value *Ptr, Type *AccessTy,
656                      Loop *L) {
657   const SCEV *PtrScev = PSE.getSCEV(Ptr);
658   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
659     return true;
660 
661   int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides);
662   if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
663     return true;
664 
665   return false;
666 }
667 
668 static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
669                           function_ref<void(Value *)> AddPointer) {
670   SmallPtrSet<Value *, 8> Visited;
671   SmallVector<Value *> WorkList;
672   WorkList.push_back(StartPtr);
673 
674   while (!WorkList.empty()) {
675     Value *Ptr = WorkList.pop_back_val();
676     if (!Visited.insert(Ptr).second)
677       continue;
678     auto *PN = dyn_cast<PHINode>(Ptr);
679     // SCEV does not look through non-header PHIs inside the loop. Such phis
680     // can be analyzed by adding separate accesses for each incoming pointer
681     // value.
682     if (PN && InnermostLoop.contains(PN->getParent()) &&
683         PN->getParent() != InnermostLoop.getHeader()) {
684       for (const Use &Inc : PN->incoming_values())
685         WorkList.push_back(Inc);
686     } else
687       AddPointer(Ptr);
688   }
689 }
690 
691 bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
692                                           MemAccessInfo Access, Type *AccessTy,
693                                           const ValueToValueMap &StridesMap,
694                                           DenseMap<Value *, unsigned> &DepSetId,
695                                           Loop *TheLoop, unsigned &RunningDepId,
696                                           unsigned ASId, bool ShouldCheckWrap,
697                                           bool Assume) {
698   Value *Ptr = Access.getPointer();
699 
700   if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
701     return false;
702 
703   // When we run after a failing dependency check we have to make sure
704   // we don't have wrapping pointers.
705   if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
706     auto *Expr = PSE.getSCEV(Ptr);
707     if (!Assume || !isa<SCEVAddRecExpr>(Expr))
708       return false;
709     PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
710   }
711 
712   // The id of the dependence set.
713   unsigned DepId;
714 
715   if (isDependencyCheckNeeded()) {
716     Value *Leader = DepCands.getLeaderValue(Access).getPointer();
717     unsigned &LeaderId = DepSetId[Leader];
718     if (!LeaderId)
719       LeaderId = RunningDepId++;
720     DepId = LeaderId;
721   } else
722     // Each access has its own dependence set.
723     DepId = RunningDepId++;
724 
725   bool IsWrite = Access.getInt();
726   RtCheck.insert(TheLoop, Ptr, AccessTy, IsWrite, DepId, ASId, StridesMap, PSE);
727   LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
728 
729   return true;
730 }
731 
732 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
733                                      ScalarEvolution *SE, Loop *TheLoop,
734                                      const ValueToValueMap &StridesMap,
735                                      Value *&UncomputablePtr, bool ShouldCheckWrap) {
736   // Find pointers with computable bounds. We are going to use this information
737   // to place a runtime bound check.
738   bool CanDoRT = true;
739 
740   bool MayNeedRTCheck = false;
741   if (!IsRTCheckAnalysisNeeded) return true;
742 
743   bool IsDepCheckNeeded = isDependencyCheckNeeded();
744 
745   // We assign a consecutive id to access from different alias sets.
746   // Accesses between different groups doesn't need to be checked.
747   unsigned ASId = 0;
748   for (auto &AS : AST) {
749     int NumReadPtrChecks = 0;
750     int NumWritePtrChecks = 0;
751     bool CanDoAliasSetRT = true;
752     ++ASId;
753 
754     // We assign consecutive id to access from different dependence sets.
755     // Accesses within the same set don't need a runtime check.
756     unsigned RunningDepId = 1;
757     DenseMap<Value *, unsigned> DepSetId;
758 
759     SmallVector<MemAccessInfo, 4> Retries;
760 
761     // First, count how many write and read accesses are in the alias set. Also
762     // collect MemAccessInfos for later.
763     SmallVector<MemAccessInfo, 4> AccessInfos;
764     for (const auto &A : AS) {
765       Value *Ptr = A.getValue();
766       bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
767 
768       if (IsWrite)
769         ++NumWritePtrChecks;
770       else
771         ++NumReadPtrChecks;
772       AccessInfos.emplace_back(Ptr, IsWrite);
773     }
774 
775     // We do not need runtime checks for this alias set, if there are no writes
776     // or a single write and no reads.
777     if (NumWritePtrChecks == 0 ||
778         (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
779       assert((AS.size() <= 1 ||
780               all_of(AS,
781                      [this](auto AC) {
782                        MemAccessInfo AccessWrite(AC.getValue(), true);
783                        return DepCands.findValue(AccessWrite) == DepCands.end();
784                      })) &&
785              "Can only skip updating CanDoRT below, if all entries in AS "
786              "are reads or there is at most 1 entry");
787       continue;
788     }
789 
790     for (auto &Access : AccessInfos) {
791       for (auto &AccessTy : Accesses[Access]) {
792         if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
793                                   DepSetId, TheLoop, RunningDepId, ASId,
794                                   ShouldCheckWrap, false)) {
795           LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
796                             << *Access.getPointer() << '\n');
797           Retries.push_back(Access);
798           CanDoAliasSetRT = false;
799         }
800       }
801     }
802 
803     // Note that this function computes CanDoRT and MayNeedRTCheck
804     // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
805     // we have a pointer for which we couldn't find the bounds but we don't
806     // actually need to emit any checks so it does not matter.
807     //
808     // We need runtime checks for this alias set, if there are at least 2
809     // dependence sets (in which case RunningDepId > 2) or if we need to re-try
810     // any bound checks (because in that case the number of dependence sets is
811     // incomplete).
812     bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
813 
814     // We need to perform run-time alias checks, but some pointers had bounds
815     // that couldn't be checked.
816     if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
817       // Reset the CanDoSetRt flag and retry all accesses that have failed.
818       // We know that we need these checks, so we can now be more aggressive
819       // and add further checks if required (overflow checks).
820       CanDoAliasSetRT = true;
821       for (auto Access : Retries) {
822         for (auto &AccessTy : Accesses[Access]) {
823           if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
824                                     DepSetId, TheLoop, RunningDepId, ASId,
825                                     ShouldCheckWrap, /*Assume=*/true)) {
826             CanDoAliasSetRT = false;
827             UncomputablePtr = Access.getPointer();
828             break;
829           }
830         }
831       }
832     }
833 
834     CanDoRT &= CanDoAliasSetRT;
835     MayNeedRTCheck |= NeedsAliasSetRTCheck;
836     ++ASId;
837   }
838 
839   // If the pointers that we would use for the bounds comparison have different
840   // address spaces, assume the values aren't directly comparable, so we can't
841   // use them for the runtime check. We also have to assume they could
842   // overlap. In the future there should be metadata for whether address spaces
843   // are disjoint.
844   unsigned NumPointers = RtCheck.Pointers.size();
845   for (unsigned i = 0; i < NumPointers; ++i) {
846     for (unsigned j = i + 1; j < NumPointers; ++j) {
847       // Only need to check pointers between two different dependency sets.
848       if (RtCheck.Pointers[i].DependencySetId ==
849           RtCheck.Pointers[j].DependencySetId)
850        continue;
851       // Only need to check pointers in the same alias set.
852       if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
853         continue;
854 
855       Value *PtrI = RtCheck.Pointers[i].PointerValue;
856       Value *PtrJ = RtCheck.Pointers[j].PointerValue;
857 
858       unsigned ASi = PtrI->getType()->getPointerAddressSpace();
859       unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
860       if (ASi != ASj) {
861         LLVM_DEBUG(
862             dbgs() << "LAA: Runtime check would require comparison between"
863                       " different address spaces\n");
864         return false;
865       }
866     }
867   }
868 
869   if (MayNeedRTCheck && CanDoRT)
870     RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
871 
872   LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
873                     << " pointer comparisons.\n");
874 
875   // If we can do run-time checks, but there are no checks, no runtime checks
876   // are needed. This can happen when all pointers point to the same underlying
877   // object for example.
878   RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
879 
880   bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
881   if (!CanDoRTIfNeeded)
882     RtCheck.reset();
883   return CanDoRTIfNeeded;
884 }
885 
886 void AccessAnalysis::processMemAccesses() {
887   // We process the set twice: first we process read-write pointers, last we
888   // process read-only pointers. This allows us to skip dependence tests for
889   // read-only pointers.
890 
891   LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
892   LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
893   LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
894   LLVM_DEBUG({
895     for (auto A : Accesses)
896       dbgs() << "\t" << *A.first.getPointer() << " ("
897              << (A.first.getInt()
898                      ? "write"
899                      : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
900                                                                 : "read"))
901              << ")\n";
902   });
903 
904   // The AliasSetTracker has nicely partitioned our pointers by metadata
905   // compatibility and potential for underlying-object overlap. As a result, we
906   // only need to check for potential pointer dependencies within each alias
907   // set.
908   for (const auto &AS : AST) {
909     // Note that both the alias-set tracker and the alias sets themselves used
910     // linked lists internally and so the iteration order here is deterministic
911     // (matching the original instruction order within each set).
912 
913     bool SetHasWrite = false;
914 
915     // Map of pointers to last access encountered.
916     typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
917     UnderlyingObjToAccessMap ObjToLastAccess;
918 
919     // Set of access to check after all writes have been processed.
920     PtrAccessMap DeferredAccesses;
921 
922     // Iterate over each alias set twice, once to process read/write pointers,
923     // and then to process read-only pointers.
924     for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
925       bool UseDeferred = SetIteration > 0;
926       PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
927 
928       for (const auto &AV : AS) {
929         Value *Ptr = AV.getValue();
930 
931         // For a single memory access in AliasSetTracker, Accesses may contain
932         // both read and write, and they both need to be handled for CheckDeps.
933         for (const auto &AC : S) {
934           if (AC.first.getPointer() != Ptr)
935             continue;
936 
937           bool IsWrite = AC.first.getInt();
938 
939           // If we're using the deferred access set, then it contains only
940           // reads.
941           bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
942           if (UseDeferred && !IsReadOnlyPtr)
943             continue;
944           // Otherwise, the pointer must be in the PtrAccessSet, either as a
945           // read or a write.
946           assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
947                   S.count(MemAccessInfo(Ptr, false))) &&
948                  "Alias-set pointer not in the access set?");
949 
950           MemAccessInfo Access(Ptr, IsWrite);
951           DepCands.insert(Access);
952 
953           // Memorize read-only pointers for later processing and skip them in
954           // the first round (they need to be checked after we have seen all
955           // write pointers). Note: we also mark pointer that are not
956           // consecutive as "read-only" pointers (so that we check
957           // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
958           if (!UseDeferred && IsReadOnlyPtr) {
959             // We only use the pointer keys, the types vector values don't
960             // matter.
961             DeferredAccesses.insert({Access, {}});
962             continue;
963           }
964 
965           // If this is a write - check other reads and writes for conflicts. If
966           // this is a read only check other writes for conflicts (but only if
967           // there is no other write to the ptr - this is an optimization to
968           // catch "a[i] = a[i] + " without having to do a dependence check).
969           if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
970             CheckDeps.push_back(Access);
971             IsRTCheckAnalysisNeeded = true;
972           }
973 
974           if (IsWrite)
975             SetHasWrite = true;
976 
977           // Create sets of pointers connected by a shared alias set and
978           // underlying object.
979           typedef SmallVector<const Value *, 16> ValueVector;
980           ValueVector TempObjects;
981 
982           getUnderlyingObjects(Ptr, TempObjects, LI);
983           LLVM_DEBUG(dbgs()
984                      << "Underlying objects for pointer " << *Ptr << "\n");
985           for (const Value *UnderlyingObj : TempObjects) {
986             // nullptr never alias, don't join sets for pointer that have "null"
987             // in their UnderlyingObjects list.
988             if (isa<ConstantPointerNull>(UnderlyingObj) &&
989                 !NullPointerIsDefined(
990                     TheLoop->getHeader()->getParent(),
991                     UnderlyingObj->getType()->getPointerAddressSpace()))
992               continue;
993 
994             UnderlyingObjToAccessMap::iterator Prev =
995                 ObjToLastAccess.find(UnderlyingObj);
996             if (Prev != ObjToLastAccess.end())
997               DepCands.unionSets(Access, Prev->second);
998 
999             ObjToLastAccess[UnderlyingObj] = Access;
1000             LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
1001           }
1002         }
1003       }
1004     }
1005   }
1006 }
1007 
1008 static bool isInBoundsGep(Value *Ptr) {
1009   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
1010     return GEP->isInBounds();
1011   return false;
1012 }
1013 
1014 /// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1015 /// i.e. monotonically increasing/decreasing.
1016 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1017                            PredicatedScalarEvolution &PSE, const Loop *L) {
1018   // FIXME: This should probably only return true for NUW.
1019   if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1020     return true;
1021 
1022   // Scalar evolution does not propagate the non-wrapping flags to values that
1023   // are derived from a non-wrapping induction variable because non-wrapping
1024   // could be flow-sensitive.
1025   //
1026   // Look through the potentially overflowing instruction to try to prove
1027   // non-wrapping for the *specific* value of Ptr.
1028 
1029   // The arithmetic implied by an inbounds GEP can't overflow.
1030   auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1031   if (!GEP || !GEP->isInBounds())
1032     return false;
1033 
1034   // Make sure there is only one non-const index and analyze that.
1035   Value *NonConstIndex = nullptr;
1036   for (Value *Index : GEP->indices())
1037     if (!isa<ConstantInt>(Index)) {
1038       if (NonConstIndex)
1039         return false;
1040       NonConstIndex = Index;
1041     }
1042   if (!NonConstIndex)
1043     // The recurrence is on the pointer, ignore for now.
1044     return false;
1045 
1046   // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
1047   // AddRec using a NSW operation.
1048   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1049     if (OBO->hasNoSignedWrap() &&
1050         // Assume constant for other the operand so that the AddRec can be
1051         // easily found.
1052         isa<ConstantInt>(OBO->getOperand(1))) {
1053       auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1054 
1055       if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1056         return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1057     }
1058 
1059   return false;
1060 }
1061 
1062 /// Check whether the access through \p Ptr has a constant stride.
1063 int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
1064                            Value *Ptr, const Loop *Lp,
1065                            const ValueToValueMap &StridesMap, bool Assume,
1066                            bool ShouldCheckWrap) {
1067   Type *Ty = Ptr->getType();
1068   assert(Ty->isPointerTy() && "Unexpected non-ptr");
1069 
1070   if (isa<ScalableVectorType>(AccessTy)) {
1071     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1072                       << "\n");
1073     return 0;
1074   }
1075 
1076   const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1077 
1078   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1079   if (Assume && !AR)
1080     AR = PSE.getAsAddRec(Ptr);
1081 
1082   if (!AR) {
1083     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1084                       << " SCEV: " << *PtrScev << "\n");
1085     return 0;
1086   }
1087 
1088   // The access function must stride over the innermost loop.
1089   if (Lp != AR->getLoop()) {
1090     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1091                       << *Ptr << " SCEV: " << *AR << "\n");
1092     return 0;
1093   }
1094 
1095   // The address calculation must not wrap. Otherwise, a dependence could be
1096   // inverted.
1097   // An inbounds getelementptr that is a AddRec with a unit stride
1098   // cannot wrap per definition. The unit stride requirement is checked later.
1099   // An getelementptr without an inbounds attribute and unit stride would have
1100   // to access the pointer value "0" which is undefined behavior in address
1101   // space 0, therefore we can also vectorize this case.
1102   unsigned AddrSpace = Ty->getPointerAddressSpace();
1103   bool IsInBoundsGEP = isInBoundsGep(Ptr);
1104   bool IsNoWrapAddRec = !ShouldCheckWrap ||
1105     PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
1106     isNoWrapAddRec(Ptr, AR, PSE, Lp);
1107   if (!IsNoWrapAddRec && !IsInBoundsGEP &&
1108       NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace)) {
1109     if (Assume) {
1110       PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1111       IsNoWrapAddRec = true;
1112       LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
1113                         << "LAA:   Pointer: " << *Ptr << "\n"
1114                         << "LAA:   SCEV: " << *AR << "\n"
1115                         << "LAA:   Added an overflow assumption\n");
1116     } else {
1117       LLVM_DEBUG(
1118           dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1119                  << *Ptr << " SCEV: " << *AR << "\n");
1120       return 0;
1121     }
1122   }
1123 
1124   // Check the step is constant.
1125   const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1126 
1127   // Calculate the pointer stride and check if it is constant.
1128   const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1129   if (!C) {
1130     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1131                       << " SCEV: " << *AR << "\n");
1132     return 0;
1133   }
1134 
1135   auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1136   TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1137   int64_t Size = AllocSize.getFixedSize();
1138   const APInt &APStepVal = C->getAPInt();
1139 
1140   // Huge step value - give up.
1141   if (APStepVal.getBitWidth() > 64)
1142     return 0;
1143 
1144   int64_t StepVal = APStepVal.getSExtValue();
1145 
1146   // Strided access.
1147   int64_t Stride = StepVal / Size;
1148   int64_t Rem = StepVal % Size;
1149   if (Rem)
1150     return 0;
1151 
1152   // If the SCEV could wrap but we have an inbounds gep with a unit stride we
1153   // know we can't "wrap around the address space". In case of address space
1154   // zero we know that this won't happen without triggering undefined behavior.
1155   if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
1156       (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
1157                                               AddrSpace))) {
1158     if (Assume) {
1159       // We can avoid this case by adding a run-time check.
1160       LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
1161                         << "inbounds or in address space 0 may wrap:\n"
1162                         << "LAA:   Pointer: " << *Ptr << "\n"
1163                         << "LAA:   SCEV: " << *AR << "\n"
1164                         << "LAA:   Added an overflow assumption\n");
1165       PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1166     } else
1167       return 0;
1168   }
1169 
1170   return Stride;
1171 }
1172 
1173 Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
1174                                     Value *PtrB, const DataLayout &DL,
1175                                     ScalarEvolution &SE, bool StrictCheck,
1176                                     bool CheckType) {
1177   assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1178   assert(cast<PointerType>(PtrA->getType())
1179              ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type");
1180   assert(cast<PointerType>(PtrB->getType())
1181              ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type");
1182 
1183   // Make sure that A and B are different pointers.
1184   if (PtrA == PtrB)
1185     return 0;
1186 
1187   // Make sure that the element types are the same if required.
1188   if (CheckType && ElemTyA != ElemTyB)
1189     return None;
1190 
1191   unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1192   unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1193 
1194   // Check that the address spaces match.
1195   if (ASA != ASB)
1196     return None;
1197   unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1198 
1199   APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1200   Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1201   Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1202 
1203   int Val;
1204   if (PtrA1 == PtrB1) {
1205     // Retrieve the address space again as pointer stripping now tracks through
1206     // `addrspacecast`.
1207     ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1208     ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1209     // Check that the address spaces match and that the pointers are valid.
1210     if (ASA != ASB)
1211       return None;
1212 
1213     IdxWidth = DL.getIndexSizeInBits(ASA);
1214     OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1215     OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1216 
1217     OffsetB -= OffsetA;
1218     Val = OffsetB.getSExtValue();
1219   } else {
1220     // Otherwise compute the distance with SCEV between the base pointers.
1221     const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1222     const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1223     const auto *Diff =
1224         dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1225     if (!Diff)
1226       return None;
1227     Val = Diff->getAPInt().getSExtValue();
1228   }
1229   int Size = DL.getTypeStoreSize(ElemTyA);
1230   int Dist = Val / Size;
1231 
1232   // Ensure that the calculated distance matches the type-based one after all
1233   // the bitcasts removal in the provided pointers.
1234   if (!StrictCheck || Dist * Size == Val)
1235     return Dist;
1236   return None;
1237 }
1238 
1239 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1240                            const DataLayout &DL, ScalarEvolution &SE,
1241                            SmallVectorImpl<unsigned> &SortedIndices) {
1242   assert(llvm::all_of(
1243              VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1244          "Expected list of pointer operands.");
1245   // Walk over the pointers, and map each of them to an offset relative to
1246   // first pointer in the array.
1247   Value *Ptr0 = VL[0];
1248 
1249   using DistOrdPair = std::pair<int64_t, int>;
1250   auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) {
1251     return L.first < R.first;
1252   };
1253   std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1254   Offsets.emplace(0, 0);
1255   int Cnt = 1;
1256   bool IsConsecutive = true;
1257   for (auto *Ptr : VL.drop_front()) {
1258     Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1259                                          /*StrictCheck=*/true);
1260     if (!Diff)
1261       return false;
1262 
1263     // Check if the pointer with the same offset is found.
1264     int64_t Offset = *Diff;
1265     auto Res = Offsets.emplace(Offset, Cnt);
1266     if (!Res.second)
1267       return false;
1268     // Consecutive order if the inserted element is the last one.
1269     IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1270     ++Cnt;
1271   }
1272   SortedIndices.clear();
1273   if (!IsConsecutive) {
1274     // Fill SortedIndices array only if it is non-consecutive.
1275     SortedIndices.resize(VL.size());
1276     Cnt = 0;
1277     for (const std::pair<int64_t, int> &Pair : Offsets) {
1278       SortedIndices[Cnt] = Pair.second;
1279       ++Cnt;
1280     }
1281   }
1282   return true;
1283 }
1284 
1285 /// Returns true if the memory operations \p A and \p B are consecutive.
1286 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1287                                ScalarEvolution &SE, bool CheckType) {
1288   Value *PtrA = getLoadStorePointerOperand(A);
1289   Value *PtrB = getLoadStorePointerOperand(B);
1290   if (!PtrA || !PtrB)
1291     return false;
1292   Type *ElemTyA = getLoadStoreType(A);
1293   Type *ElemTyB = getLoadStoreType(B);
1294   Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1295                                        /*StrictCheck=*/true, CheckType);
1296   return Diff && *Diff == 1;
1297 }
1298 
1299 void MemoryDepChecker::addAccess(StoreInst *SI) {
1300   visitPointers(SI->getPointerOperand(), *InnermostLoop,
1301                 [this, SI](Value *Ptr) {
1302                   Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1303                   InstMap.push_back(SI);
1304                   ++AccessIdx;
1305                 });
1306 }
1307 
1308 void MemoryDepChecker::addAccess(LoadInst *LI) {
1309   visitPointers(LI->getPointerOperand(), *InnermostLoop,
1310                 [this, LI](Value *Ptr) {
1311                   Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1312                   InstMap.push_back(LI);
1313                   ++AccessIdx;
1314                 });
1315 }
1316 
1317 MemoryDepChecker::VectorizationSafetyStatus
1318 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1319   switch (Type) {
1320   case NoDep:
1321   case Forward:
1322   case BackwardVectorizable:
1323     return VectorizationSafetyStatus::Safe;
1324 
1325   case Unknown:
1326     return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1327   case ForwardButPreventsForwarding:
1328   case Backward:
1329   case BackwardVectorizableButPreventsForwarding:
1330     return VectorizationSafetyStatus::Unsafe;
1331   }
1332   llvm_unreachable("unexpected DepType!");
1333 }
1334 
1335 bool MemoryDepChecker::Dependence::isBackward() const {
1336   switch (Type) {
1337   case NoDep:
1338   case Forward:
1339   case ForwardButPreventsForwarding:
1340   case Unknown:
1341     return false;
1342 
1343   case BackwardVectorizable:
1344   case Backward:
1345   case BackwardVectorizableButPreventsForwarding:
1346     return true;
1347   }
1348   llvm_unreachable("unexpected DepType!");
1349 }
1350 
1351 bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1352   return isBackward() || Type == Unknown;
1353 }
1354 
1355 bool MemoryDepChecker::Dependence::isForward() const {
1356   switch (Type) {
1357   case Forward:
1358   case ForwardButPreventsForwarding:
1359     return true;
1360 
1361   case NoDep:
1362   case Unknown:
1363   case BackwardVectorizable:
1364   case Backward:
1365   case BackwardVectorizableButPreventsForwarding:
1366     return false;
1367   }
1368   llvm_unreachable("unexpected DepType!");
1369 }
1370 
1371 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1372                                                     uint64_t TypeByteSize) {
1373   // If loads occur at a distance that is not a multiple of a feasible vector
1374   // factor store-load forwarding does not take place.
1375   // Positive dependences might cause troubles because vectorizing them might
1376   // prevent store-load forwarding making vectorized code run a lot slower.
1377   //   a[i] = a[i-3] ^ a[i-8];
1378   //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
1379   //   hence on your typical architecture store-load forwarding does not take
1380   //   place. Vectorizing in such cases does not make sense.
1381   // Store-load forwarding distance.
1382 
1383   // After this many iterations store-to-load forwarding conflicts should not
1384   // cause any slowdowns.
1385   const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1386   // Maximum vector factor.
1387   uint64_t MaxVFWithoutSLForwardIssues = std::min(
1388       VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);
1389 
1390   // Compute the smallest VF at which the store and load would be misaligned.
1391   for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1392        VF *= 2) {
1393     // If the number of vector iteration between the store and the load are
1394     // small we could incur conflicts.
1395     if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1396       MaxVFWithoutSLForwardIssues = (VF >> 1);
1397       break;
1398     }
1399   }
1400 
1401   if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1402     LLVM_DEBUG(
1403         dbgs() << "LAA: Distance " << Distance
1404                << " that could cause a store-load forwarding conflict\n");
1405     return true;
1406   }
1407 
1408   if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
1409       MaxVFWithoutSLForwardIssues !=
1410           VectorizerParams::MaxVectorWidth * TypeByteSize)
1411     MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
1412   return false;
1413 }
1414 
1415 void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1416   if (Status < S)
1417     Status = S;
1418 }
1419 
1420 /// Given a non-constant (unknown) dependence-distance \p Dist between two
1421 /// memory accesses, that have the same stride whose absolute value is given
1422 /// in \p Stride, and that have the same type size \p TypeByteSize,
1423 /// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
1424 /// possible to prove statically that the dependence distance is larger
1425 /// than the range that the accesses will travel through the execution of
1426 /// the loop. If so, return true; false otherwise. This is useful for
1427 /// example in loops such as the following (PR31098):
1428 ///     for (i = 0; i < D; ++i) {
1429 ///                = out[i];
1430 ///       out[i+D] =
1431 ///     }
1432 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1433                                      const SCEV &BackedgeTakenCount,
1434                                      const SCEV &Dist, uint64_t Stride,
1435                                      uint64_t TypeByteSize) {
1436 
1437   // If we can prove that
1438   //      (**) |Dist| > BackedgeTakenCount * Step
1439   // where Step is the absolute stride of the memory accesses in bytes,
1440   // then there is no dependence.
1441   //
1442   // Rationale:
1443   // We basically want to check if the absolute distance (|Dist/Step|)
1444   // is >= the loop iteration count (or > BackedgeTakenCount).
1445   // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1446   // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1447   // that the dependence distance is >= VF; This is checked elsewhere.
1448   // But in some cases we can prune unknown dependence distances early, and
1449   // even before selecting the VF, and without a runtime test, by comparing
1450   // the distance against the loop iteration count. Since the vectorized code
1451   // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1452   // also guarantees that distance >= VF.
1453   //
1454   const uint64_t ByteStride = Stride * TypeByteSize;
1455   const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1456   const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1457 
1458   const SCEV *CastedDist = &Dist;
1459   const SCEV *CastedProduct = Product;
1460   uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
1461   uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());
1462 
1463   // The dependence distance can be positive/negative, so we sign extend Dist;
1464   // The multiplication of the absolute stride in bytes and the
1465   // backedgeTakenCount is non-negative, so we zero extend Product.
1466   if (DistTypeSize > ProductTypeSize)
1467     CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1468   else
1469     CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1470 
1471   // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
1472   // (If so, then we have proven (**) because |Dist| >= Dist)
1473   const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1474   if (SE.isKnownPositive(Minus))
1475     return true;
1476 
1477   // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
1478   // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1479   const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1480   Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1481   if (SE.isKnownPositive(Minus))
1482     return true;
1483 
1484   return false;
1485 }
1486 
1487 /// Check the dependence for two accesses with the same stride \p Stride.
1488 /// \p Distance is the positive distance and \p TypeByteSize is type size in
1489 /// bytes.
1490 ///
1491 /// \returns true if they are independent.
1492 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1493                                           uint64_t TypeByteSize) {
1494   assert(Stride > 1 && "The stride must be greater than 1");
1495   assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1496   assert(Distance > 0 && "The distance must be non-zero");
1497 
1498   // Skip if the distance is not multiple of type byte size.
1499   if (Distance % TypeByteSize)
1500     return false;
1501 
1502   uint64_t ScaledDist = Distance / TypeByteSize;
1503 
1504   // No dependence if the scaled distance is not multiple of the stride.
1505   // E.g.
1506   //      for (i = 0; i < 1024 ; i += 4)
1507   //        A[i+2] = A[i] + 1;
1508   //
1509   // Two accesses in memory (scaled distance is 2, stride is 4):
1510   //     | A[0] |      |      |      | A[4] |      |      |      |
1511   //     |      |      | A[2] |      |      |      | A[6] |      |
1512   //
1513   // E.g.
1514   //      for (i = 0; i < 1024 ; i += 3)
1515   //        A[i+4] = A[i] + 1;
1516   //
1517   // Two accesses in memory (scaled distance is 4, stride is 3):
1518   //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
1519   //     |      |      |      |      | A[4] |      |      | A[7] |      |
1520   return ScaledDist % Stride;
1521 }
1522 
1523 MemoryDepChecker::Dependence::DepType
1524 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1525                               const MemAccessInfo &B, unsigned BIdx,
1526                               const ValueToValueMap &Strides) {
1527   assert (AIdx < BIdx && "Must pass arguments in program order");
1528 
1529   Value *APtr = A.getPointer();
1530   Value *BPtr = B.getPointer();
1531   bool AIsWrite = A.getInt();
1532   bool BIsWrite = B.getInt();
1533   Type *ATy = getLoadStoreType(InstMap[AIdx]);
1534   Type *BTy = getLoadStoreType(InstMap[BIdx]);
1535 
1536   // Two reads are independent.
1537   if (!AIsWrite && !BIsWrite)
1538     return Dependence::NoDep;
1539 
1540   // We cannot check pointers in different address spaces.
1541   if (APtr->getType()->getPointerAddressSpace() !=
1542       BPtr->getType()->getPointerAddressSpace())
1543     return Dependence::Unknown;
1544 
1545   int64_t StrideAPtr =
1546       getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true);
1547   int64_t StrideBPtr =
1548       getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true);
1549 
1550   const SCEV *Src = PSE.getSCEV(APtr);
1551   const SCEV *Sink = PSE.getSCEV(BPtr);
1552 
1553   // If the induction step is negative we have to invert source and sink of the
1554   // dependence.
1555   if (StrideAPtr < 0) {
1556     std::swap(APtr, BPtr);
1557     std::swap(ATy, BTy);
1558     std::swap(Src, Sink);
1559     std::swap(AIsWrite, BIsWrite);
1560     std::swap(AIdx, BIdx);
1561     std::swap(StrideAPtr, StrideBPtr);
1562   }
1563 
1564   const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
1565 
1566   LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1567                     << "(Induction step: " << StrideAPtr << ")\n");
1568   LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1569                     << *InstMap[BIdx] << ": " << *Dist << "\n");
1570 
1571   // Need accesses with constant stride. We don't want to vectorize
1572   // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1573   // the address space.
1574   if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1575     LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1576     return Dependence::Unknown;
1577   }
1578 
1579   auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1580   uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1581   bool HasSameSize =
1582       DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1583   uint64_t Stride = std::abs(StrideAPtr);
1584   const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1585   if (!C) {
1586     if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
1587         isSafeDependenceDistance(DL, *(PSE.getSE()),
1588                                  *(PSE.getBackedgeTakenCount()), *Dist, Stride,
1589                                  TypeByteSize))
1590       return Dependence::NoDep;
1591 
1592     LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1593     FoundNonConstantDistanceDependence = true;
1594     return Dependence::Unknown;
1595   }
1596 
1597   const APInt &Val = C->getAPInt();
1598   int64_t Distance = Val.getSExtValue();
1599 
1600   // Attempt to prove strided accesses independent.
1601   if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
1602       areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1603     LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1604     return Dependence::NoDep;
1605   }
1606 
1607   // Negative distances are not plausible dependencies.
1608   if (Val.isNegative()) {
1609     bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1610     if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1611         (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1612          !HasSameSize)) {
1613       LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1614       return Dependence::ForwardButPreventsForwarding;
1615     }
1616 
1617     LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1618     return Dependence::Forward;
1619   }
1620 
1621   // Write to the same location with the same size.
1622   if (Val == 0) {
1623     if (HasSameSize)
1624       return Dependence::Forward;
1625     LLVM_DEBUG(
1626         dbgs() << "LAA: Zero dependence difference but different type sizes\n");
1627     return Dependence::Unknown;
1628   }
1629 
1630   assert(Val.isStrictlyPositive() && "Expect a positive value");
1631 
1632   if (!HasSameSize) {
1633     LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
1634                          "different type sizes\n");
1635     return Dependence::Unknown;
1636   }
1637 
1638   // Bail out early if passed-in parameters make vectorization not feasible.
1639   unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1640                            VectorizerParams::VectorizationFactor : 1);
1641   unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1642                            VectorizerParams::VectorizationInterleave : 1);
1643   // The minimum number of iterations for a vectorized/unrolled version.
1644   unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1645 
1646   // It's not vectorizable if the distance is smaller than the minimum distance
1647   // needed for a vectroized/unrolled version. Vectorizing one iteration in
1648   // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1649   // TypeByteSize (No need to plus the last gap distance).
1650   //
1651   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1652   //      foo(int *A) {
1653   //        int *B = (int *)((char *)A + 14);
1654   //        for (i = 0 ; i < 1024 ; i += 2)
1655   //          B[i] = A[i] + 1;
1656   //      }
1657   //
1658   // Two accesses in memory (stride is 2):
1659   //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
1660   //                              | B[0] |      | B[2] |      | B[4] |
1661   //
1662   // Distance needs for vectorizing iterations except the last iteration:
1663   // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4.
1664   // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1665   //
1666   // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1667   // 12, which is less than distance.
1668   //
1669   // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
1670   // the minimum distance needed is 28, which is greater than distance. It is
1671   // not safe to do vectorization.
1672   uint64_t MinDistanceNeeded =
1673       TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1674   if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1675     LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
1676                       << Distance << '\n');
1677     return Dependence::Backward;
1678   }
1679 
1680   // Unsafe if the minimum distance needed is greater than max safe distance.
1681   if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1682     LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
1683                       << MinDistanceNeeded << " size in bytes");
1684     return Dependence::Backward;
1685   }
1686 
1687   // Positive distance bigger than max vectorization factor.
1688   // FIXME: Should use max factor instead of max distance in bytes, which could
1689   // not handle different types.
1690   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1691   //      void foo (int *A, char *B) {
1692   //        for (unsigned i = 0; i < 1024; i++) {
1693   //          A[i+2] = A[i] + 1;
1694   //          B[i+2] = B[i] + 1;
1695   //        }
1696   //      }
1697   //
1698   // This case is currently unsafe according to the max safe distance. If we
1699   // analyze the two accesses on array B, the max safe dependence distance
1700   // is 2. Then we analyze the accesses on array A, the minimum distance needed
1701   // is 8, which is less than 2 and forbidden vectorization, But actually
1702   // both A and B could be vectorized by 2 iterations.
1703   MaxSafeDepDistBytes =
1704       std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);
1705 
1706   bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
1707   if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1708       couldPreventStoreLoadForward(Distance, TypeByteSize))
1709     return Dependence::BackwardVectorizableButPreventsForwarding;
1710 
1711   uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
1712   LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
1713                     << " with max VF = " << MaxVF << '\n');
1714   uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1715   MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
1716   return Dependence::BackwardVectorizable;
1717 }
1718 
1719 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
1720                                    MemAccessInfoList &CheckDeps,
1721                                    const ValueToValueMap &Strides) {
1722 
1723   MaxSafeDepDistBytes = -1;
1724   SmallPtrSet<MemAccessInfo, 8> Visited;
1725   for (MemAccessInfo CurAccess : CheckDeps) {
1726     if (Visited.count(CurAccess))
1727       continue;
1728 
1729     // Get the relevant memory access set.
1730     EquivalenceClasses<MemAccessInfo>::iterator I =
1731       AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
1732 
1733     // Check accesses within this set.
1734     EquivalenceClasses<MemAccessInfo>::member_iterator AI =
1735         AccessSets.member_begin(I);
1736     EquivalenceClasses<MemAccessInfo>::member_iterator AE =
1737         AccessSets.member_end();
1738 
1739     // Check every access pair.
1740     while (AI != AE) {
1741       Visited.insert(*AI);
1742       bool AIIsWrite = AI->getInt();
1743       // Check loads only against next equivalent class, but stores also against
1744       // other stores in the same equivalence class - to the same address.
1745       EquivalenceClasses<MemAccessInfo>::member_iterator OI =
1746           (AIIsWrite ? AI : std::next(AI));
1747       while (OI != AE) {
1748         // Check every accessing instruction pair in program order.
1749         for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
1750              I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
1751           // Scan all accesses of another equivalence class, but only the next
1752           // accesses of the same equivalent class.
1753           for (std::vector<unsigned>::iterator
1754                    I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
1755                    I2E = (OI == AI ? I1E : Accesses[*OI].end());
1756                I2 != I2E; ++I2) {
1757             auto A = std::make_pair(&*AI, *I1);
1758             auto B = std::make_pair(&*OI, *I2);
1759 
1760             assert(*I1 != *I2);
1761             if (*I1 > *I2)
1762               std::swap(A, B);
1763 
1764             Dependence::DepType Type =
1765                 isDependent(*A.first, A.second, *B.first, B.second, Strides);
1766             mergeInStatus(Dependence::isSafeForVectorization(Type));
1767 
1768             // Gather dependences unless we accumulated MaxDependences
1769             // dependences.  In that case return as soon as we find the first
1770             // unsafe dependence.  This puts a limit on this quadratic
1771             // algorithm.
1772             if (RecordDependences) {
1773               if (Type != Dependence::NoDep)
1774                 Dependences.push_back(Dependence(A.second, B.second, Type));
1775 
1776               if (Dependences.size() >= MaxDependences) {
1777                 RecordDependences = false;
1778                 Dependences.clear();
1779                 LLVM_DEBUG(dbgs()
1780                            << "Too many dependences, stopped recording\n");
1781               }
1782             }
1783             if (!RecordDependences && !isSafeForVectorization())
1784               return false;
1785           }
1786         ++OI;
1787       }
1788       AI++;
1789     }
1790   }
1791 
1792   LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
1793   return isSafeForVectorization();
1794 }
1795 
1796 SmallVector<Instruction *, 4>
1797 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
1798   MemAccessInfo Access(Ptr, isWrite);
1799   auto &IndexVector = Accesses.find(Access)->second;
1800 
1801   SmallVector<Instruction *, 4> Insts;
1802   transform(IndexVector,
1803                  std::back_inserter(Insts),
1804                  [&](unsigned Idx) { return this->InstMap[Idx]; });
1805   return Insts;
1806 }
1807 
1808 const char *MemoryDepChecker::Dependence::DepName[] = {
1809     "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
1810     "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
1811 
1812 void MemoryDepChecker::Dependence::print(
1813     raw_ostream &OS, unsigned Depth,
1814     const SmallVectorImpl<Instruction *> &Instrs) const {
1815   OS.indent(Depth) << DepName[Type] << ":\n";
1816   OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
1817   OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
1818 }
1819 
1820 bool LoopAccessInfo::canAnalyzeLoop() {
1821   // We need to have a loop header.
1822   LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
1823                     << TheLoop->getHeader()->getParent()->getName() << ": "
1824                     << TheLoop->getHeader()->getName() << '\n');
1825 
1826   // We can only analyze innermost loops.
1827   if (!TheLoop->isInnermost()) {
1828     LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
1829     recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
1830     return false;
1831   }
1832 
1833   // We must have a single backedge.
1834   if (TheLoop->getNumBackEdges() != 1) {
1835     LLVM_DEBUG(
1836         dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1837     recordAnalysis("CFGNotUnderstood")
1838         << "loop control flow is not understood by analyzer";
1839     return false;
1840   }
1841 
1842   // ScalarEvolution needs to be able to find the exit count.
1843   const SCEV *ExitCount = PSE->getBackedgeTakenCount();
1844   if (isa<SCEVCouldNotCompute>(ExitCount)) {
1845     recordAnalysis("CantComputeNumberOfIterations")
1846         << "could not determine number of loop iterations";
1847     LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
1848     return false;
1849   }
1850 
1851   return true;
1852 }
1853 
1854 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
1855                                  const TargetLibraryInfo *TLI,
1856                                  DominatorTree *DT) {
1857   // Holds the Load and Store instructions.
1858   SmallVector<LoadInst *, 16> Loads;
1859   SmallVector<StoreInst *, 16> Stores;
1860 
1861   // Holds all the different accesses in the loop.
1862   unsigned NumReads = 0;
1863   unsigned NumReadWrites = 0;
1864 
1865   bool HasComplexMemInst = false;
1866 
1867   // A runtime check is only legal to insert if there are no convergent calls.
1868   HasConvergentOp = false;
1869 
1870   PtrRtChecking->Pointers.clear();
1871   PtrRtChecking->Need = false;
1872 
1873   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
1874 
1875   const bool EnableMemAccessVersioningOfLoop =
1876       EnableMemAccessVersioning &&
1877       !TheLoop->getHeader()->getParent()->hasOptSize();
1878 
1879   // For each block.
1880   for (BasicBlock *BB : TheLoop->blocks()) {
1881     // Scan the BB and collect legal loads and stores. Also detect any
1882     // convergent instructions.
1883     for (Instruction &I : *BB) {
1884       if (auto *Call = dyn_cast<CallBase>(&I)) {
1885         if (Call->isConvergent())
1886           HasConvergentOp = true;
1887       }
1888 
1889       // With both a non-vectorizable memory instruction and a convergent
1890       // operation, found in this loop, no reason to continue the search.
1891       if (HasComplexMemInst && HasConvergentOp) {
1892         CanVecMem = false;
1893         return;
1894       }
1895 
1896       // Avoid hitting recordAnalysis multiple times.
1897       if (HasComplexMemInst)
1898         continue;
1899 
1900       // If this is a load, save it. If this instruction can read from memory
1901       // but is not a load, then we quit. Notice that we don't handle function
1902       // calls that read or write.
1903       if (I.mayReadFromMemory()) {
1904         // Many math library functions read the rounding mode. We will only
1905         // vectorize a loop if it contains known function calls that don't set
1906         // the flag. Therefore, it is safe to ignore this read from memory.
1907         auto *Call = dyn_cast<CallInst>(&I);
1908         if (Call && getVectorIntrinsicIDForCall(Call, TLI))
1909           continue;
1910 
1911         // If the function has an explicit vectorized counterpart, we can safely
1912         // assume that it can be vectorized.
1913         if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
1914             !VFDatabase::getMappings(*Call).empty())
1915           continue;
1916 
1917         auto *Ld = dyn_cast<LoadInst>(&I);
1918         if (!Ld) {
1919           recordAnalysis("CantVectorizeInstruction", Ld)
1920             << "instruction cannot be vectorized";
1921           HasComplexMemInst = true;
1922           continue;
1923         }
1924         if (!Ld->isSimple() && !IsAnnotatedParallel) {
1925           recordAnalysis("NonSimpleLoad", Ld)
1926               << "read with atomic ordering or volatile read";
1927           LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
1928           HasComplexMemInst = true;
1929           continue;
1930         }
1931         NumLoads++;
1932         Loads.push_back(Ld);
1933         DepChecker->addAccess(Ld);
1934         if (EnableMemAccessVersioningOfLoop)
1935           collectStridedAccess(Ld);
1936         continue;
1937       }
1938 
1939       // Save 'store' instructions. Abort if other instructions write to memory.
1940       if (I.mayWriteToMemory()) {
1941         auto *St = dyn_cast<StoreInst>(&I);
1942         if (!St) {
1943           recordAnalysis("CantVectorizeInstruction", St)
1944               << "instruction cannot be vectorized";
1945           HasComplexMemInst = true;
1946           continue;
1947         }
1948         if (!St->isSimple() && !IsAnnotatedParallel) {
1949           recordAnalysis("NonSimpleStore", St)
1950               << "write with atomic ordering or volatile write";
1951           LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
1952           HasComplexMemInst = true;
1953           continue;
1954         }
1955         NumStores++;
1956         Stores.push_back(St);
1957         DepChecker->addAccess(St);
1958         if (EnableMemAccessVersioningOfLoop)
1959           collectStridedAccess(St);
1960       }
1961     } // Next instr.
1962   } // Next block.
1963 
1964   if (HasComplexMemInst) {
1965     CanVecMem = false;
1966     return;
1967   }
1968 
1969   // Now we have two lists that hold the loads and the stores.
1970   // Next, we find the pointers that they use.
1971 
1972   // Check if we see any stores. If there are no stores, then we don't
1973   // care if the pointers are *restrict*.
1974   if (!Stores.size()) {
1975     LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
1976     CanVecMem = true;
1977     return;
1978   }
1979 
1980   MemoryDepChecker::DepCandidates DependentAccesses;
1981   AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
1982 
1983   // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
1984   // multiple times on the same object. If the ptr is accessed twice, once
1985   // for read and once for write, it will only appear once (on the write
1986   // list). This is okay, since we are going to check for conflicts between
1987   // writes and between reads and writes, but not between reads and reads.
1988   SmallSet<std::pair<Value *, Type *>, 16> Seen;
1989 
1990   // Record uniform store addresses to identify if we have multiple stores
1991   // to the same address.
1992   SmallPtrSet<Value *, 16> UniformStores;
1993 
1994   for (StoreInst *ST : Stores) {
1995     Value *Ptr = ST->getPointerOperand();
1996 
1997     if (isUniform(Ptr))
1998       HasDependenceInvolvingLoopInvariantAddress |=
1999           !UniformStores.insert(Ptr).second;
2000 
2001     // If we did *not* see this pointer before, insert it to  the read-write
2002     // list. At this phase it is only a 'write' list.
2003     Type *AccessTy = getLoadStoreType(ST);
2004     if (Seen.insert({Ptr, AccessTy}).second) {
2005       ++NumReadWrites;
2006 
2007       MemoryLocation Loc = MemoryLocation::get(ST);
2008       // The TBAA metadata could have a control dependency on the predication
2009       // condition, so we cannot rely on it when determining whether or not we
2010       // need runtime pointer checks.
2011       if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2012         Loc.AATags.TBAA = nullptr;
2013 
2014       visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2015                     [&Accesses, AccessTy, Loc](Value *Ptr) {
2016                       MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2017                       Accesses.addStore(NewLoc, AccessTy);
2018                     });
2019     }
2020   }
2021 
2022   if (IsAnnotatedParallel) {
2023     LLVM_DEBUG(
2024         dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2025                << "checks.\n");
2026     CanVecMem = true;
2027     return;
2028   }
2029 
2030   for (LoadInst *LD : Loads) {
2031     Value *Ptr = LD->getPointerOperand();
2032     // If we did *not* see this pointer before, insert it to the
2033     // read list. If we *did* see it before, then it is already in
2034     // the read-write list. This allows us to vectorize expressions
2035     // such as A[i] += x;  Because the address of A[i] is a read-write
2036     // pointer. This only works if the index of A[i] is consecutive.
2037     // If the address of i is unknown (for example A[B[i]]) then we may
2038     // read a few words, modify, and write a few words, and some of the
2039     // words may be written to the same address.
2040     bool IsReadOnlyPtr = false;
2041     Type *AccessTy = getLoadStoreType(LD);
2042     if (Seen.insert({Ptr, AccessTy}).second ||
2043         !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)) {
2044       ++NumReads;
2045       IsReadOnlyPtr = true;
2046     }
2047 
2048     // See if there is an unsafe dependency between a load to a uniform address and
2049     // store to the same uniform address.
2050     if (UniformStores.count(Ptr)) {
2051       LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2052                            "load and uniform store to the same address!\n");
2053       HasDependenceInvolvingLoopInvariantAddress = true;
2054     }
2055 
2056     MemoryLocation Loc = MemoryLocation::get(LD);
2057     // The TBAA metadata could have a control dependency on the predication
2058     // condition, so we cannot rely on it when determining whether or not we
2059     // need runtime pointer checks.
2060     if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2061       Loc.AATags.TBAA = nullptr;
2062 
2063     visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2064                   [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2065                     MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2066                     Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2067                   });
2068   }
2069 
2070   // If we write (or read-write) to a single destination and there are no
2071   // other reads in this loop then is it safe to vectorize.
2072   if (NumReadWrites == 1 && NumReads == 0) {
2073     LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2074     CanVecMem = true;
2075     return;
2076   }
2077 
2078   // Build dependence sets and check whether we need a runtime pointer bounds
2079   // check.
2080   Accesses.buildDependenceSets();
2081 
2082   // Find pointers with computable bounds. We are going to use this information
2083   // to place a runtime bound check.
2084   Value *UncomputablePtr = nullptr;
2085   bool CanDoRTIfNeeded =
2086       Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2087                                SymbolicStrides, UncomputablePtr, false);
2088   if (!CanDoRTIfNeeded) {
2089     auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2090     recordAnalysis("CantIdentifyArrayBounds", I)
2091         << "cannot identify array bounds";
2092     LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2093                       << "the array bounds.\n");
2094     CanVecMem = false;
2095     return;
2096   }
2097 
2098   LLVM_DEBUG(
2099     dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2100 
2101   CanVecMem = true;
2102   if (Accesses.isDependencyCheckNeeded()) {
2103     LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2104     CanVecMem = DepChecker->areDepsSafe(
2105         DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2106     MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
2107 
2108     if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2109       LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2110 
2111       // Clear the dependency checks. We assume they are not needed.
2112       Accesses.resetDepChecks(*DepChecker);
2113 
2114       PtrRtChecking->reset();
2115       PtrRtChecking->Need = true;
2116 
2117       auto *SE = PSE->getSE();
2118       UncomputablePtr = nullptr;
2119       CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2120           *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2121 
2122       // Check that we found the bounds for the pointer.
2123       if (!CanDoRTIfNeeded) {
2124         auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2125         recordAnalysis("CantCheckMemDepsAtRunTime", I)
2126             << "cannot check memory dependencies at runtime";
2127         LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2128         CanVecMem = false;
2129         return;
2130       }
2131 
2132       CanVecMem = true;
2133     }
2134   }
2135 
2136   if (HasConvergentOp) {
2137     recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2138       << "cannot add control dependency to convergent operation";
2139     LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2140                          "would be needed with a convergent operation\n");
2141     CanVecMem = false;
2142     return;
2143   }
2144 
2145   if (CanVecMem)
2146     LLVM_DEBUG(
2147         dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
2148                << (PtrRtChecking->Need ? "" : " don't")
2149                << " need runtime memory checks.\n");
2150   else
2151     emitUnsafeDependenceRemark();
2152 }
2153 
2154 void LoopAccessInfo::emitUnsafeDependenceRemark() {
2155   auto Deps = getDepChecker().getDependences();
2156   if (!Deps)
2157     return;
2158   auto Found = std::find_if(
2159       Deps->begin(), Deps->end(), [](const MemoryDepChecker::Dependence &D) {
2160         return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2161                MemoryDepChecker::VectorizationSafetyStatus::Safe;
2162       });
2163   if (Found == Deps->end())
2164     return;
2165   MemoryDepChecker::Dependence Dep = *Found;
2166 
2167   LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2168 
2169   // Emit remark for first unsafe dependence
2170   OptimizationRemarkAnalysis &R =
2171       recordAnalysis("UnsafeDep", Dep.getDestination(*this))
2172       << "unsafe dependent memory operations in loop. Use "
2173          "#pragma loop distribute(enable) to allow loop distribution "
2174          "to attempt to isolate the offending operations into a separate "
2175          "loop";
2176 
2177   switch (Dep.Type) {
2178   case MemoryDepChecker::Dependence::NoDep:
2179   case MemoryDepChecker::Dependence::Forward:
2180   case MemoryDepChecker::Dependence::BackwardVectorizable:
2181     llvm_unreachable("Unexpected dependence");
2182   case MemoryDepChecker::Dependence::Backward:
2183     R << "\nBackward loop carried data dependence.";
2184     break;
2185   case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2186     R << "\nForward loop carried data dependence that prevents "
2187          "store-to-load forwarding.";
2188     break;
2189   case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2190     R << "\nBackward loop carried data dependence that prevents "
2191          "store-to-load forwarding.";
2192     break;
2193   case MemoryDepChecker::Dependence::Unknown:
2194     R << "\nUnknown data dependence.";
2195     break;
2196   }
2197 
2198   if (Instruction *I = Dep.getSource(*this)) {
2199     DebugLoc SourceLoc = I->getDebugLoc();
2200     if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2201       SourceLoc = DD->getDebugLoc();
2202     if (SourceLoc)
2203       R << " Memory location is the same as accessed at "
2204         << ore::NV("Location", SourceLoc);
2205   }
2206 }
2207 
2208 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2209                                            DominatorTree *DT)  {
2210   assert(TheLoop->contains(BB) && "Unknown block used");
2211 
2212   // Blocks that do not dominate the latch need predication.
2213   BasicBlock* Latch = TheLoop->getLoopLatch();
2214   return !DT->dominates(BB, Latch);
2215 }
2216 
2217 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2218                                                            Instruction *I) {
2219   assert(!Report && "Multiple reports generated");
2220 
2221   Value *CodeRegion = TheLoop->getHeader();
2222   DebugLoc DL = TheLoop->getStartLoc();
2223 
2224   if (I) {
2225     CodeRegion = I->getParent();
2226     // If there is no debug location attached to the instruction, revert back to
2227     // using the loop's.
2228     if (I->getDebugLoc())
2229       DL = I->getDebugLoc();
2230   }
2231 
2232   Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2233                                                    CodeRegion);
2234   return *Report;
2235 }
2236 
2237 bool LoopAccessInfo::isUniform(Value *V) const {
2238   auto *SE = PSE->getSE();
2239   // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
2240   // never considered uniform.
2241   // TODO: Is this really what we want? Even without FP SCEV, we may want some
2242   // trivially loop-invariant FP values to be considered uniform.
2243   if (!SE->isSCEVable(V->getType()))
2244     return false;
2245   return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
2246 }
2247 
2248 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2249   Value *Ptr = getLoadStorePointerOperand(MemAccess);
2250   if (!Ptr)
2251     return;
2252 
2253   Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2254   if (!Stride)
2255     return;
2256 
2257   LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2258                        "versioning:");
2259   LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
2260 
2261   // Avoid adding the "Stride == 1" predicate when we know that
2262   // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2263   // or zero iteration loop, as Trip-Count <= Stride == 1.
2264   //
2265   // TODO: We are currently not making a very informed decision on when it is
2266   // beneficial to apply stride versioning. It might make more sense that the
2267   // users of this analysis (such as the vectorizer) will trigger it, based on
2268   // their specific cost considerations; For example, in cases where stride
2269   // versioning does  not help resolving memory accesses/dependences, the
2270   // vectorizer should evaluate the cost of the runtime test, and the benefit
2271   // of various possible stride specializations, considering the alternatives
2272   // of using gather/scatters (if available).
2273 
2274   const SCEV *StrideExpr = PSE->getSCEV(Stride);
2275   const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2276 
2277   // Match the types so we can compare the stride and the BETakenCount.
2278   // The Stride can be positive/negative, so we sign extend Stride;
2279   // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2280   const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2281   uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
2282   uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
2283   const SCEV *CastedStride = StrideExpr;
2284   const SCEV *CastedBECount = BETakenCount;
2285   ScalarEvolution *SE = PSE->getSE();
2286   if (BETypeSize >= StrideTypeSize)
2287     CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2288   else
2289     CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2290   const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2291   // Since TripCount == BackEdgeTakenCount + 1, checking:
2292   // "Stride >= TripCount" is equivalent to checking:
2293   // Stride - BETakenCount > 0
2294   if (SE->isKnownPositive(StrideMinusBETaken)) {
2295     LLVM_DEBUG(
2296         dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2297                   "Stride==1 predicate will imply that the loop executes "
2298                   "at most once.\n");
2299     return;
2300   }
2301   LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2302 
2303   SymbolicStrides[Ptr] = Stride;
2304   StrideSet.insert(Stride);
2305 }
2306 
2307 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2308                                const TargetLibraryInfo *TLI, AAResults *AA,
2309                                DominatorTree *DT, LoopInfo *LI)
2310     : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2311       PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)),
2312       DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
2313   if (canAnalyzeLoop())
2314     analyzeLoop(AA, LI, TLI, DT);
2315 }
2316 
2317 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2318   if (CanVecMem) {
2319     OS.indent(Depth) << "Memory dependences are safe";
2320     if (MaxSafeDepDistBytes != -1ULL)
2321       OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
2322          << " bytes";
2323     if (PtrRtChecking->Need)
2324       OS << " with run-time checks";
2325     OS << "\n";
2326   }
2327 
2328   if (HasConvergentOp)
2329     OS.indent(Depth) << "Has convergent operation in loop\n";
2330 
2331   if (Report)
2332     OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2333 
2334   if (auto *Dependences = DepChecker->getDependences()) {
2335     OS.indent(Depth) << "Dependences:\n";
2336     for (auto &Dep : *Dependences) {
2337       Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2338       OS << "\n";
2339     }
2340   } else
2341     OS.indent(Depth) << "Too many dependences, not recorded\n";
2342 
2343   // List the pair of accesses need run-time checks to prove independence.
2344   PtrRtChecking->print(OS, Depth);
2345   OS << "\n";
2346 
2347   OS.indent(Depth) << "Non vectorizable stores to invariant address were "
2348                    << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
2349                    << "found in loop.\n";
2350 
2351   OS.indent(Depth) << "SCEV assumptions:\n";
2352   PSE->getPredicate().print(OS, Depth);
2353 
2354   OS << "\n";
2355 
2356   OS.indent(Depth) << "Expressions re-written:\n";
2357   PSE->print(OS, Depth);
2358 }
2359 
2360 LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) {
2361   initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
2362 }
2363 
2364 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
2365   auto &LAI = LoopAccessInfoMap[L];
2366 
2367   if (!LAI)
2368     LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);
2369 
2370   return *LAI.get();
2371 }
2372 
2373 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
2374   LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);
2375 
2376   for (Loop *TopLevelLoop : *LI)
2377     for (Loop *L : depth_first(TopLevelLoop)) {
2378       OS.indent(2) << L->getHeader()->getName() << ":\n";
2379       auto &LAI = LAA.getInfo(L);
2380       LAI.print(OS, 4);
2381     }
2382 }
2383 
2384 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
2385   SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2386   auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2387   TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2388   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2389   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2390   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2391 
2392   return false;
2393 }
2394 
2395 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
2396   AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
2397   AU.addRequiredTransitive<AAResultsWrapperPass>();
2398   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2399   AU.addRequiredTransitive<LoopInfoWrapperPass>();
2400 
2401   AU.setPreservesAll();
2402 }
2403 
2404 char LoopAccessLegacyAnalysis::ID = 0;
2405 static const char laa_name[] = "Loop Access Analysis";
2406 #define LAA_NAME "loop-accesses"
2407 
2408 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2409 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2410 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
2411 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2412 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2413 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2414 
2415 AnalysisKey LoopAccessAnalysis::Key;
2416 
2417 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
2418                                        LoopStandardAnalysisResults &AR) {
2419   return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
2420 }
2421 
2422 namespace llvm {
2423 
2424   Pass *createLAAPass() {
2425     return new LoopAccessLegacyAnalysis();
2426   }
2427 
2428 } // end namespace llvm
2429