1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // The implementation for the loop memory dependence that was originally
11 // developed for the loop vectorizer.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/LoopAccessAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/EquivalenceClasses.h"
20 #include "llvm/ADT/PointerIntPair.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/AliasSetTracker.h"
29 #include "llvm/Analysis/LoopAnalysisManager.h"
30 #include "llvm/Analysis/LoopInfo.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/ScalarEvolution.h"
34 #include "llvm/Analysis/ScalarEvolutionExpander.h"
35 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/Analysis/VectorUtils.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/DiagnosticInfo.h"
45 #include "llvm/IR/Dominators.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InstrTypes.h"
49 #include "llvm/IR/Instruction.h"
50 #include "llvm/IR/Instructions.h"
51 #include "llvm/IR/Operator.h"
52 #include "llvm/IR/PassManager.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/IR/ValueHandle.h"
56 #include "llvm/Pass.h"
57 #include "llvm/Support/Casting.h"
58 #include "llvm/Support/CommandLine.h"
59 #include "llvm/Support/Debug.h"
60 #include "llvm/Support/ErrorHandling.h"
61 #include "llvm/Support/raw_ostream.h"
62 #include <algorithm>
63 #include <cassert>
64 #include <cstdint>
65 #include <cstdlib>
66 #include <iterator>
67 #include <utility>
68 #include <vector>
69 
70 using namespace llvm;
71 
72 #define DEBUG_TYPE "loop-accesses"
73 
74 static cl::opt<unsigned, true>
75 VectorizationFactor("force-vector-width", cl::Hidden,
76                     cl::desc("Sets the SIMD width. Zero is autoselect."),
77                     cl::location(VectorizerParams::VectorizationFactor));
78 unsigned VectorizerParams::VectorizationFactor;
79 
80 static cl::opt<unsigned, true>
81 VectorizationInterleave("force-vector-interleave", cl::Hidden,
82                         cl::desc("Sets the vectorization interleave count. "
83                                  "Zero is autoselect."),
84                         cl::location(
85                             VectorizerParams::VectorizationInterleave));
86 unsigned VectorizerParams::VectorizationInterleave;
87 
88 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
89     "runtime-memory-check-threshold", cl::Hidden,
90     cl::desc("When performing memory disambiguation checks at runtime do not "
91              "generate more than this number of comparisons (default = 8)."),
92     cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
93 unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
94 
95 /// \brief The maximum iterations used to merge memory checks
96 static cl::opt<unsigned> MemoryCheckMergeThreshold(
97     "memory-check-merge-threshold", cl::Hidden,
98     cl::desc("Maximum number of comparisons done when trying to merge "
99              "runtime memory checks. (default = 100)"),
100     cl::init(100));
101 
102 /// Maximum SIMD width.
103 const unsigned VectorizerParams::MaxVectorWidth = 64;
104 
105 /// \brief We collect dependences up to this threshold.
106 static cl::opt<unsigned>
107     MaxDependences("max-dependences", cl::Hidden,
108                    cl::desc("Maximum number of dependences collected by "
109                             "loop-access analysis (default = 100)"),
110                    cl::init(100));
111 
112 /// This enables versioning on the strides of symbolically striding memory
113 /// accesses in code like the following.
114 ///   for (i = 0; i < N; ++i)
115 ///     A[i * Stride1] += B[i * Stride2] ...
116 ///
117 /// Will be roughly translated to
118 ///    if (Stride1 == 1 && Stride2 == 1) {
119 ///      for (i = 0; i < N; i+=4)
120 ///       A[i:i+3] += ...
121 ///    } else
122 ///      ...
123 static cl::opt<bool> EnableMemAccessVersioning(
124     "enable-mem-access-versioning", cl::init(true), cl::Hidden,
125     cl::desc("Enable symbolic stride memory access versioning"));
126 
127 /// \brief Enable store-to-load forwarding conflict detection. This option can
128 /// be disabled for correctness testing.
129 static cl::opt<bool> EnableForwardingConflictDetection(
130     "store-to-load-forwarding-conflict-detection", cl::Hidden,
131     cl::desc("Enable conflict detection in loop-access analysis"),
132     cl::init(true));
133 
134 bool VectorizerParams::isInterleaveForced() {
135   return ::VectorizationInterleave.getNumOccurrences() > 0;
136 }
137 
138 Value *llvm::stripIntegerCast(Value *V) {
139   if (auto *CI = dyn_cast<CastInst>(V))
140     if (CI->getOperand(0)->getType()->isIntegerTy())
141       return CI->getOperand(0);
142   return V;
143 }
144 
145 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
146                                             const ValueToValueMap &PtrToStride,
147                                             Value *Ptr, Value *OrigPtr) {
148   const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
149 
150   // If there is an entry in the map return the SCEV of the pointer with the
151   // symbolic stride replaced by one.
152   ValueToValueMap::const_iterator SI =
153       PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
154   if (SI != PtrToStride.end()) {
155     Value *StrideVal = SI->second;
156 
157     // Strip casts.
158     StrideVal = stripIntegerCast(StrideVal);
159 
160     ScalarEvolution *SE = PSE.getSE();
161     const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
162     const auto *CT =
163         static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));
164 
165     PSE.addPredicate(*SE->getEqualPredicate(U, CT));
166     auto *Expr = PSE.getSCEV(Ptr);
167 
168     DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
169                  << "\n");
170     return Expr;
171   }
172 
173   // Otherwise, just return the SCEV of the original pointer.
174   return OrigSCEV;
175 }
176 
177 /// Calculate Start and End points of memory access.
178 /// Let's assume A is the first access and B is a memory access on N-th loop
179 /// iteration. Then B is calculated as:
180 ///   B = A + Step*N .
181 /// Step value may be positive or negative.
182 /// N is a calculated back-edge taken count:
183 ///     N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0
184 /// Start and End points are calculated in the following way:
185 /// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
186 /// where SizeOfElt is the size of single memory access in bytes.
187 ///
188 /// There is no conflict when the intervals are disjoint:
189 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
190 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
191                                     unsigned DepSetId, unsigned ASId,
192                                     const ValueToValueMap &Strides,
193                                     PredicatedScalarEvolution &PSE) {
194   // Get the stride replaced scev.
195   const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
196   ScalarEvolution *SE = PSE.getSE();
197 
198   const SCEV *ScStart;
199   const SCEV *ScEnd;
200 
201   if (SE->isLoopInvariant(Sc, Lp))
202     ScStart = ScEnd = Sc;
203   else {
204     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
205     assert(AR && "Invalid addrec expression");
206     const SCEV *Ex = PSE.getBackedgeTakenCount();
207 
208     ScStart = AR->getStart();
209     ScEnd = AR->evaluateAtIteration(Ex, *SE);
210     const SCEV *Step = AR->getStepRecurrence(*SE);
211 
212     // For expressions with negative step, the upper bound is ScStart and the
213     // lower bound is ScEnd.
214     if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
215       if (CStep->getValue()->isNegative())
216         std::swap(ScStart, ScEnd);
217     } else {
218       // Fallback case: the step is not constant, but we can still
219       // get the upper and lower bounds of the interval by using min/max
220       // expressions.
221       ScStart = SE->getUMinExpr(ScStart, ScEnd);
222       ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
223     }
224     // Add the size of the pointed element to ScEnd.
225     unsigned EltSize =
226       Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
227     const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
228     ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
229   }
230 
231   Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
232 }
233 
234 SmallVector<RuntimePointerChecking::PointerCheck, 4>
235 RuntimePointerChecking::generateChecks() const {
236   SmallVector<PointerCheck, 4> Checks;
237 
238   for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
239     for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
240       const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
241       const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];
242 
243       if (needsChecking(CGI, CGJ))
244         Checks.push_back(std::make_pair(&CGI, &CGJ));
245     }
246   }
247   return Checks;
248 }
249 
250 void RuntimePointerChecking::generateChecks(
251     MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
252   assert(Checks.empty() && "Checks is not empty");
253   groupChecks(DepCands, UseDependencies);
254   Checks = generateChecks();
255 }
256 
257 bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
258                                            const CheckingPtrGroup &N) const {
259   for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
260     for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
261       if (needsChecking(M.Members[I], N.Members[J]))
262         return true;
263   return false;
264 }
265 
266 /// Compare \p I and \p J and return the minimum.
267 /// Return nullptr in case we couldn't find an answer.
268 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
269                                    ScalarEvolution *SE) {
270   const SCEV *Diff = SE->getMinusSCEV(J, I);
271   const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
272 
273   if (!C)
274     return nullptr;
275   if (C->getValue()->isNegative())
276     return J;
277   return I;
278 }
279 
280 bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
281   const SCEV *Start = RtCheck.Pointers[Index].Start;
282   const SCEV *End = RtCheck.Pointers[Index].End;
283 
284   // Compare the starts and ends with the known minimum and maximum
285   // of this set. We need to know how we compare against the min/max
286   // of the set in order to be able to emit memchecks.
287   const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
288   if (!Min0)
289     return false;
290 
291   const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
292   if (!Min1)
293     return false;
294 
295   // Update the low bound  expression if we've found a new min value.
296   if (Min0 == Start)
297     Low = Start;
298 
299   // Update the high bound expression if we've found a new max value.
300   if (Min1 != End)
301     High = End;
302 
303   Members.push_back(Index);
304   return true;
305 }
306 
307 void RuntimePointerChecking::groupChecks(
308     MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
309   // We build the groups from dependency candidates equivalence classes
310   // because:
311   //    - We know that pointers in the same equivalence class share
312   //      the same underlying object and therefore there is a chance
313   //      that we can compare pointers
314   //    - We wouldn't be able to merge two pointers for which we need
315   //      to emit a memcheck. The classes in DepCands are already
316   //      conveniently built such that no two pointers in the same
317   //      class need checking against each other.
318 
319   // We use the following (greedy) algorithm to construct the groups
320   // For every pointer in the equivalence class:
321   //   For each existing group:
322   //   - if the difference between this pointer and the min/max bounds
323   //     of the group is a constant, then make the pointer part of the
324   //     group and update the min/max bounds of that group as required.
325 
326   CheckingGroups.clear();
327 
328   // If we need to check two pointers to the same underlying object
329   // with a non-constant difference, we shouldn't perform any pointer
330   // grouping with those pointers. This is because we can easily get
331   // into cases where the resulting check would return false, even when
332   // the accesses are safe.
333   //
334   // The following example shows this:
335   // for (i = 0; i < 1000; ++i)
336   //   a[5000 + i * m] = a[i] + a[i + 9000]
337   //
338   // Here grouping gives a check of (5000, 5000 + 1000 * m) against
339   // (0, 10000) which is always false. However, if m is 1, there is no
340   // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
341   // us to perform an accurate check in this case.
342   //
343   // The above case requires that we have an UnknownDependence between
344   // accesses to the same underlying object. This cannot happen unless
345   // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
346   // is also false. In this case we will use the fallback path and create
347   // separate checking groups for all pointers.
348 
349   // If we don't have the dependency partitions, construct a new
350   // checking pointer group for each pointer. This is also required
351   // for correctness, because in this case we can have checking between
352   // pointers to the same underlying object.
353   if (!UseDependencies) {
354     for (unsigned I = 0; I < Pointers.size(); ++I)
355       CheckingGroups.push_back(CheckingPtrGroup(I, *this));
356     return;
357   }
358 
359   unsigned TotalComparisons = 0;
360 
361   DenseMap<Value *, unsigned> PositionMap;
362   for (unsigned Index = 0; Index < Pointers.size(); ++Index)
363     PositionMap[Pointers[Index].PointerValue] = Index;
364 
365   // We need to keep track of what pointers we've already seen so we
366   // don't process them twice.
367   SmallSet<unsigned, 2> Seen;
368 
369   // Go through all equivalence classes, get the "pointer check groups"
370   // and add them to the overall solution. We use the order in which accesses
371   // appear in 'Pointers' to enforce determinism.
372   for (unsigned I = 0; I < Pointers.size(); ++I) {
373     // We've seen this pointer before, and therefore already processed
374     // its equivalence class.
375     if (Seen.count(I))
376       continue;
377 
378     MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
379                                            Pointers[I].IsWritePtr);
380 
381     SmallVector<CheckingPtrGroup, 2> Groups;
382     auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
383 
384     // Because DepCands is constructed by visiting accesses in the order in
385     // which they appear in alias sets (which is deterministic) and the
386     // iteration order within an equivalence class member is only dependent on
387     // the order in which unions and insertions are performed on the
388     // equivalence class, the iteration order is deterministic.
389     for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
390          MI != ME; ++MI) {
391       unsigned Pointer = PositionMap[MI->getPointer()];
392       bool Merged = false;
393       // Mark this pointer as seen.
394       Seen.insert(Pointer);
395 
396       // Go through all the existing sets and see if we can find one
397       // which can include this pointer.
398       for (CheckingPtrGroup &Group : Groups) {
399         // Don't perform more than a certain amount of comparisons.
400         // This should limit the cost of grouping the pointers to something
401         // reasonable.  If we do end up hitting this threshold, the algorithm
402         // will create separate groups for all remaining pointers.
403         if (TotalComparisons > MemoryCheckMergeThreshold)
404           break;
405 
406         TotalComparisons++;
407 
408         if (Group.addPointer(Pointer)) {
409           Merged = true;
410           break;
411         }
412       }
413 
414       if (!Merged)
415         // We couldn't add this pointer to any existing set or the threshold
416         // for the number of comparisons has been reached. Create a new group
417         // to hold the current pointer.
418         Groups.push_back(CheckingPtrGroup(Pointer, *this));
419     }
420 
421     // We've computed the grouped checks for this partition.
422     // Save the results and continue with the next one.
423     std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups));
424   }
425 }
426 
427 bool RuntimePointerChecking::arePointersInSamePartition(
428     const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
429     unsigned PtrIdx2) {
430   return (PtrToPartition[PtrIdx1] != -1 &&
431           PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
432 }
433 
434 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
435   const PointerInfo &PointerI = Pointers[I];
436   const PointerInfo &PointerJ = Pointers[J];
437 
438   // No need to check if two readonly pointers intersect.
439   if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
440     return false;
441 
442   // Only need to check pointers between two different dependency sets.
443   if (PointerI.DependencySetId == PointerJ.DependencySetId)
444     return false;
445 
446   // Only need to check pointers in the same alias set.
447   if (PointerI.AliasSetId != PointerJ.AliasSetId)
448     return false;
449 
450   return true;
451 }
452 
453 void RuntimePointerChecking::printChecks(
454     raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
455     unsigned Depth) const {
456   unsigned N = 0;
457   for (const auto &Check : Checks) {
458     const auto &First = Check.first->Members, &Second = Check.second->Members;
459 
460     OS.indent(Depth) << "Check " << N++ << ":\n";
461 
462     OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
463     for (unsigned K = 0; K < First.size(); ++K)
464       OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
465 
466     OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
467     for (unsigned K = 0; K < Second.size(); ++K)
468       OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
469   }
470 }
471 
472 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
473 
474   OS.indent(Depth) << "Run-time memory checks:\n";
475   printChecks(OS, Checks, Depth);
476 
477   OS.indent(Depth) << "Grouped accesses:\n";
478   for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
479     const auto &CG = CheckingGroups[I];
480 
481     OS.indent(Depth + 2) << "Group " << &CG << ":\n";
482     OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
483                          << ")\n";
484     for (unsigned J = 0; J < CG.Members.size(); ++J) {
485       OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
486                            << "\n";
487     }
488   }
489 }
490 
491 namespace {
492 
493 /// \brief Analyses memory accesses in a loop.
494 ///
495 /// Checks whether run time pointer checks are needed and builds sets for data
496 /// dependence checking.
497 class AccessAnalysis {
498 public:
499   /// \brief Read or write access location.
500   typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
501   typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
502 
503   AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
504                  MemoryDepChecker::DepCandidates &DA,
505                  PredicatedScalarEvolution &PSE)
506       : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
507         PSE(PSE) {}
508 
509   /// \brief Register a load  and whether it is only read from.
510   void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
511     Value *Ptr = const_cast<Value*>(Loc.Ptr);
512     AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
513     Accesses.insert(MemAccessInfo(Ptr, false));
514     if (IsReadOnly)
515       ReadOnlyPtr.insert(Ptr);
516   }
517 
518   /// \brief Register a store.
519   void addStore(MemoryLocation &Loc) {
520     Value *Ptr = const_cast<Value*>(Loc.Ptr);
521     AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
522     Accesses.insert(MemAccessInfo(Ptr, true));
523   }
524 
525   /// \brief Check if we can emit a run-time no-alias check for \p Access.
526   ///
527   /// Returns true if we can emit a run-time no alias check for \p Access.
528   /// If we can check this access, this also adds it to a dependence set and
529   /// adds a run-time to check for it to \p RtCheck. If \p Assume is true,
530   /// we will attempt to use additional run-time checks in order to get
531   /// the bounds of the pointer.
532   bool createCheckForAccess(RuntimePointerChecking &RtCheck,
533                             MemAccessInfo Access,
534                             const ValueToValueMap &Strides,
535                             DenseMap<Value *, unsigned> &DepSetId,
536                             Loop *TheLoop, unsigned &RunningDepId,
537                             unsigned ASId, bool ShouldCheckStride,
538                             bool Assume);
539 
540   /// \brief Check whether we can check the pointers at runtime for
541   /// non-intersection.
542   ///
543   /// Returns true if we need no check or if we do and we can generate them
544   /// (i.e. the pointers have computable bounds).
545   bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
546                        Loop *TheLoop, const ValueToValueMap &Strides,
547                        bool ShouldCheckWrap = false);
548 
549   /// \brief Goes over all memory accesses, checks whether a RT check is needed
550   /// and builds sets of dependent accesses.
551   void buildDependenceSets() {
552     processMemAccesses();
553   }
554 
555   /// \brief Initial processing of memory accesses determined that we need to
556   /// perform dependency checking.
557   ///
558   /// Note that this can later be cleared if we retry memcheck analysis without
559   /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
560   bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
561 
562   /// We decided that no dependence analysis would be used.  Reset the state.
563   void resetDepChecks(MemoryDepChecker &DepChecker) {
564     CheckDeps.clear();
565     DepChecker.clearDependences();
566   }
567 
568   MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
569 
570 private:
571   typedef SetVector<MemAccessInfo> PtrAccessSet;
572 
573   /// \brief Go over all memory access and check whether runtime pointer checks
574   /// are needed and build sets of dependency check candidates.
575   void processMemAccesses();
576 
577   /// Set of all accesses.
578   PtrAccessSet Accesses;
579 
580   const DataLayout &DL;
581 
582   /// List of accesses that need a further dependence check.
583   MemAccessInfoList CheckDeps;
584 
585   /// Set of pointers that are read only.
586   SmallPtrSet<Value*, 16> ReadOnlyPtr;
587 
588   /// An alias set tracker to partition the access set by underlying object and
589   //intrinsic property (such as TBAA metadata).
590   AliasSetTracker AST;
591 
592   LoopInfo *LI;
593 
594   /// Sets of potentially dependent accesses - members of one set share an
595   /// underlying pointer. The set "CheckDeps" identfies which sets really need a
596   /// dependence check.
597   MemoryDepChecker::DepCandidates &DepCands;
598 
599   /// \brief Initial processing of memory accesses determined that we may need
600   /// to add memchecks.  Perform the analysis to determine the necessary checks.
601   ///
602   /// Note that, this is different from isDependencyCheckNeeded.  When we retry
603   /// memcheck analysis without dependency checking
604   /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
605   /// while this remains set if we have potentially dependent accesses.
606   bool IsRTCheckAnalysisNeeded;
607 
608   /// The SCEV predicate containing all the SCEV-related assumptions.
609   PredicatedScalarEvolution &PSE;
610 };
611 
612 } // end anonymous namespace
613 
614 /// \brief Check whether a pointer can participate in a runtime bounds check.
615 /// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
616 /// by adding run-time checks (overflow checks) if necessary.
617 static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
618                                 const ValueToValueMap &Strides, Value *Ptr,
619                                 Loop *L, bool Assume) {
620   const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
621 
622   // The bounds for loop-invariant pointer is trivial.
623   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
624     return true;
625 
626   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
627 
628   if (!AR && Assume)
629     AR = PSE.getAsAddRec(Ptr);
630 
631   if (!AR)
632     return false;
633 
634   return AR->isAffine();
635 }
636 
637 /// \brief Check whether a pointer address cannot wrap.
638 static bool isNoWrap(PredicatedScalarEvolution &PSE,
639                      const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
640   const SCEV *PtrScev = PSE.getSCEV(Ptr);
641   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
642     return true;
643 
644   int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
645   if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
646     return true;
647 
648   return false;
649 }
650 
651 bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
652                                           MemAccessInfo Access,
653                                           const ValueToValueMap &StridesMap,
654                                           DenseMap<Value *, unsigned> &DepSetId,
655                                           Loop *TheLoop, unsigned &RunningDepId,
656                                           unsigned ASId, bool ShouldCheckWrap,
657                                           bool Assume) {
658   Value *Ptr = Access.getPointer();
659 
660   if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
661     return false;
662 
663   // When we run after a failing dependency check we have to make sure
664   // we don't have wrapping pointers.
665   if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
666     auto *Expr = PSE.getSCEV(Ptr);
667     if (!Assume || !isa<SCEVAddRecExpr>(Expr))
668       return false;
669     PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
670   }
671 
672   // The id of the dependence set.
673   unsigned DepId;
674 
675   if (isDependencyCheckNeeded()) {
676     Value *Leader = DepCands.getLeaderValue(Access).getPointer();
677     unsigned &LeaderId = DepSetId[Leader];
678     if (!LeaderId)
679       LeaderId = RunningDepId++;
680     DepId = LeaderId;
681   } else
682     // Each access has its own dependence set.
683     DepId = RunningDepId++;
684 
685   bool IsWrite = Access.getInt();
686   RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
687   DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
688 
689   return true;
690  }
691 
692 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
693                                      ScalarEvolution *SE, Loop *TheLoop,
694                                      const ValueToValueMap &StridesMap,
695                                      bool ShouldCheckWrap) {
696   // Find pointers with computable bounds. We are going to use this information
697   // to place a runtime bound check.
698   bool CanDoRT = true;
699 
700   bool NeedRTCheck = false;
701   if (!IsRTCheckAnalysisNeeded) return true;
702 
703   bool IsDepCheckNeeded = isDependencyCheckNeeded();
704 
705   // We assign a consecutive id to access from different alias sets.
706   // Accesses between different groups doesn't need to be checked.
707   unsigned ASId = 1;
708   for (auto &AS : AST) {
709     int NumReadPtrChecks = 0;
710     int NumWritePtrChecks = 0;
711     bool CanDoAliasSetRT = true;
712 
713     // We assign consecutive id to access from different dependence sets.
714     // Accesses within the same set don't need a runtime check.
715     unsigned RunningDepId = 1;
716     DenseMap<Value *, unsigned> DepSetId;
717 
718     SmallVector<MemAccessInfo, 4> Retries;
719 
720     for (auto A : AS) {
721       Value *Ptr = A.getValue();
722       bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
723       MemAccessInfo Access(Ptr, IsWrite);
724 
725       if (IsWrite)
726         ++NumWritePtrChecks;
727       else
728         ++NumReadPtrChecks;
729 
730       if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
731                                 RunningDepId, ASId, ShouldCheckWrap, false)) {
732         DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
733         Retries.push_back(Access);
734         CanDoAliasSetRT = false;
735       }
736     }
737 
738     // If we have at least two writes or one write and a read then we need to
739     // check them.  But there is no need to checks if there is only one
740     // dependence set for this alias set.
741     //
742     // Note that this function computes CanDoRT and NeedRTCheck independently.
743     // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer
744     // for which we couldn't find the bounds but we don't actually need to emit
745     // any checks so it does not matter.
746     bool NeedsAliasSetRTCheck = false;
747     if (!(IsDepCheckNeeded && CanDoAliasSetRT && RunningDepId == 2))
748       NeedsAliasSetRTCheck = (NumWritePtrChecks >= 2 ||
749                              (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));
750 
751     // We need to perform run-time alias checks, but some pointers had bounds
752     // that couldn't be checked.
753     if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
754       // Reset the CanDoSetRt flag and retry all accesses that have failed.
755       // We know that we need these checks, so we can now be more aggressive
756       // and add further checks if required (overflow checks).
757       CanDoAliasSetRT = true;
758       for (auto Access : Retries)
759         if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
760                                   TheLoop, RunningDepId, ASId,
761                                   ShouldCheckWrap, /*Assume=*/true)) {
762           CanDoAliasSetRT = false;
763           break;
764         }
765     }
766 
767     CanDoRT &= CanDoAliasSetRT;
768     NeedRTCheck |= NeedsAliasSetRTCheck;
769     ++ASId;
770   }
771 
772   // If the pointers that we would use for the bounds comparison have different
773   // address spaces, assume the values aren't directly comparable, so we can't
774   // use them for the runtime check. We also have to assume they could
775   // overlap. In the future there should be metadata for whether address spaces
776   // are disjoint.
777   unsigned NumPointers = RtCheck.Pointers.size();
778   for (unsigned i = 0; i < NumPointers; ++i) {
779     for (unsigned j = i + 1; j < NumPointers; ++j) {
780       // Only need to check pointers between two different dependency sets.
781       if (RtCheck.Pointers[i].DependencySetId ==
782           RtCheck.Pointers[j].DependencySetId)
783        continue;
784       // Only need to check pointers in the same alias set.
785       if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
786         continue;
787 
788       Value *PtrI = RtCheck.Pointers[i].PointerValue;
789       Value *PtrJ = RtCheck.Pointers[j].PointerValue;
790 
791       unsigned ASi = PtrI->getType()->getPointerAddressSpace();
792       unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
793       if (ASi != ASj) {
794         DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
795                        " different address spaces\n");
796         return false;
797       }
798     }
799   }
800 
801   if (NeedRTCheck && CanDoRT)
802     RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
803 
804   DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
805                << " pointer comparisons.\n");
806 
807   RtCheck.Need = NeedRTCheck;
808 
809   bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
810   if (!CanDoRTIfNeeded)
811     RtCheck.reset();
812   return CanDoRTIfNeeded;
813 }
814 
815 void AccessAnalysis::processMemAccesses() {
816   // We process the set twice: first we process read-write pointers, last we
817   // process read-only pointers. This allows us to skip dependence tests for
818   // read-only pointers.
819 
820   DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
821   DEBUG(dbgs() << "  AST: "; AST.dump());
822   DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
823   DEBUG({
824     for (auto A : Accesses)
825       dbgs() << "\t" << *A.getPointer() << " (" <<
826                 (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
827                                          "read-only" : "read")) << ")\n";
828   });
829 
830   // The AliasSetTracker has nicely partitioned our pointers by metadata
831   // compatibility and potential for underlying-object overlap. As a result, we
832   // only need to check for potential pointer dependencies within each alias
833   // set.
834   for (auto &AS : AST) {
835     // Note that both the alias-set tracker and the alias sets themselves used
836     // linked lists internally and so the iteration order here is deterministic
837     // (matching the original instruction order within each set).
838 
839     bool SetHasWrite = false;
840 
841     // Map of pointers to last access encountered.
842     typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
843     UnderlyingObjToAccessMap ObjToLastAccess;
844 
845     // Set of access to check after all writes have been processed.
846     PtrAccessSet DeferredAccesses;
847 
848     // Iterate over each alias set twice, once to process read/write pointers,
849     // and then to process read-only pointers.
850     for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
851       bool UseDeferred = SetIteration > 0;
852       PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;
853 
854       for (auto AV : AS) {
855         Value *Ptr = AV.getValue();
856 
857         // For a single memory access in AliasSetTracker, Accesses may contain
858         // both read and write, and they both need to be handled for CheckDeps.
859         for (auto AC : S) {
860           if (AC.getPointer() != Ptr)
861             continue;
862 
863           bool IsWrite = AC.getInt();
864 
865           // If we're using the deferred access set, then it contains only
866           // reads.
867           bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
868           if (UseDeferred && !IsReadOnlyPtr)
869             continue;
870           // Otherwise, the pointer must be in the PtrAccessSet, either as a
871           // read or a write.
872           assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
873                   S.count(MemAccessInfo(Ptr, false))) &&
874                  "Alias-set pointer not in the access set?");
875 
876           MemAccessInfo Access(Ptr, IsWrite);
877           DepCands.insert(Access);
878 
879           // Memorize read-only pointers for later processing and skip them in
880           // the first round (they need to be checked after we have seen all
881           // write pointers). Note: we also mark pointer that are not
882           // consecutive as "read-only" pointers (so that we check
883           // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
884           if (!UseDeferred && IsReadOnlyPtr) {
885             DeferredAccesses.insert(Access);
886             continue;
887           }
888 
889           // If this is a write - check other reads and writes for conflicts. If
890           // this is a read only check other writes for conflicts (but only if
891           // there is no other write to the ptr - this is an optimization to
892           // catch "a[i] = a[i] + " without having to do a dependence check).
893           if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
894             CheckDeps.push_back(Access);
895             IsRTCheckAnalysisNeeded = true;
896           }
897 
898           if (IsWrite)
899             SetHasWrite = true;
900 
901           // Create sets of pointers connected by a shared alias set and
902           // underlying object.
903           typedef SmallVector<Value *, 16> ValueVector;
904           ValueVector TempObjects;
905 
906           GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
907           DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
908           for (Value *UnderlyingObj : TempObjects) {
909             // nullptr never alias, don't join sets for pointer that have "null"
910             // in their UnderlyingObjects list.
911             if (isa<ConstantPointerNull>(UnderlyingObj))
912               continue;
913 
914             UnderlyingObjToAccessMap::iterator Prev =
915                 ObjToLastAccess.find(UnderlyingObj);
916             if (Prev != ObjToLastAccess.end())
917               DepCands.unionSets(Access, Prev->second);
918 
919             ObjToLastAccess[UnderlyingObj] = Access;
920             DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
921           }
922         }
923       }
924     }
925   }
926 }
927 
928 static bool isInBoundsGep(Value *Ptr) {
929   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
930     return GEP->isInBounds();
931   return false;
932 }
933 
934 /// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
935 /// i.e. monotonically increasing/decreasing.
936 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
937                            PredicatedScalarEvolution &PSE, const Loop *L) {
938   // FIXME: This should probably only return true for NUW.
939   if (AR->getNoWrapFlags(SCEV::NoWrapMask))
940     return true;
941 
942   // Scalar evolution does not propagate the non-wrapping flags to values that
943   // are derived from a non-wrapping induction variable because non-wrapping
944   // could be flow-sensitive.
945   //
946   // Look through the potentially overflowing instruction to try to prove
947   // non-wrapping for the *specific* value of Ptr.
948 
949   // The arithmetic implied by an inbounds GEP can't overflow.
950   auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
951   if (!GEP || !GEP->isInBounds())
952     return false;
953 
954   // Make sure there is only one non-const index and analyze that.
955   Value *NonConstIndex = nullptr;
956   for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
957     if (!isa<ConstantInt>(Index)) {
958       if (NonConstIndex)
959         return false;
960       NonConstIndex = Index;
961     }
962   if (!NonConstIndex)
963     // The recurrence is on the pointer, ignore for now.
964     return false;
965 
966   // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
967   // AddRec using a NSW operation.
968   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
969     if (OBO->hasNoSignedWrap() &&
970         // Assume constant for other the operand so that the AddRec can be
971         // easily found.
972         isa<ConstantInt>(OBO->getOperand(1))) {
973       auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
974 
975       if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
976         return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
977     }
978 
979   return false;
980 }
981 
982 /// \brief Check whether the access through \p Ptr has a constant stride.
983 int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
984                            const Loop *Lp, const ValueToValueMap &StridesMap,
985                            bool Assume, bool ShouldCheckWrap) {
986   Type *Ty = Ptr->getType();
987   assert(Ty->isPointerTy() && "Unexpected non-ptr");
988 
989   // Make sure that the pointer does not point to aggregate types.
990   auto *PtrTy = cast<PointerType>(Ty);
991   if (PtrTy->getElementType()->isAggregateType()) {
992     DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr
993                  << "\n");
994     return 0;
995   }
996 
997   const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
998 
999   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1000   if (Assume && !AR)
1001     AR = PSE.getAsAddRec(Ptr);
1002 
1003   if (!AR) {
1004     DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1005                  << " SCEV: " << *PtrScev << "\n");
1006     return 0;
1007   }
1008 
1009   // The accesss function must stride over the innermost loop.
1010   if (Lp != AR->getLoop()) {
1011     DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
1012           *Ptr << " SCEV: " << *AR << "\n");
1013     return 0;
1014   }
1015 
1016   // The address calculation must not wrap. Otherwise, a dependence could be
1017   // inverted.
1018   // An inbounds getelementptr that is a AddRec with a unit stride
1019   // cannot wrap per definition. The unit stride requirement is checked later.
1020   // An getelementptr without an inbounds attribute and unit stride would have
1021   // to access the pointer value "0" which is undefined behavior in address
1022   // space 0, therefore we can also vectorize this case.
1023   bool IsInBoundsGEP = isInBoundsGep(Ptr);
1024   bool IsNoWrapAddRec = !ShouldCheckWrap ||
1025     PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
1026     isNoWrapAddRec(Ptr, AR, PSE, Lp);
1027   bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
1028   if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
1029     if (Assume) {
1030       PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1031       IsNoWrapAddRec = true;
1032       DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
1033                    << "LAA:   Pointer: " << *Ptr << "\n"
1034                    << "LAA:   SCEV: " << *AR << "\n"
1035                    << "LAA:   Added an overflow assumption\n");
1036     } else {
1037       DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1038                    << *Ptr << " SCEV: " << *AR << "\n");
1039       return 0;
1040     }
1041   }
1042 
1043   // Check the step is constant.
1044   const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1045 
1046   // Calculate the pointer stride and check if it is constant.
1047   const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1048   if (!C) {
1049     DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
1050           " SCEV: " << *AR << "\n");
1051     return 0;
1052   }
1053 
1054   auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1055   int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
1056   const APInt &APStepVal = C->getAPInt();
1057 
1058   // Huge step value - give up.
1059   if (APStepVal.getBitWidth() > 64)
1060     return 0;
1061 
1062   int64_t StepVal = APStepVal.getSExtValue();
1063 
1064   // Strided access.
1065   int64_t Stride = StepVal / Size;
1066   int64_t Rem = StepVal % Size;
1067   if (Rem)
1068     return 0;
1069 
1070   // If the SCEV could wrap but we have an inbounds gep with a unit stride we
1071   // know we can't "wrap around the address space". In case of address space
1072   // zero we know that this won't happen without triggering undefined behavior.
1073   if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
1074       Stride != 1 && Stride != -1) {
1075     if (Assume) {
1076       // We can avoid this case by adding a run-time check.
1077       DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
1078                    << "inbouds or in address space 0 may wrap:\n"
1079                    << "LAA:   Pointer: " << *Ptr << "\n"
1080                    << "LAA:   SCEV: " << *AR << "\n"
1081                    << "LAA:   Added an overflow assumption\n");
1082       PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1083     } else
1084       return 0;
1085   }
1086 
1087   return Stride;
1088 }
1089 
1090 /// Take the pointer operand from the Load/Store instruction.
1091 /// Returns NULL if this is not a valid Load/Store instruction.
1092 static Value *getPointerOperand(Value *I) {
1093   if (auto *LI = dyn_cast<LoadInst>(I))
1094     return LI->getPointerOperand();
1095   if (auto *SI = dyn_cast<StoreInst>(I))
1096     return SI->getPointerOperand();
1097   return nullptr;
1098 }
1099 
1100 /// Take the address space operand from the Load/Store instruction.
1101 /// Returns -1 if this is not a valid Load/Store instruction.
1102 static unsigned getAddressSpaceOperand(Value *I) {
1103   if (LoadInst *L = dyn_cast<LoadInst>(I))
1104     return L->getPointerAddressSpace();
1105   if (StoreInst *S = dyn_cast<StoreInst>(I))
1106     return S->getPointerAddressSpace();
1107   return -1;
1108 }
1109 
1110 // TODO:This API can be improved by using the permutation of given width as the
1111 // accesses are entered into the map.
1112 bool llvm::sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
1113                            ScalarEvolution &SE,
1114                            SmallVectorImpl<Value *> &Sorted,
1115                            SmallVectorImpl<unsigned> *Mask) {
1116   SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
1117   OffValPairs.reserve(VL.size());
1118   Sorted.reserve(VL.size());
1119 
1120   // Walk over the pointers, and map each of them to an offset relative to
1121   // first pointer in the array.
1122   Value *Ptr0 = getPointerOperand(VL[0]);
1123   const SCEV *Scev0 = SE.getSCEV(Ptr0);
1124   Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
1125   PointerType *PtrTy = dyn_cast<PointerType>(Ptr0->getType());
1126   uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
1127 
1128   for (auto *Val : VL) {
1129     // The only kind of access we care about here is load.
1130     if (!isa<LoadInst>(Val))
1131       return false;
1132 
1133     Value *Ptr = getPointerOperand(Val);
1134     assert(Ptr && "Expected value to have a pointer operand.");
1135     // If a pointer refers to a different underlying object, bail - the
1136     // pointers are by definition incomparable.
1137     Value *CurrObj = GetUnderlyingObject(Ptr, DL);
1138     if (CurrObj != Obj0)
1139       return false;
1140 
1141     const SCEVConstant *Diff =
1142         dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));
1143     // The pointers may not have a constant offset from each other, or SCEV
1144     // may just not be smart enough to figure out they do. Regardless,
1145     // there's nothing we can do.
1146     if (!Diff || static_cast<unsigned>(Diff->getAPInt().abs().getSExtValue()) >
1147                      (VL.size() - 1) * Size)
1148       return false;
1149 
1150     OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
1151   }
1152   SmallVector<unsigned, 4> UseOrder(VL.size());
1153   for (unsigned i = 0; i < VL.size(); i++) {
1154     UseOrder[i] = i;
1155   }
1156 
1157   // Sort the memory accesses and keep the order of their uses in UseOrder.
1158   std::sort(UseOrder.begin(), UseOrder.end(),
1159             [&OffValPairs](unsigned Left, unsigned Right) {
1160             return OffValPairs[Left].first < OffValPairs[Right].first;
1161             });
1162 
1163   for (unsigned i = 0; i < VL.size(); i++)
1164     Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
1165 
1166   // Sort UseOrder to compute the Mask.
1167   if (Mask) {
1168     Mask->reserve(VL.size());
1169     for (unsigned i = 0; i < VL.size(); i++)
1170       Mask->emplace_back(i);
1171     std::sort(Mask->begin(), Mask->end(),
1172               [&UseOrder](unsigned Left, unsigned Right) {
1173               return UseOrder[Left] < UseOrder[Right];
1174               });
1175   }
1176 
1177   return true;
1178 }
1179 
1180 
1181 /// Returns true if the memory operations \p A and \p B are consecutive.
1182 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1183                                ScalarEvolution &SE, bool CheckType) {
1184   Value *PtrA = getPointerOperand(A);
1185   Value *PtrB = getPointerOperand(B);
1186   unsigned ASA = getAddressSpaceOperand(A);
1187   unsigned ASB = getAddressSpaceOperand(B);
1188 
1189   // Check that the address spaces match and that the pointers are valid.
1190   if (!PtrA || !PtrB || (ASA != ASB))
1191     return false;
1192 
1193   // Make sure that A and B are different pointers.
1194   if (PtrA == PtrB)
1195     return false;
1196 
1197   // Make sure that A and B have the same type if required.
1198   if (CheckType && PtrA->getType() != PtrB->getType())
1199     return false;
1200 
1201   unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
1202   Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
1203   APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));
1204 
1205   APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
1206   PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1207   PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1208 
1209   //  OffsetDelta = OffsetB - OffsetA;
1210   const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
1211   const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
1212   const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
1213   const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
1214   const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
1215   // Check if they are based on the same pointer. That makes the offsets
1216   // sufficient.
1217   if (PtrA == PtrB)
1218     return OffsetDelta == Size;
1219 
1220   // Compute the necessary base pointer delta to have the necessary final delta
1221   // equal to the size.
1222   // BaseDelta = Size - OffsetDelta;
1223   const SCEV *SizeSCEV = SE.getConstant(Size);
1224   const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);
1225 
1226   // Otherwise compute the distance with SCEV between the base pointers.
1227   const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1228   const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1229   const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
1230   return X == PtrSCEVB;
1231 }
1232 
1233 bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1234   switch (Type) {
1235   case NoDep:
1236   case Forward:
1237   case BackwardVectorizable:
1238     return true;
1239 
1240   case Unknown:
1241   case ForwardButPreventsForwarding:
1242   case Backward:
1243   case BackwardVectorizableButPreventsForwarding:
1244     return false;
1245   }
1246   llvm_unreachable("unexpected DepType!");
1247 }
1248 
1249 bool MemoryDepChecker::Dependence::isBackward() const {
1250   switch (Type) {
1251   case NoDep:
1252   case Forward:
1253   case ForwardButPreventsForwarding:
1254   case Unknown:
1255     return false;
1256 
1257   case BackwardVectorizable:
1258   case Backward:
1259   case BackwardVectorizableButPreventsForwarding:
1260     return true;
1261   }
1262   llvm_unreachable("unexpected DepType!");
1263 }
1264 
1265 bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1266   return isBackward() || Type == Unknown;
1267 }
1268 
1269 bool MemoryDepChecker::Dependence::isForward() const {
1270   switch (Type) {
1271   case Forward:
1272   case ForwardButPreventsForwarding:
1273     return true;
1274 
1275   case NoDep:
1276   case Unknown:
1277   case BackwardVectorizable:
1278   case Backward:
1279   case BackwardVectorizableButPreventsForwarding:
1280     return false;
1281   }
1282   llvm_unreachable("unexpected DepType!");
1283 }
1284 
1285 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1286                                                     uint64_t TypeByteSize) {
1287   // If loads occur at a distance that is not a multiple of a feasible vector
1288   // factor store-load forwarding does not take place.
1289   // Positive dependences might cause troubles because vectorizing them might
1290   // prevent store-load forwarding making vectorized code run a lot slower.
1291   //   a[i] = a[i-3] ^ a[i-8];
1292   //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
1293   //   hence on your typical architecture store-load forwarding does not take
1294   //   place. Vectorizing in such cases does not make sense.
1295   // Store-load forwarding distance.
1296 
1297   // After this many iterations store-to-load forwarding conflicts should not
1298   // cause any slowdowns.
1299   const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1300   // Maximum vector factor.
1301   uint64_t MaxVFWithoutSLForwardIssues = std::min(
1302       VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);
1303 
1304   // Compute the smallest VF at which the store and load would be misaligned.
1305   for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1306        VF *= 2) {
1307     // If the number of vector iteration between the store and the load are
1308     // small we could incur conflicts.
1309     if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1310       MaxVFWithoutSLForwardIssues = (VF >>= 1);
1311       break;
1312     }
1313   }
1314 
1315   if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1316     DEBUG(dbgs() << "LAA: Distance " << Distance
1317                  << " that could cause a store-load forwarding conflict\n");
1318     return true;
1319   }
1320 
1321   if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
1322       MaxVFWithoutSLForwardIssues !=
1323           VectorizerParams::MaxVectorWidth * TypeByteSize)
1324     MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
1325   return false;
1326 }
1327 
1328 /// Given a non-constant (unknown) dependence-distance \p Dist between two
1329 /// memory accesses, that have the same stride whose absolute value is given
1330 /// in \p Stride, and that have the same type size \p TypeByteSize,
1331 /// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
1332 /// possible to prove statically that the dependence distance is larger
1333 /// than the range that the accesses will travel through the execution of
1334 /// the loop. If so, return true; false otherwise. This is useful for
1335 /// example in loops such as the following (PR31098):
1336 ///     for (i = 0; i < D; ++i) {
1337 ///                = out[i];
1338 ///       out[i+D] =
1339 ///     }
1340 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1341                                      const SCEV &BackedgeTakenCount,
1342                                      const SCEV &Dist, uint64_t Stride,
1343                                      uint64_t TypeByteSize) {
1344 
1345   // If we can prove that
1346   //      (**) |Dist| > BackedgeTakenCount * Step
1347   // where Step is the absolute stride of the memory accesses in bytes,
1348   // then there is no dependence.
1349   //
1350   // Ratioanle:
1351   // We basically want to check if the absolute distance (|Dist/Step|)
1352   // is >= the loop iteration count (or > BackedgeTakenCount).
1353   // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1354   // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1355   // that the dependence distance is >= VF; This is checked elsewhere.
1356   // But in some cases we can prune unknown dependence distances early, and
1357   // even before selecting the VF, and without a runtime test, by comparing
1358   // the distance against the loop iteration count. Since the vectorized code
1359   // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1360   // also guarantees that distance >= VF.
1361   //
1362   const uint64_t ByteStride = Stride * TypeByteSize;
1363   const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1364   const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1365 
1366   const SCEV *CastedDist = &Dist;
1367   const SCEV *CastedProduct = Product;
1368   uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
1369   uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());
1370 
1371   // The dependence distance can be positive/negative, so we sign extend Dist;
1372   // The multiplication of the absolute stride in bytes and the
1373   // backdgeTakenCount is non-negative, so we zero extend Product.
1374   if (DistTypeSize > ProductTypeSize)
1375     CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1376   else
1377     CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1378 
1379   // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
1380   // (If so, then we have proven (**) because |Dist| >= Dist)
1381   const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1382   if (SE.isKnownPositive(Minus))
1383     return true;
1384 
1385   // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
1386   // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1387   const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1388   Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1389   if (SE.isKnownPositive(Minus))
1390     return true;
1391 
1392   return false;
1393 }
1394 
1395 /// \brief Check the dependence for two accesses with the same stride \p Stride.
1396 /// \p Distance is the positive distance and \p TypeByteSize is type size in
1397 /// bytes.
1398 ///
1399 /// \returns true if they are independent.
1400 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1401                                           uint64_t TypeByteSize) {
1402   assert(Stride > 1 && "The stride must be greater than 1");
1403   assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1404   assert(Distance > 0 && "The distance must be non-zero");
1405 
1406   // Skip if the distance is not multiple of type byte size.
1407   if (Distance % TypeByteSize)
1408     return false;
1409 
1410   uint64_t ScaledDist = Distance / TypeByteSize;
1411 
1412   // No dependence if the scaled distance is not multiple of the stride.
1413   // E.g.
1414   //      for (i = 0; i < 1024 ; i += 4)
1415   //        A[i+2] = A[i] + 1;
1416   //
1417   // Two accesses in memory (scaled distance is 2, stride is 4):
1418   //     | A[0] |      |      |      | A[4] |      |      |      |
1419   //     |      |      | A[2] |      |      |      | A[6] |      |
1420   //
1421   // E.g.
1422   //      for (i = 0; i < 1024 ; i += 3)
1423   //        A[i+4] = A[i] + 1;
1424   //
1425   // Two accesses in memory (scaled distance is 4, stride is 3):
1426   //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
1427   //     |      |      |      |      | A[4] |      |      | A[7] |      |
1428   return ScaledDist % Stride;
1429 }
1430 
1431 MemoryDepChecker::Dependence::DepType
1432 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1433                               const MemAccessInfo &B, unsigned BIdx,
1434                               const ValueToValueMap &Strides) {
1435   assert (AIdx < BIdx && "Must pass arguments in program order");
1436 
1437   Value *APtr = A.getPointer();
1438   Value *BPtr = B.getPointer();
1439   bool AIsWrite = A.getInt();
1440   bool BIsWrite = B.getInt();
1441 
1442   // Two reads are independent.
1443   if (!AIsWrite && !BIsWrite)
1444     return Dependence::NoDep;
1445 
1446   // We cannot check pointers in different address spaces.
1447   if (APtr->getType()->getPointerAddressSpace() !=
1448       BPtr->getType()->getPointerAddressSpace())
1449     return Dependence::Unknown;
1450 
1451   int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
1452   int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);
1453 
1454   const SCEV *Src = PSE.getSCEV(APtr);
1455   const SCEV *Sink = PSE.getSCEV(BPtr);
1456 
1457   // If the induction step is negative we have to invert source and sink of the
1458   // dependence.
1459   if (StrideAPtr < 0) {
1460     std::swap(APtr, BPtr);
1461     std::swap(Src, Sink);
1462     std::swap(AIsWrite, BIsWrite);
1463     std::swap(AIdx, BIdx);
1464     std::swap(StrideAPtr, StrideBPtr);
1465   }
1466 
1467   const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
1468 
1469   DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1470                << "(Induction step: " << StrideAPtr << ")\n");
1471   DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1472                << *InstMap[BIdx] << ": " << *Dist << "\n");
1473 
1474   // Need accesses with constant stride. We don't want to vectorize
1475   // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1476   // the address space.
1477   if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1478     DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1479     return Dependence::Unknown;
1480   }
1481 
1482   Type *ATy = APtr->getType()->getPointerElementType();
1483   Type *BTy = BPtr->getType()->getPointerElementType();
1484   auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1485   uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1486   uint64_t Stride = std::abs(StrideAPtr);
1487   const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1488   if (!C) {
1489     if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
1490         isSafeDependenceDistance(DL, *(PSE.getSE()),
1491                                  *(PSE.getBackedgeTakenCount()), *Dist, Stride,
1492                                  TypeByteSize))
1493       return Dependence::NoDep;
1494 
1495     DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1496     ShouldRetryWithRuntimeCheck = true;
1497     return Dependence::Unknown;
1498   }
1499 
1500   const APInt &Val = C->getAPInt();
1501   int64_t Distance = Val.getSExtValue();
1502 
1503   // Attempt to prove strided accesses independent.
1504   if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
1505       areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1506     DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1507     return Dependence::NoDep;
1508   }
1509 
1510   // Negative distances are not plausible dependencies.
1511   if (Val.isNegative()) {
1512     bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1513     if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1514         (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1515          ATy != BTy)) {
1516       DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1517       return Dependence::ForwardButPreventsForwarding;
1518     }
1519 
1520     DEBUG(dbgs() << "LAA: Dependence is negative\n");
1521     return Dependence::Forward;
1522   }
1523 
1524   // Write to the same location with the same size.
1525   // Could be improved to assert type sizes are the same (i32 == float, etc).
1526   if (Val == 0) {
1527     if (ATy == BTy)
1528       return Dependence::Forward;
1529     DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
1530     return Dependence::Unknown;
1531   }
1532 
1533   assert(Val.isStrictlyPositive() && "Expect a positive value");
1534 
1535   if (ATy != BTy) {
1536     DEBUG(dbgs() <<
1537           "LAA: ReadWrite-Write positive dependency with different types\n");
1538     return Dependence::Unknown;
1539   }
1540 
1541   // Bail out early if passed-in parameters make vectorization not feasible.
1542   unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1543                            VectorizerParams::VectorizationFactor : 1);
1544   unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1545                            VectorizerParams::VectorizationInterleave : 1);
1546   // The minimum number of iterations for a vectorized/unrolled version.
1547   unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1548 
1549   // It's not vectorizable if the distance is smaller than the minimum distance
1550   // needed for a vectroized/unrolled version. Vectorizing one iteration in
1551   // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1552   // TypeByteSize (No need to plus the last gap distance).
1553   //
1554   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1555   //      foo(int *A) {
1556   //        int *B = (int *)((char *)A + 14);
1557   //        for (i = 0 ; i < 1024 ; i += 2)
1558   //          B[i] = A[i] + 1;
1559   //      }
1560   //
1561   // Two accesses in memory (stride is 2):
1562   //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
1563   //                              | B[0] |      | B[2] |      | B[4] |
1564   //
1565   // Distance needs for vectorizing iterations except the last iteration:
1566   // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4.
1567   // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1568   //
1569   // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1570   // 12, which is less than distance.
1571   //
1572   // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
1573   // the minimum distance needed is 28, which is greater than distance. It is
1574   // not safe to do vectorization.
1575   uint64_t MinDistanceNeeded =
1576       TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1577   if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1578     DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
1579                  << '\n');
1580     return Dependence::Backward;
1581   }
1582 
1583   // Unsafe if the minimum distance needed is greater than max safe distance.
1584   if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1585     DEBUG(dbgs() << "LAA: Failure because it needs at least "
1586                  << MinDistanceNeeded << " size in bytes");
1587     return Dependence::Backward;
1588   }
1589 
1590   // Positive distance bigger than max vectorization factor.
1591   // FIXME: Should use max factor instead of max distance in bytes, which could
1592   // not handle different types.
1593   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1594   //      void foo (int *A, char *B) {
1595   //        for (unsigned i = 0; i < 1024; i++) {
1596   //          A[i+2] = A[i] + 1;
1597   //          B[i+2] = B[i] + 1;
1598   //        }
1599   //      }
1600   //
1601   // This case is currently unsafe according to the max safe distance. If we
1602   // analyze the two accesses on array B, the max safe dependence distance
1603   // is 2. Then we analyze the accesses on array A, the minimum distance needed
1604   // is 8, which is less than 2 and forbidden vectorization, But actually
1605   // both A and B could be vectorized by 2 iterations.
1606   MaxSafeDepDistBytes =
1607       std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);
1608 
1609   bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
1610   if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1611       couldPreventStoreLoadForward(Distance, TypeByteSize))
1612     return Dependence::BackwardVectorizableButPreventsForwarding;
1613 
1614   uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
1615   DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
1616                << " with max VF = " << MaxVF << '\n');
1617   uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1618   MaxSafeRegisterWidth = std::min(MaxSafeRegisterWidth, MaxVFInBits);
1619   return Dependence::BackwardVectorizable;
1620 }
1621 
1622 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
1623                                    MemAccessInfoList &CheckDeps,
1624                                    const ValueToValueMap &Strides) {
1625 
1626   MaxSafeDepDistBytes = -1;
1627   SmallPtrSet<MemAccessInfo, 8> Visited;
1628   for (MemAccessInfo CurAccess : CheckDeps) {
1629     if (Visited.count(CurAccess))
1630       continue;
1631 
1632     // Get the relevant memory access set.
1633     EquivalenceClasses<MemAccessInfo>::iterator I =
1634       AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
1635 
1636     // Check accesses within this set.
1637     EquivalenceClasses<MemAccessInfo>::member_iterator AI =
1638         AccessSets.member_begin(I);
1639     EquivalenceClasses<MemAccessInfo>::member_iterator AE =
1640         AccessSets.member_end();
1641 
1642     // Check every access pair.
1643     while (AI != AE) {
1644       Visited.insert(*AI);
1645       EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
1646       while (OI != AE) {
1647         // Check every accessing instruction pair in program order.
1648         for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
1649              I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
1650           for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
1651                I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
1652             auto A = std::make_pair(&*AI, *I1);
1653             auto B = std::make_pair(&*OI, *I2);
1654 
1655             assert(*I1 != *I2);
1656             if (*I1 > *I2)
1657               std::swap(A, B);
1658 
1659             Dependence::DepType Type =
1660                 isDependent(*A.first, A.second, *B.first, B.second, Strides);
1661             SafeForVectorization &= Dependence::isSafeForVectorization(Type);
1662 
1663             // Gather dependences unless we accumulated MaxDependences
1664             // dependences.  In that case return as soon as we find the first
1665             // unsafe dependence.  This puts a limit on this quadratic
1666             // algorithm.
1667             if (RecordDependences) {
1668               if (Type != Dependence::NoDep)
1669                 Dependences.push_back(Dependence(A.second, B.second, Type));
1670 
1671               if (Dependences.size() >= MaxDependences) {
1672                 RecordDependences = false;
1673                 Dependences.clear();
1674                 DEBUG(dbgs() << "Too many dependences, stopped recording\n");
1675               }
1676             }
1677             if (!RecordDependences && !SafeForVectorization)
1678               return false;
1679           }
1680         ++OI;
1681       }
1682       AI++;
1683     }
1684   }
1685 
1686   DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
1687   return SafeForVectorization;
1688 }
1689 
1690 SmallVector<Instruction *, 4>
1691 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
1692   MemAccessInfo Access(Ptr, isWrite);
1693   auto &IndexVector = Accesses.find(Access)->second;
1694 
1695   SmallVector<Instruction *, 4> Insts;
1696   transform(IndexVector,
1697                  std::back_inserter(Insts),
1698                  [&](unsigned Idx) { return this->InstMap[Idx]; });
1699   return Insts;
1700 }
1701 
1702 const char *MemoryDepChecker::Dependence::DepName[] = {
1703     "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
1704     "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
1705 
1706 void MemoryDepChecker::Dependence::print(
1707     raw_ostream &OS, unsigned Depth,
1708     const SmallVectorImpl<Instruction *> &Instrs) const {
1709   OS.indent(Depth) << DepName[Type] << ":\n";
1710   OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
1711   OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
1712 }
1713 
1714 bool LoopAccessInfo::canAnalyzeLoop() {
1715   // We need to have a loop header.
1716   DEBUG(dbgs() << "LAA: Found a loop in "
1717                << TheLoop->getHeader()->getParent()->getName() << ": "
1718                << TheLoop->getHeader()->getName() << '\n');
1719 
1720   // We can only analyze innermost loops.
1721   if (!TheLoop->empty()) {
1722     DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
1723     recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
1724     return false;
1725   }
1726 
1727   // We must have a single backedge.
1728   if (TheLoop->getNumBackEdges() != 1) {
1729     DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1730     recordAnalysis("CFGNotUnderstood")
1731         << "loop control flow is not understood by analyzer";
1732     return false;
1733   }
1734 
1735   // We must have a single exiting block.
1736   if (!TheLoop->getExitingBlock()) {
1737     DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1738     recordAnalysis("CFGNotUnderstood")
1739         << "loop control flow is not understood by analyzer";
1740     return false;
1741   }
1742 
1743   // We only handle bottom-tested loops, i.e. loop in which the condition is
1744   // checked at the end of each iteration. With that we can assume that all
1745   // instructions in the loop are executed the same number of times.
1746   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
1747     DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1748     recordAnalysis("CFGNotUnderstood")
1749         << "loop control flow is not understood by analyzer";
1750     return false;
1751   }
1752 
1753   // ScalarEvolution needs to be able to find the exit count.
1754   const SCEV *ExitCount = PSE->getBackedgeTakenCount();
1755   if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
1756     recordAnalysis("CantComputeNumberOfIterations")
1757         << "could not determine number of loop iterations";
1758     DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
1759     return false;
1760   }
1761 
1762   return true;
1763 }
1764 
1765 void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
1766                                  const TargetLibraryInfo *TLI,
1767                                  DominatorTree *DT) {
1768   typedef SmallPtrSet<Value*, 16> ValueSet;
1769 
1770   // Holds the Load and Store instructions.
1771   SmallVector<LoadInst *, 16> Loads;
1772   SmallVector<StoreInst *, 16> Stores;
1773 
1774   // Holds all the different accesses in the loop.
1775   unsigned NumReads = 0;
1776   unsigned NumReadWrites = 0;
1777 
1778   PtrRtChecking->Pointers.clear();
1779   PtrRtChecking->Need = false;
1780 
1781   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
1782 
1783   // For each block.
1784   for (BasicBlock *BB : TheLoop->blocks()) {
1785     // Scan the BB and collect legal loads and stores.
1786     for (Instruction &I : *BB) {
1787       // If this is a load, save it. If this instruction can read from memory
1788       // but is not a load, then we quit. Notice that we don't handle function
1789       // calls that read or write.
1790       if (I.mayReadFromMemory()) {
1791         // Many math library functions read the rounding mode. We will only
1792         // vectorize a loop if it contains known function calls that don't set
1793         // the flag. Therefore, it is safe to ignore this read from memory.
1794         auto *Call = dyn_cast<CallInst>(&I);
1795         if (Call && getVectorIntrinsicIDForCall(Call, TLI))
1796           continue;
1797 
1798         // If the function has an explicit vectorized counterpart, we can safely
1799         // assume that it can be vectorized.
1800         if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
1801             TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
1802           continue;
1803 
1804         auto *Ld = dyn_cast<LoadInst>(&I);
1805         if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
1806           recordAnalysis("NonSimpleLoad", Ld)
1807               << "read with atomic ordering or volatile read";
1808           DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
1809           CanVecMem = false;
1810           return;
1811         }
1812         NumLoads++;
1813         Loads.push_back(Ld);
1814         DepChecker->addAccess(Ld);
1815         if (EnableMemAccessVersioning)
1816           collectStridedAccess(Ld);
1817         continue;
1818       }
1819 
1820       // Save 'store' instructions. Abort if other instructions write to memory.
1821       if (I.mayWriteToMemory()) {
1822         auto *St = dyn_cast<StoreInst>(&I);
1823         if (!St) {
1824           recordAnalysis("CantVectorizeInstruction", St)
1825               << "instruction cannot be vectorized";
1826           CanVecMem = false;
1827           return;
1828         }
1829         if (!St->isSimple() && !IsAnnotatedParallel) {
1830           recordAnalysis("NonSimpleStore", St)
1831               << "write with atomic ordering or volatile write";
1832           DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
1833           CanVecMem = false;
1834           return;
1835         }
1836         NumStores++;
1837         Stores.push_back(St);
1838         DepChecker->addAccess(St);
1839         if (EnableMemAccessVersioning)
1840           collectStridedAccess(St);
1841       }
1842     } // Next instr.
1843   } // Next block.
1844 
1845   // Now we have two lists that hold the loads and the stores.
1846   // Next, we find the pointers that they use.
1847 
1848   // Check if we see any stores. If there are no stores, then we don't
1849   // care if the pointers are *restrict*.
1850   if (!Stores.size()) {
1851     DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
1852     CanVecMem = true;
1853     return;
1854   }
1855 
1856   MemoryDepChecker::DepCandidates DependentAccesses;
1857   AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
1858                           AA, LI, DependentAccesses, *PSE);
1859 
1860   // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
1861   // multiple times on the same object. If the ptr is accessed twice, once
1862   // for read and once for write, it will only appear once (on the write
1863   // list). This is okay, since we are going to check for conflicts between
1864   // writes and between reads and writes, but not between reads and reads.
1865   ValueSet Seen;
1866 
1867   for (StoreInst *ST : Stores) {
1868     Value *Ptr = ST->getPointerOperand();
1869     // Check for store to loop invariant address.
1870     StoreToLoopInvariantAddress |= isUniform(Ptr);
1871     // If we did *not* see this pointer before, insert it to  the read-write
1872     // list. At this phase it is only a 'write' list.
1873     if (Seen.insert(Ptr).second) {
1874       ++NumReadWrites;
1875 
1876       MemoryLocation Loc = MemoryLocation::get(ST);
1877       // The TBAA metadata could have a control dependency on the predication
1878       // condition, so we cannot rely on it when determining whether or not we
1879       // need runtime pointer checks.
1880       if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
1881         Loc.AATags.TBAA = nullptr;
1882 
1883       Accesses.addStore(Loc);
1884     }
1885   }
1886 
1887   if (IsAnnotatedParallel) {
1888     DEBUG(dbgs()
1889           << "LAA: A loop annotated parallel, ignore memory dependency "
1890           << "checks.\n");
1891     CanVecMem = true;
1892     return;
1893   }
1894 
1895   for (LoadInst *LD : Loads) {
1896     Value *Ptr = LD->getPointerOperand();
1897     // If we did *not* see this pointer before, insert it to the
1898     // read list. If we *did* see it before, then it is already in
1899     // the read-write list. This allows us to vectorize expressions
1900     // such as A[i] += x;  Because the address of A[i] is a read-write
1901     // pointer. This only works if the index of A[i] is consecutive.
1902     // If the address of i is unknown (for example A[B[i]]) then we may
1903     // read a few words, modify, and write a few words, and some of the
1904     // words may be written to the same address.
1905     bool IsReadOnlyPtr = false;
1906     if (Seen.insert(Ptr).second ||
1907         !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
1908       ++NumReads;
1909       IsReadOnlyPtr = true;
1910     }
1911 
1912     MemoryLocation Loc = MemoryLocation::get(LD);
1913     // The TBAA metadata could have a control dependency on the predication
1914     // condition, so we cannot rely on it when determining whether or not we
1915     // need runtime pointer checks.
1916     if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
1917       Loc.AATags.TBAA = nullptr;
1918 
1919     Accesses.addLoad(Loc, IsReadOnlyPtr);
1920   }
1921 
1922   // If we write (or read-write) to a single destination and there are no
1923   // other reads in this loop then is it safe to vectorize.
1924   if (NumReadWrites == 1 && NumReads == 0) {
1925     DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
1926     CanVecMem = true;
1927     return;
1928   }
1929 
1930   // Build dependence sets and check whether we need a runtime pointer bounds
1931   // check.
1932   Accesses.buildDependenceSets();
1933 
1934   // Find pointers with computable bounds. We are going to use this information
1935   // to place a runtime bound check.
1936   bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
1937                                                   TheLoop, SymbolicStrides);
1938   if (!CanDoRTIfNeeded) {
1939     recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
1940     DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
1941                  << "the array bounds.\n");
1942     CanVecMem = false;
1943     return;
1944   }
1945 
1946   DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");
1947 
1948   CanVecMem = true;
1949   if (Accesses.isDependencyCheckNeeded()) {
1950     DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
1951     CanVecMem = DepChecker->areDepsSafe(
1952         DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
1953     MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
1954 
1955     if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
1956       DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
1957 
1958       // Clear the dependency checks. We assume they are not needed.
1959       Accesses.resetDepChecks(*DepChecker);
1960 
1961       PtrRtChecking->reset();
1962       PtrRtChecking->Need = true;
1963 
1964       auto *SE = PSE->getSE();
1965       CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
1966                                                  SymbolicStrides, true);
1967 
1968       // Check that we found the bounds for the pointer.
1969       if (!CanDoRTIfNeeded) {
1970         recordAnalysis("CantCheckMemDepsAtRunTime")
1971             << "cannot check memory dependencies at runtime";
1972         DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
1973         CanVecMem = false;
1974         return;
1975       }
1976 
1977       CanVecMem = true;
1978     }
1979   }
1980 
1981   if (CanVecMem)
1982     DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
1983                  << (PtrRtChecking->Need ? "" : " don't")
1984                  << " need runtime memory checks.\n");
1985   else {
1986     recordAnalysis("UnsafeMemDep")
1987         << "unsafe dependent memory operations in loop. Use "
1988            "#pragma loop distribute(enable) to allow loop distribution "
1989            "to attempt to isolate the offending operations into a separate "
1990            "loop";
1991     DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
1992   }
1993 }
1994 
1995 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
1996                                            DominatorTree *DT)  {
1997   assert(TheLoop->contains(BB) && "Unknown block used");
1998 
1999   // Blocks that do not dominate the latch need predication.
2000   BasicBlock* Latch = TheLoop->getLoopLatch();
2001   return !DT->dominates(BB, Latch);
2002 }
2003 
2004 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2005                                                            Instruction *I) {
2006   assert(!Report && "Multiple reports generated");
2007 
2008   Value *CodeRegion = TheLoop->getHeader();
2009   DebugLoc DL = TheLoop->getStartLoc();
2010 
2011   if (I) {
2012     CodeRegion = I->getParent();
2013     // If there is no debug location attached to the instruction, revert back to
2014     // using the loop's.
2015     if (I->getDebugLoc())
2016       DL = I->getDebugLoc();
2017   }
2018 
2019   Report = make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2020                                                    CodeRegion);
2021   return *Report;
2022 }
2023 
2024 bool LoopAccessInfo::isUniform(Value *V) const {
2025   auto *SE = PSE->getSE();
2026   // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
2027   // never considered uniform.
2028   // TODO: Is this really what we want? Even without FP SCEV, we may want some
2029   // trivially loop-invariant FP values to be considered uniform.
2030   if (!SE->isSCEVable(V->getType()))
2031     return false;
2032   return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
2033 }
2034 
2035 // FIXME: this function is currently a duplicate of the one in
2036 // LoopVectorize.cpp.
2037 static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
2038                                  Instruction *Loc) {
2039   if (FirstInst)
2040     return FirstInst;
2041   if (Instruction *I = dyn_cast<Instruction>(V))
2042     return I->getParent() == Loc->getParent() ? I : nullptr;
2043   return nullptr;
2044 }
2045 
2046 namespace {
2047 
2048 /// \brief IR Values for the lower and upper bounds of a pointer evolution.  We
2049 /// need to use value-handles because SCEV expansion can invalidate previously
2050 /// expanded values.  Thus expansion of a pointer can invalidate the bounds for
2051 /// a previous one.
2052 struct PointerBounds {
2053   TrackingVH<Value> Start;
2054   TrackingVH<Value> End;
2055 };
2056 
2057 } // end anonymous namespace
2058 
2059 /// \brief Expand code for the lower and upper bound of the pointer group \p CG
2060 /// in \p TheLoop.  \return the values for the bounds.
2061 static PointerBounds
2062 expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
2063              Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
2064              const RuntimePointerChecking &PtrRtChecking) {
2065   Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
2066   const SCEV *Sc = SE->getSCEV(Ptr);
2067 
2068   unsigned AS = Ptr->getType()->getPointerAddressSpace();
2069   LLVMContext &Ctx = Loc->getContext();
2070 
2071   // Use this type for pointer arithmetic.
2072   Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
2073 
2074   if (SE->isLoopInvariant(Sc, TheLoop)) {
2075     DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
2076                  << "\n");
2077     // Ptr could be in the loop body. If so, expand a new one at the correct
2078     // location.
2079     Instruction *Inst = dyn_cast<Instruction>(Ptr);
2080     Value *NewPtr = (Inst && TheLoop->contains(Inst))
2081                         ? Exp.expandCodeFor(Sc, PtrArithTy, Loc)
2082                         : Ptr;
2083     // We must return a half-open range, which means incrementing Sc.
2084     const SCEV *ScPlusOne = SE->getAddExpr(Sc, SE->getOne(PtrArithTy));
2085     Value *NewPtrPlusOne = Exp.expandCodeFor(ScPlusOne, PtrArithTy, Loc);
2086     return {NewPtr, NewPtrPlusOne};
2087   } else {
2088     Value *Start = nullptr, *End = nullptr;
2089     DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
2090     Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
2091     End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
2092     DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
2093     return {Start, End};
2094   }
2095 }
2096 
2097 /// \brief Turns a collection of checks into a collection of expanded upper and
2098 /// lower bounds for both pointers in the check.
2099 static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
2100     const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
2101     Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
2102     const RuntimePointerChecking &PtrRtChecking) {
2103   SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;
2104 
2105   // Here we're relying on the SCEV Expander's cache to only emit code for the
2106   // same bounds once.
2107   transform(
2108       PointerChecks, std::back_inserter(ChecksWithBounds),
2109       [&](const RuntimePointerChecking::PointerCheck &Check) {
2110         PointerBounds
2111           First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
2112           Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
2113         return std::make_pair(First, Second);
2114       });
2115 
2116   return ChecksWithBounds;
2117 }
2118 
2119 std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
2120     Instruction *Loc,
2121     const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
2122     const {
2123   const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2124   auto *SE = PSE->getSE();
2125   SCEVExpander Exp(*SE, DL, "induction");
2126   auto ExpandedChecks =
2127       expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking);
2128 
2129   LLVMContext &Ctx = Loc->getContext();
2130   Instruction *FirstInst = nullptr;
2131   IRBuilder<> ChkBuilder(Loc);
2132   // Our instructions might fold to a constant.
2133   Value *MemoryRuntimeCheck = nullptr;
2134 
2135   for (const auto &Check : ExpandedChecks) {
2136     const PointerBounds &A = Check.first, &B = Check.second;
2137     // Check if two pointers (A and B) conflict where conflict is computed as:
2138     // start(A) <= end(B) && start(B) <= end(A)
2139     unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
2140     unsigned AS1 = B.Start->getType()->getPointerAddressSpace();
2141 
2142     assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
2143            (AS1 == A.End->getType()->getPointerAddressSpace()) &&
2144            "Trying to bounds check pointers with different address spaces");
2145 
2146     Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
2147     Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);
2148 
2149     Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
2150     Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
2151     Value *End0 =   ChkBuilder.CreateBitCast(A.End,   PtrArithTy1, "bc");
2152     Value *End1 =   ChkBuilder.CreateBitCast(B.End,   PtrArithTy0, "bc");
2153 
2154     // [A|B].Start points to the first accessed byte under base [A|B].
2155     // [A|B].End points to the last accessed byte, plus one.
2156     // There is no conflict when the intervals are disjoint:
2157     // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
2158     //
2159     // bound0 = (B.Start < A.End)
2160     // bound1 = (A.Start < B.End)
2161     //  IsConflict = bound0 & bound1
2162     Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
2163     FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
2164     Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
2165     FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
2166     Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
2167     FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
2168     if (MemoryRuntimeCheck) {
2169       IsConflict =
2170           ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
2171       FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
2172     }
2173     MemoryRuntimeCheck = IsConflict;
2174   }
2175 
2176   if (!MemoryRuntimeCheck)
2177     return std::make_pair(nullptr, nullptr);
2178 
2179   // We have to do this trickery because the IRBuilder might fold the check to a
2180   // constant expression in which case there is no Instruction anchored in a
2181   // the block.
2182   Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
2183                                                  ConstantInt::getTrue(Ctx));
2184   ChkBuilder.Insert(Check, "memcheck.conflict");
2185   FirstInst = getFirstInst(FirstInst, Check, Loc);
2186   return std::make_pair(FirstInst, Check);
2187 }
2188 
2189 std::pair<Instruction *, Instruction *>
2190 LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
2191   if (!PtrRtChecking->Need)
2192     return std::make_pair(nullptr, nullptr);
2193 
2194   return addRuntimeChecks(Loc, PtrRtChecking->getChecks());
2195 }
2196 
2197 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2198   Value *Ptr = nullptr;
2199   if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
2200     Ptr = LI->getPointerOperand();
2201   else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
2202     Ptr = SI->getPointerOperand();
2203   else
2204     return;
2205 
2206   Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2207   if (!Stride)
2208     return;
2209 
2210   DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2211                   "versioning:");
2212   DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
2213 
2214   // Avoid adding the "Stride == 1" predicate when we know that
2215   // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2216   // or zero iteration loop, as Trip-Count <= Stride == 1.
2217   //
2218   // TODO: We are currently not making a very informed decision on when it is
2219   // beneficial to apply stride versioning. It might make more sense that the
2220   // users of this analysis (such as the vectorizer) will trigger it, based on
2221   // their specific cost considerations; For example, in cases where stride
2222   // versioning does  not help resolving memory accesses/dependences, the
2223   // vectorizer should evaluate the cost of the runtime test, and the benefit
2224   // of various possible stride specializations, considering the alternatives
2225   // of using gather/scatters (if available).
2226 
2227   const SCEV *StrideExpr = PSE->getSCEV(Stride);
2228   const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2229 
2230   // Match the types so we can compare the stride and the BETakenCount.
2231   // The Stride can be positive/negative, so we sign extend Stride;
2232   // The backdgeTakenCount is non-negative, so we zero extend BETakenCount.
2233   const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2234   uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
2235   uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
2236   const SCEV *CastedStride = StrideExpr;
2237   const SCEV *CastedBECount = BETakenCount;
2238   ScalarEvolution *SE = PSE->getSE();
2239   if (BETypeSize >= StrideTypeSize)
2240     CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2241   else
2242     CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2243   const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2244   // Since TripCount == BackEdgeTakenCount + 1, checking:
2245   // "Stride >= TripCount" is equivalent to checking:
2246   // Stride - BETakenCount > 0
2247   if (SE->isKnownPositive(StrideMinusBETaken)) {
2248     DEBUG(dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2249                     "Stride==1 predicate will imply that the loop executes "
2250                     "at most once.\n");
2251     return;
2252   }
2253   DEBUG(dbgs() << "LAA: Found a strided access that we can version.");
2254 
2255   SymbolicStrides[Ptr] = Stride;
2256   StrideSet.insert(Stride);
2257 }
2258 
2259 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2260                                const TargetLibraryInfo *TLI, AliasAnalysis *AA,
2261                                DominatorTree *DT, LoopInfo *LI)
2262     : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2263       PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)),
2264       DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
2265       NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
2266       StoreToLoopInvariantAddress(false) {
2267   if (canAnalyzeLoop())
2268     analyzeLoop(AA, LI, TLI, DT);
2269 }
2270 
2271 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2272   if (CanVecMem) {
2273     OS.indent(Depth) << "Memory dependences are safe";
2274     if (MaxSafeDepDistBytes != -1ULL)
2275       OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
2276          << " bytes";
2277     if (PtrRtChecking->Need)
2278       OS << " with run-time checks";
2279     OS << "\n";
2280   }
2281 
2282   if (Report)
2283     OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2284 
2285   if (auto *Dependences = DepChecker->getDependences()) {
2286     OS.indent(Depth) << "Dependences:\n";
2287     for (auto &Dep : *Dependences) {
2288       Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2289       OS << "\n";
2290     }
2291   } else
2292     OS.indent(Depth) << "Too many dependences, not recorded\n";
2293 
2294   // List the pair of accesses need run-time checks to prove independence.
2295   PtrRtChecking->print(OS, Depth);
2296   OS << "\n";
2297 
2298   OS.indent(Depth) << "Store to invariant address was "
2299                    << (StoreToLoopInvariantAddress ? "" : "not ")
2300                    << "found in loop.\n";
2301 
2302   OS.indent(Depth) << "SCEV assumptions:\n";
2303   PSE->getUnionPredicate().print(OS, Depth);
2304 
2305   OS << "\n";
2306 
2307   OS.indent(Depth) << "Expressions re-written:\n";
2308   PSE->print(OS, Depth);
2309 }
2310 
2311 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
2312   auto &LAI = LoopAccessInfoMap[L];
2313 
2314   if (!LAI)
2315     LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);
2316 
2317   return *LAI.get();
2318 }
2319 
2320 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
2321   LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);
2322 
2323   for (Loop *TopLevelLoop : *LI)
2324     for (Loop *L : depth_first(TopLevelLoop)) {
2325       OS.indent(2) << L->getHeader()->getName() << ":\n";
2326       auto &LAI = LAA.getInfo(L);
2327       LAI.print(OS, 4);
2328     }
2329 }
2330 
2331 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
2332   SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2333   auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2334   TLI = TLIP ? &TLIP->getTLI() : nullptr;
2335   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2336   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2337   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2338 
2339   return false;
2340 }
2341 
2342 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
2343     AU.addRequired<ScalarEvolutionWrapperPass>();
2344     AU.addRequired<AAResultsWrapperPass>();
2345     AU.addRequired<DominatorTreeWrapperPass>();
2346     AU.addRequired<LoopInfoWrapperPass>();
2347 
2348     AU.setPreservesAll();
2349 }
2350 
2351 char LoopAccessLegacyAnalysis::ID = 0;
2352 static const char laa_name[] = "Loop Access Analysis";
2353 #define LAA_NAME "loop-accesses"
2354 
2355 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2356 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2357 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
2358 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2359 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2360 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2361 
2362 AnalysisKey LoopAccessAnalysis::Key;
2363 
2364 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
2365                                        LoopStandardAnalysisResults &AR) {
2366   return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
2367 }
2368 
2369 namespace llvm {
2370 
2371   Pass *createLAAPass() {
2372     return new LoopAccessLegacyAnalysis();
2373   }
2374 
2375 } // end namespace llvm
2376