//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
    VectorizationInterleave("force-vector-interleave", cl::Hidden,
                            cl::desc("Sets the vectorization interleave count. "
" 83 "Zero is autoselect."), 84 cl::location( 85 VectorizerParams::VectorizationInterleave)); 86 unsigned VectorizerParams::VectorizationInterleave; 87 88 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 89 "runtime-memory-check-threshold", cl::Hidden, 90 cl::desc("When performing memory disambiguation checks at runtime do not " 91 "generate more than this number of comparisons (default = 8)."), 92 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 93 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 94 95 /// \brief The maximum iterations used to merge memory checks 96 static cl::opt<unsigned> MemoryCheckMergeThreshold( 97 "memory-check-merge-threshold", cl::Hidden, 98 cl::desc("Maximum number of comparisons done when trying to merge " 99 "runtime memory checks. (default = 100)"), 100 cl::init(100)); 101 102 /// Maximum SIMD width. 103 const unsigned VectorizerParams::MaxVectorWidth = 64; 104 105 /// \brief We collect dependences up to this threshold. 106 static cl::opt<unsigned> 107 MaxDependences("max-dependences", cl::Hidden, 108 cl::desc("Maximum number of dependences collected by " 109 "loop-access analysis (default = 100)"), 110 cl::init(100)); 111 112 /// This enables versioning on the strides of symbolically striding memory 113 /// accesses in code like the following. 114 /// for (i = 0; i < N; ++i) 115 /// A[i * Stride1] += B[i * Stride2] ... 116 /// 117 /// Will be roughly translated to 118 /// if (Stride1 == 1 && Stride2 == 1) { 119 /// for (i = 0; i < N; i+=4) 120 /// A[i:i+3] += ... 121 /// } else 122 /// ... 123 static cl::opt<bool> EnableMemAccessVersioning( 124 "enable-mem-access-versioning", cl::init(true), cl::Hidden, 125 cl::desc("Enable symbolic stride memory access versioning")); 126 127 /// \brief Enable store-to-load forwarding conflict detection. This option can 128 /// be disabled for correctness testing. 129 static cl::opt<bool> EnableForwardingConflictDetection( 130 "store-to-load-forwarding-conflict-detection", cl::Hidden, 131 cl::desc("Enable conflict detection in loop-access analysis"), 132 cl::init(true)); 133 134 bool VectorizerParams::isInterleaveForced() { 135 return ::VectorizationInterleave.getNumOccurrences() > 0; 136 } 137 138 Value *llvm::stripIntegerCast(Value *V) { 139 if (auto *CI = dyn_cast<CastInst>(V)) 140 if (CI->getOperand(0)->getType()->isIntegerTy()) 141 return CI->getOperand(0); 142 return V; 143 } 144 145 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 146 const ValueToValueMap &PtrToStride, 147 Value *Ptr, Value *OrigPtr) { 148 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 149 150 // If there is an entry in the map return the SCEV of the pointer with the 151 // symbolic stride replaced by one. 152 ValueToValueMap::const_iterator SI = 153 PtrToStride.find(OrigPtr ? OrigPtr : Ptr); 154 if (SI != PtrToStride.end()) { 155 Value *StrideVal = SI->second; 156 157 // Strip casts. 158 StrideVal = stripIntegerCast(StrideVal); 159 160 ScalarEvolution *SE = PSE.getSE(); 161 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 162 const auto *CT = 163 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 164 165 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 166 auto *Expr = PSE.getSCEV(Ptr); 167 168 DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr 169 << "\n"); 170 return Expr; 171 } 172 173 // Otherwise, just return the SCEV of the original pointer. 174 return OrigSCEV; 175 } 176 177 /// Calculate Start and End points of memory access. 
/// Let's assume A is the first access and B is a memory access on the N-th
/// loop iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of a single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
    // Add the size of the pointed element to ScEnd.
    unsigned EltSize =
        Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
    const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
    ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
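///
/// For example, if I is %base and J is (%base + 8), Diff is the constant 8,
/// which is non-negative, so I is the minimum; if instead J were (%base - 8),
/// Diff would be negative and J would be returned.  If Diff is not a constant
/// (e.g. two unrelated base pointers), nullptr is returned.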
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain number of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable.  If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {

/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
        PSE(PSE) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used.  Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks.  Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.  When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}

/// \brief Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  return Stride == 1;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
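  // (Conceptually, each generated check compares the [Low, High) intervals of
  // two checking groups: there is no conflict iff G2.Low >= G1.High ||
  // G1.Low >= G2.High, mirroring the per-pointer rule described above
  // RuntimePointerChecking::insert.)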
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckWrap || isNoWrap(PSE, StridesMap, Ptr, TheLoop))) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them.  But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer
    // for which we couldn't find the bounds but we don't actually need to emit
    // any checks so it does not matter.
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 &&
                                                 NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers).  Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +=").  Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read, only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases anything; don't join sets for pointers
            // that have "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
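  // (E.g. a GEP index of the form "add nsw %iv, 7", where the SCEV of %iv is
  // an NSW AddRec over this loop, is proven non-wrapping by the check below.)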
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume a constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr
                 << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                 << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap by definition.  The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                   << "LAA:   Pointer: " << *Ptr << "\n"
                   << "LAA:   SCEV: " << *AR << "\n"
                   << "LAA:   Added an overflow assumption\n");
    } else {
      DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                   << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                 << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
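  // (getSExtValue below can only represent step constants that fit in 64
  // bits.)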
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space".  In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                   << "inbounds or in address space 0 may wrap:\n"
                   << "LAA:   Pointer: " << *Ptr << "\n"
                   << "LAA:   SCEV: " << *AR << "\n"
                   << "LAA:   Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}

/// Take the pointer operand from the Load/Store instruction.
/// Returns NULL if this is not a valid Load/Store instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool llvm::sortMemAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                           ScalarEvolution &SE,
                           SmallVectorImpl<Value *> &Sorted) {
  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
  OffValPairs.reserve(VL.size());
  Sorted.reserve(VL.size());

  // Walk over the pointers, and map each of them to an offset relative to
  // the first pointer in the array.
  Value *Ptr0 = getPointerOperand(VL[0]);
  const SCEV *Scev0 = SE.getSCEV(Ptr0);
  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);

  for (auto *Val : VL) {
    // The only kind of access we care about here is load.
    if (!isa<LoadInst>(Val))
      return false;

    Value *Ptr = getPointerOperand(Val);
    assert(Ptr && "Expected value to have a pointer operand.");

    // If a pointer refers to a different underlying object, bail - the
    // pointers are by definition incomparable.
    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
    if (CurrObj != Obj0)
      return false;

    const SCEVConstant *Diff =
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));

    // The pointers may not have a constant offset from each other, or SCEV
    // may just not be smart enough to figure out they do. Regardless,
    // there's nothing we can do.
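    // (E.g. two loads from A[0] and A[n] for an unknown n have no constant
    // SCEV difference.)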
    if (!Diff)
      return false;

    OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
  }

  std::sort(OffValPairs.begin(), OffValPairs.end(),
            [](const std::pair<int64_t, Value *> &Left,
               const std::pair<int64_t, Value *> &Right) {
              return Left.first < Right.first;
            });

  for (auto &it : OffValPairs)
    Sorted.push_back(it.second);

  return true;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  // OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
  const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
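  // (E.g. with i32 elements, Size = 4: if the accumulated constant offsets
  // were equal, BaseDelta is 4, and the accesses are consecutive exactly when
  // SCEV(PtrB) == SCEV(PtrA) + 4.)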
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}

bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place.  Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small we could incur conflicts.
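    // (E.g. for the "a[i] = a[i-3] ^ a[i-8]" example above with 4-byte
    // elements, Distance = 12: at VF = 8 we have 12 % 8 != 0 and
    // 12 / 8 = 1 < 32 iterations, so MaxVFWithoutSLForwardIssues gets
    // clamped to 4 below.)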
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it
/// is possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {
  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
  //
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSize > ProductTypeSize)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  return false;
}

/// \brief Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
  int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..."
  // and similar code or pointer arithmetic that could wrap in the address
  // space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
  uint64_t Stride = std::abs(StrideAPtr);
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
        isSafeDependenceDistance(DL, *(PSE.getSE()),
                                 *(PSE.getBackedgeTakenCount()), *Dist, Stride,
                                 TypeByteSize))
      return Dependence::NoDep;

    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }

  const APInt &Val = C->getAPInt();
  int64_t Distance = Val.getSExtValue();

  // Attempt to prove strided accesses independent.
  if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
      areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
    DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
         ATy != BTy)) {
      DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs()
          << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // Distance needed for vectorizing all iterations except the last:
  // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than the distance of 14.
  //
  // If MinNumIter is 4 (say, if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than the
  // distance. It is not safe to vectorize.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which
  // cannot handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, for which the minimum
  // distance needed is 8, which is greater than 2, and vectorization is
  // forbidden. But actually both A and B could be vectorized with a factor
  // of 2.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {
  MaxSafeDepDistBytes = -1;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
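    // (E.g. for an equivalence class with members {A, B, C}, the pairs
    // visited below are (A,B), (A,C) and (B,C); for each pair, every pair of
    // accessing instructions is then checked in program order.)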
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return SafeForVectorization;
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop in "
               << TheLoop->getHeader()->getParent()->getName() << ": "
               << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We must have a single exiting block.
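  // (For example, a loop whose body can break out of the loop from two
  // different blocks has two exiting blocks; getExitingBlock() then returns
  // null and the loop is rejected below.)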
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores.
    for (Instruction &I : *BB) {
      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
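      // (For example, a call to memset() or to an unknown external function
      // has mayWriteToMemory() set but is not a StoreInst, so the
      // dyn_cast below fails and the analysis gives up on the loop.)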
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioning)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (Stores.empty()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it into the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: The loop is annotated parallel; ignoring memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it into the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x; because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
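    // For illustration, the two cases described above:
    //   for (i = 0; i < N; ++i)
    //     A[i] += x;    // A[i] is already on the read-write list; the
    //                   // consecutive index makes this analyzable.
    //   for (i = 0; i < N; ++i)
    //     A[B[i]] += x; // index loaded from memory; reads and writes may
    //                   // overlap unpredictably.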
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (PtrRtChecking->Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
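  // (For example, in
  //    for (i = 0; i < N; ++i)
  //      if (C[i])
  //        A[i] = 0;
  //  the block storing to A[i] executes conditionally and does not dominate
  //  the latch, so its accesses must be treated as predicated.)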
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
                                                   CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return SE->isLoopInvariant(SE->getSCEV(V), TheLoop);
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

namespace {

/// \brief IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};

} // end anonymous namespace

/// \brief Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  unsigned AS = Ptr->getType()->getPointerAddressSpace();
  LLVMContext &Ctx = Loc->getContext();

  // Use this type for pointer arithmetic.
  Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
                 << "\n");
    // Ptr could be in the loop body. If so, expand a new one at the correct
    // location.
    Instruction *Inst = dyn_cast<Instruction>(Ptr);
    Value *NewPtr = (Inst && TheLoop->contains(Inst))
                        ? Exp.expandCodeFor(Sc, PtrArithTy, Loc)
                        : Ptr;
    return {NewPtr, NewPtr};
  } else {
    Value *Start = nullptr, *End = nullptr;
    DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
    return {Start, End};
  }
}

/// \brief Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  transform(
      PointerChecks, std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
          First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
          Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  auto *SE = PSE->getSE();
  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) < end(B) && start(B) < end(A) (the end bounds are exclusive).
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    // [A|B].Start points to the first accessed byte under base [A|B].
    // [A|B].End points to the last accessed byte, plus one.
    // There is no conflict when the intervals are disjoint:
    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
    //
    // bound0 = (A.Start < B.End)
    // bound1 = (B.Start < A.End)
    // IsConflict = bound0 & bound1
    Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the block.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking->Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking->getChecks());
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  DEBUG(dbgs() << "LAA: Found a strided access that we can version");
  DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI;
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}

bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

AnalysisKey LoopAccessAnalysis::Key;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}

namespace llvm {

Pass *createLAAPass() {
  return new LoopAccessLegacyAnalysis();
}

} // end namespace llvm
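
// Usage sketch (illustrative; "input.ll" is a placeholder file name): with
// the legacy pass manager, the per-loop results printed by
// LoopAccessInfo::print() can be dumped with, e.g.:
//   opt -loop-accesses -analyze input.ll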