1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The implementation for the loop memory dependence that was originally 10 // developed for the loop vectorizer. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Analysis/LoopAccessAnalysis.h" 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/DepthFirstIterator.h" 18 #include "llvm/ADT/EquivalenceClasses.h" 19 #include "llvm/ADT/PointerIntPair.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/iterator_range.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Analysis/AliasSetTracker.h" 28 #include "llvm/Analysis/LoopAnalysisManager.h" 29 #include "llvm/Analysis/LoopInfo.h" 30 #include "llvm/Analysis/MemoryLocation.h" 31 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 32 #include "llvm/Analysis/ScalarEvolution.h" 33 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 34 #include "llvm/Analysis/TargetLibraryInfo.h" 35 #include "llvm/Analysis/ValueTracking.h" 36 #include "llvm/Analysis/VectorUtils.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/Constants.h" 39 #include "llvm/IR/DataLayout.h" 40 #include "llvm/IR/DebugLoc.h" 41 #include "llvm/IR/DerivedTypes.h" 42 #include "llvm/IR/DiagnosticInfo.h" 43 #include "llvm/IR/Dominators.h" 44 #include "llvm/IR/Function.h" 45 #include "llvm/IR/InstrTypes.h" 46 #include "llvm/IR/Instruction.h" 47 #include "llvm/IR/Instructions.h" 48 #include "llvm/IR/Operator.h" 49 #include "llvm/IR/PassManager.h" 50 #include "llvm/IR/Type.h" 51 #include "llvm/IR/Value.h" 52 #include "llvm/IR/ValueHandle.h" 53 #include "llvm/InitializePasses.h" 54 #include "llvm/Pass.h" 55 #include "llvm/Support/Casting.h" 56 #include "llvm/Support/CommandLine.h" 57 #include "llvm/Support/Debug.h" 58 #include "llvm/Support/ErrorHandling.h" 59 #include "llvm/Support/raw_ostream.h" 60 #include <algorithm> 61 #include <cassert> 62 #include <cstdint> 63 #include <cstdlib> 64 #include <iterator> 65 #include <utility> 66 #include <vector> 67 68 using namespace llvm; 69 70 #define DEBUG_TYPE "loop-accesses" 71 72 static cl::opt<unsigned, true> 73 VectorizationFactor("force-vector-width", cl::Hidden, 74 cl::desc("Sets the SIMD width. Zero is autoselect."), 75 cl::location(VectorizerParams::VectorizationFactor)); 76 unsigned VectorizerParams::VectorizationFactor; 77 78 static cl::opt<unsigned, true> 79 VectorizationInterleave("force-vector-interleave", cl::Hidden, 80 cl::desc("Sets the vectorization interleave count. 
" 81 "Zero is autoselect."), 82 cl::location( 83 VectorizerParams::VectorizationInterleave)); 84 unsigned VectorizerParams::VectorizationInterleave; 85 86 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 87 "runtime-memory-check-threshold", cl::Hidden, 88 cl::desc("When performing memory disambiguation checks at runtime do not " 89 "generate more than this number of comparisons (default = 8)."), 90 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 91 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 92 93 /// The maximum iterations used to merge memory checks 94 static cl::opt<unsigned> MemoryCheckMergeThreshold( 95 "memory-check-merge-threshold", cl::Hidden, 96 cl::desc("Maximum number of comparisons done when trying to merge " 97 "runtime memory checks. (default = 100)"), 98 cl::init(100)); 99 100 /// Maximum SIMD width. 101 const unsigned VectorizerParams::MaxVectorWidth = 64; 102 103 /// We collect dependences up to this threshold. 104 static cl::opt<unsigned> 105 MaxDependences("max-dependences", cl::Hidden, 106 cl::desc("Maximum number of dependences collected by " 107 "loop-access analysis (default = 100)"), 108 cl::init(100)); 109 110 /// This enables versioning on the strides of symbolically striding memory 111 /// accesses in code like the following. 112 /// for (i = 0; i < N; ++i) 113 /// A[i * Stride1] += B[i * Stride2] ... 114 /// 115 /// Will be roughly translated to 116 /// if (Stride1 == 1 && Stride2 == 1) { 117 /// for (i = 0; i < N; i+=4) 118 /// A[i:i+3] += ... 119 /// } else 120 /// ... 121 static cl::opt<bool> EnableMemAccessVersioning( 122 "enable-mem-access-versioning", cl::init(true), cl::Hidden, 123 cl::desc("Enable symbolic stride memory access versioning")); 124 125 /// Enable store-to-load forwarding conflict detection. This option can 126 /// be disabled for correctness testing. 127 static cl::opt<bool> EnableForwardingConflictDetection( 128 "store-to-load-forwarding-conflict-detection", cl::Hidden, 129 cl::desc("Enable conflict detection in loop-access analysis"), 130 cl::init(true)); 131 132 bool VectorizerParams::isInterleaveForced() { 133 return ::VectorizationInterleave.getNumOccurrences() > 0; 134 } 135 136 Value *llvm::stripIntegerCast(Value *V) { 137 if (auto *CI = dyn_cast<CastInst>(V)) 138 if (CI->getOperand(0)->getType()->isIntegerTy()) 139 return CI->getOperand(0); 140 return V; 141 } 142 143 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 144 const ValueToValueMap &PtrToStride, 145 Value *Ptr, Value *OrigPtr) { 146 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 147 148 // If there is an entry in the map return the SCEV of the pointer with the 149 // symbolic stride replaced by one. 150 ValueToValueMap::const_iterator SI = 151 PtrToStride.find(OrigPtr ? OrigPtr : Ptr); 152 if (SI != PtrToStride.end()) { 153 Value *StrideVal = SI->second; 154 155 // Strip casts. 156 StrideVal = stripIntegerCast(StrideVal); 157 158 ScalarEvolution *SE = PSE.getSE(); 159 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 160 const auto *CT = 161 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 162 163 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 164 auto *Expr = PSE.getSCEV(Ptr); 165 166 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV 167 << " by: " << *Expr << "\n"); 168 return Expr; 169 } 170 171 // Otherwise, just return the SCEV of the original pointer. 
172 return OrigSCEV; 173 } 174 175 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup( 176 unsigned Index, RuntimePointerChecking &RtCheck) 177 : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End), 178 Low(RtCheck.Pointers[Index].Start) { 179 Members.push_back(Index); 180 } 181 182 /// Calculate Start and End points of memory access. 183 /// Let's assume A is the first access and B is a memory access on N-th loop 184 /// iteration. Then B is calculated as: 185 /// B = A + Step*N . 186 /// Step value may be positive or negative. 187 /// N is a calculated back-edge taken count: 188 /// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0 189 /// Start and End points are calculated in the following way: 190 /// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt, 191 /// where SizeOfElt is the size of single memory access in bytes. 192 /// 193 /// There is no conflict when the intervals are disjoint: 194 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End) 195 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr, 196 unsigned DepSetId, unsigned ASId, 197 const ValueToValueMap &Strides, 198 PredicatedScalarEvolution &PSE) { 199 // Get the stride replaced scev. 200 const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 201 ScalarEvolution *SE = PSE.getSE(); 202 203 const SCEV *ScStart; 204 const SCEV *ScEnd; 205 206 if (SE->isLoopInvariant(Sc, Lp)) 207 ScStart = ScEnd = Sc; 208 else { 209 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); 210 assert(AR && "Invalid addrec expression"); 211 const SCEV *Ex = PSE.getBackedgeTakenCount(); 212 213 ScStart = AR->getStart(); 214 ScEnd = AR->evaluateAtIteration(Ex, *SE); 215 const SCEV *Step = AR->getStepRecurrence(*SE); 216 217 // For expressions with negative step, the upper bound is ScStart and the 218 // lower bound is ScEnd. 219 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) { 220 if (CStep->getValue()->isNegative()) 221 std::swap(ScStart, ScEnd); 222 } else { 223 // Fallback case: the step is not constant, but we can still 224 // get the upper and lower bounds of the interval by using min/max 225 // expressions. 226 ScStart = SE->getUMinExpr(ScStart, ScEnd); 227 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd); 228 } 229 // Add the size of the pointed element to ScEnd. 
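    // E.g. (a sketch): for the recurrence {%A,+,4}<%loop> over i32 elements
    // with a backedge-taken count of n, this yields Start = %A and
    // End = (%A + 4*n) + 4; End is one past the last byte accessed, so two
    // intervals conflict exactly when they overlap.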
230 unsigned EltSize = 231 Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8; 232 const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize); 233 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV); 234 } 235 236 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc); 237 } 238 239 SmallVector<RuntimePointerCheck, 4> 240 RuntimePointerChecking::generateChecks() const { 241 SmallVector<RuntimePointerCheck, 4> Checks; 242 243 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 244 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) { 245 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I]; 246 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J]; 247 248 if (needsChecking(CGI, CGJ)) 249 Checks.push_back(std::make_pair(&CGI, &CGJ)); 250 } 251 } 252 return Checks; 253 } 254 255 void RuntimePointerChecking::generateChecks( 256 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 257 assert(Checks.empty() && "Checks is not empty"); 258 groupChecks(DepCands, UseDependencies); 259 Checks = generateChecks(); 260 } 261 262 bool RuntimePointerChecking::needsChecking( 263 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const { 264 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I) 265 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J) 266 if (needsChecking(M.Members[I], N.Members[J])) 267 return true; 268 return false; 269 } 270 271 /// Compare \p I and \p J and return the minimum. 272 /// Return nullptr in case we couldn't find an answer. 273 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J, 274 ScalarEvolution *SE) { 275 const SCEV *Diff = SE->getMinusSCEV(J, I); 276 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff); 277 278 if (!C) 279 return nullptr; 280 if (C->getValue()->isNegative()) 281 return J; 282 return I; 283 } 284 285 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index) { 286 const SCEV *Start = RtCheck.Pointers[Index].Start; 287 const SCEV *End = RtCheck.Pointers[Index].End; 288 289 // Compare the starts and ends with the known minimum and maximum 290 // of this set. We need to know how we compare against the min/max 291 // of the set in order to be able to emit memchecks. 292 const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE); 293 if (!Min0) 294 return false; 295 296 const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE); 297 if (!Min1) 298 return false; 299 300 // Update the low bound expression if we've found a new min value. 301 if (Min0 == Start) 302 Low = Start; 303 304 // Update the high bound expression if we've found a new max value. 305 if (Min1 != End) 306 High = End; 307 308 Members.push_back(Index); 309 return true; 310 } 311 312 void RuntimePointerChecking::groupChecks( 313 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 314 // We build the groups from dependency candidates equivalence classes 315 // because: 316 // - We know that pointers in the same equivalence class share 317 // the same underlying object and therefore there is a chance 318 // that we can compare pointers 319 // - We wouldn't be able to merge two pointers for which we need 320 // to emit a memcheck. The classes in DepCands are already 321 // conveniently built such that no two pointers in the same 322 // class need checking against each other. 
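  //
  // E.g. (a sketch): if one equivalence class holds the pointers %A, %A + 4
  // and %A + 8, all three can be folded into a single group with bounds
  // [Low = %A, High = %A + 8 + EltSize), so another group is compared against
  // one interval instead of three.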
323 324 // We use the following (greedy) algorithm to construct the groups 325 // For every pointer in the equivalence class: 326 // For each existing group: 327 // - if the difference between this pointer and the min/max bounds 328 // of the group is a constant, then make the pointer part of the 329 // group and update the min/max bounds of that group as required. 330 331 CheckingGroups.clear(); 332 333 // If we need to check two pointers to the same underlying object 334 // with a non-constant difference, we shouldn't perform any pointer 335 // grouping with those pointers. This is because we can easily get 336 // into cases where the resulting check would return false, even when 337 // the accesses are safe. 338 // 339 // The following example shows this: 340 // for (i = 0; i < 1000; ++i) 341 // a[5000 + i * m] = a[i] + a[i + 9000] 342 // 343 // Here grouping gives a check of (5000, 5000 + 1000 * m) against 344 // (0, 10000) which is always false. However, if m is 1, there is no 345 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows 346 // us to perform an accurate check in this case. 347 // 348 // The above case requires that we have an UnknownDependence between 349 // accesses to the same underlying object. This cannot happen unless 350 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies 351 // is also false. In this case we will use the fallback path and create 352 // separate checking groups for all pointers. 353 354 // If we don't have the dependency partitions, construct a new 355 // checking pointer group for each pointer. This is also required 356 // for correctness, because in this case we can have checking between 357 // pointers to the same underlying object. 358 if (!UseDependencies) { 359 for (unsigned I = 0; I < Pointers.size(); ++I) 360 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this)); 361 return; 362 } 363 364 unsigned TotalComparisons = 0; 365 366 DenseMap<Value *, unsigned> PositionMap; 367 for (unsigned Index = 0; Index < Pointers.size(); ++Index) 368 PositionMap[Pointers[Index].PointerValue] = Index; 369 370 // We need to keep track of what pointers we've already seen so we 371 // don't process them twice. 372 SmallSet<unsigned, 2> Seen; 373 374 // Go through all equivalence classes, get the "pointer check groups" 375 // and add them to the overall solution. We use the order in which accesses 376 // appear in 'Pointers' to enforce determinism. 377 for (unsigned I = 0; I < Pointers.size(); ++I) { 378 // We've seen this pointer before, and therefore already processed 379 // its equivalence class. 380 if (Seen.count(I)) 381 continue; 382 383 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue, 384 Pointers[I].IsWritePtr); 385 386 SmallVector<RuntimeCheckingPtrGroup, 2> Groups; 387 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access)); 388 389 // Because DepCands is constructed by visiting accesses in the order in 390 // which they appear in alias sets (which is deterministic) and the 391 // iteration order within an equivalence class member is only dependent on 392 // the order in which unions and insertions are performed on the 393 // equivalence class, the iteration order is deterministic. 394 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end(); 395 MI != ME; ++MI) { 396 unsigned Pointer = PositionMap[MI->getPointer()]; 397 bool Merged = false; 398 // Mark this pointer as seen. 
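      // (Each equivalence class is fully processed the first time one of its
      // members is reached, so marking members here lets the outer loop over
      // 'Pointers' skip them later.)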
399 Seen.insert(Pointer); 400 401 // Go through all the existing sets and see if we can find one 402 // which can include this pointer. 403 for (RuntimeCheckingPtrGroup &Group : Groups) { 404 // Don't perform more than a certain amount of comparisons. 405 // This should limit the cost of grouping the pointers to something 406 // reasonable. If we do end up hitting this threshold, the algorithm 407 // will create separate groups for all remaining pointers. 408 if (TotalComparisons > MemoryCheckMergeThreshold) 409 break; 410 411 TotalComparisons++; 412 413 if (Group.addPointer(Pointer)) { 414 Merged = true; 415 break; 416 } 417 } 418 419 if (!Merged) 420 // We couldn't add this pointer to any existing set or the threshold 421 // for the number of comparisons has been reached. Create a new group 422 // to hold the current pointer. 423 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this)); 424 } 425 426 // We've computed the grouped checks for this partition. 427 // Save the results and continue with the next one. 428 llvm::copy(Groups, std::back_inserter(CheckingGroups)); 429 } 430 } 431 432 bool RuntimePointerChecking::arePointersInSamePartition( 433 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1, 434 unsigned PtrIdx2) { 435 return (PtrToPartition[PtrIdx1] != -1 && 436 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]); 437 } 438 439 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const { 440 const PointerInfo &PointerI = Pointers[I]; 441 const PointerInfo &PointerJ = Pointers[J]; 442 443 // No need to check if two readonly pointers intersect. 444 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr) 445 return false; 446 447 // Only need to check pointers between two different dependency sets. 448 if (PointerI.DependencySetId == PointerJ.DependencySetId) 449 return false; 450 451 // Only need to check pointers in the same alias set. 452 if (PointerI.AliasSetId != PointerJ.AliasSetId) 453 return false; 454 455 return true; 456 } 457 458 void RuntimePointerChecking::printChecks( 459 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks, 460 unsigned Depth) const { 461 unsigned N = 0; 462 for (const auto &Check : Checks) { 463 const auto &First = Check.first->Members, &Second = Check.second->Members; 464 465 OS.indent(Depth) << "Check " << N++ << ":\n"; 466 467 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n"; 468 for (unsigned K = 0; K < First.size(); ++K) 469 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n"; 470 471 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n"; 472 for (unsigned K = 0; K < Second.size(); ++K) 473 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n"; 474 } 475 } 476 477 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const { 478 479 OS.indent(Depth) << "Run-time memory checks:\n"; 480 printChecks(OS, Checks, Depth); 481 482 OS.indent(Depth) << "Grouped accesses:\n"; 483 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 484 const auto &CG = CheckingGroups[I]; 485 486 OS.indent(Depth + 2) << "Group " << &CG << ":\n"; 487 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High 488 << ")\n"; 489 for (unsigned J = 0; J < CG.Members.size(); ++J) { 490 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr 491 << "\n"; 492 } 493 } 494 } 495 496 namespace { 497 498 /// Analyses memory accesses in a loop. 
499 /// 500 /// Checks whether run time pointer checks are needed and builds sets for data 501 /// dependence checking. 502 class AccessAnalysis { 503 public: 504 /// Read or write access location. 505 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo; 506 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList; 507 508 AccessAnalysis(const DataLayout &Dl, Loop *TheLoop, AliasAnalysis *AA, 509 LoopInfo *LI, MemoryDepChecker::DepCandidates &DA, 510 PredicatedScalarEvolution &PSE) 511 : DL(Dl), TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA), 512 IsRTCheckAnalysisNeeded(false), PSE(PSE) {} 513 514 /// Register a load and whether it is only read from. 515 void addLoad(MemoryLocation &Loc, bool IsReadOnly) { 516 Value *Ptr = const_cast<Value*>(Loc.Ptr); 517 AST.add(Ptr, LocationSize::unknown(), Loc.AATags); 518 Accesses.insert(MemAccessInfo(Ptr, false)); 519 if (IsReadOnly) 520 ReadOnlyPtr.insert(Ptr); 521 } 522 523 /// Register a store. 524 void addStore(MemoryLocation &Loc) { 525 Value *Ptr = const_cast<Value*>(Loc.Ptr); 526 AST.add(Ptr, LocationSize::unknown(), Loc.AATags); 527 Accesses.insert(MemAccessInfo(Ptr, true)); 528 } 529 530 /// Check if we can emit a run-time no-alias check for \p Access. 531 /// 532 /// Returns true if we can emit a run-time no alias check for \p Access. 533 /// If we can check this access, this also adds it to a dependence set and 534 /// adds a run-time to check for it to \p RtCheck. If \p Assume is true, 535 /// we will attempt to use additional run-time checks in order to get 536 /// the bounds of the pointer. 537 bool createCheckForAccess(RuntimePointerChecking &RtCheck, 538 MemAccessInfo Access, 539 const ValueToValueMap &Strides, 540 DenseMap<Value *, unsigned> &DepSetId, 541 Loop *TheLoop, unsigned &RunningDepId, 542 unsigned ASId, bool ShouldCheckStride, 543 bool Assume); 544 545 /// Check whether we can check the pointers at runtime for 546 /// non-intersection. 547 /// 548 /// Returns true if we need no check or if we do and we can generate them 549 /// (i.e. the pointers have computable bounds). 550 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE, 551 Loop *TheLoop, const ValueToValueMap &Strides, 552 bool ShouldCheckWrap = false); 553 554 /// Goes over all memory accesses, checks whether a RT check is needed 555 /// and builds sets of dependent accesses. 556 void buildDependenceSets() { 557 processMemAccesses(); 558 } 559 560 /// Initial processing of memory accesses determined that we need to 561 /// perform dependency checking. 562 /// 563 /// Note that this can later be cleared if we retry memcheck analysis without 564 /// dependency checking (i.e. FoundNonConstantDistanceDependence). 565 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); } 566 567 /// We decided that no dependence analysis would be used. Reset the state. 568 void resetDepChecks(MemoryDepChecker &DepChecker) { 569 CheckDeps.clear(); 570 DepChecker.clearDependences(); 571 } 572 573 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; } 574 575 private: 576 typedef SetVector<MemAccessInfo> PtrAccessSet; 577 578 /// Go over all memory access and check whether runtime pointer checks 579 /// are needed and build sets of dependency check candidates. 580 void processMemAccesses(); 581 582 /// Set of all accesses. 583 PtrAccessSet Accesses; 584 585 const DataLayout &DL; 586 587 /// The loop being checked. 588 const Loop *TheLoop; 589 590 /// List of accesses that need a further dependence check. 
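  /// (Roughly: accesses that belong to an alias set which also contains a
  /// write; see processMemAccesses().)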
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, bool Assume) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
    return false;

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
    auto *Expr = PSE.getSCEV(Ptr);
    if (!Assume || !isa<SCEVAddRecExpr>(Expr))
      return false;
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
  }

  // The id of the dependence set.
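  // (Accesses that may depend on each other - members of one DepCands
  // equivalence class - share an id: they are analyzed by the dependence
  // checker instead, so no runtime check is emitted between accesses with the
  // same id.)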
  unsigned DepId;

  if (isDependencyCheckNeeded()) {
    Value *Leader = DepCands.getLeaderValue(Access).getPointer();
    unsigned &LeaderId = DepSetId[Leader];
    if (!LeaderId)
      LeaderId = RunningDepId++;
    DepId = LeaderId;
  } else
    // Each access has its own dependence set.
    DepId = RunningDepId++;

  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
  LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                RunningDepId, ASId, ShouldCheckWrap, false)) {
        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        Retries.push_back(Access);
        CanDoAliasSetRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a
    // pointer for which we couldn't find the bounds but we don't actually need
    // to emit any checks so it does not matter.
    bool NeedsAliasSetRTCheck = false;
    if (!(IsDepCheckNeeded && CanDoAliasSetRT && RunningDepId == 2))
      NeedsAliasSetRTCheck = (NumWritePtrChecks >= 2 ||
                             (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoAliasSetRT flag and retry all accesses that have
      // failed. We know that we need these checks, so we can now be more
      // aggressive and add further checks if required (overflow checks).
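      // (E.g., a pointer whose SCEV only becomes an affine AddRec once a
      // SCEVWrapPredicate is assumed fails the first pass above, but can be
      // accepted here with Assume=true at the cost of an extra runtime
      // overflow check.)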
      CanDoAliasSetRT = true;
      for (auto Access : Retries)
        if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
                                  TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          break;
        }
    }

    CanDoRT &= CanDoAliasSetRT;
    NeedRTCheck |= NeedsAliasSetRTCheck;
    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
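    // E.g. (a sketch): in "a[i] = b[i]" where a and b may alias and thus land
    // in one alias set, the read-only pointer b is deferred to the second
    // pass; by then the write through a has set SetHasWrite, so the read is
    // registered as needing a dependence check.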
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write, check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + ..." without having to do a dependence
          // check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : TempObjects) {
            // A null pointer never aliases anything; don't join sets for
            // pointers that have "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                      << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and with unit stride would
  // have to access the pointer value "0", which is undefined behavior in
  // address space 0; therefore we can also vectorize this case.
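  // In other words (a sketch of the cases below): the recurrence is usable if
  // it provably cannot wrap (SCEV no-wrap flags, a wrap predicate, or an
  // inbounds GEP), and otherwise only if wrapping would already be undefined
  // behavior, i.e. the access would have to reach null in an address space
  // where null is not a valid address.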
1036 bool IsInBoundsGEP = isInBoundsGep(Ptr); 1037 bool IsNoWrapAddRec = !ShouldCheckWrap || 1038 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) || 1039 isNoWrapAddRec(Ptr, AR, PSE, Lp); 1040 if (!IsNoWrapAddRec && !IsInBoundsGEP && 1041 NullPointerIsDefined(Lp->getHeader()->getParent(), 1042 PtrTy->getAddressSpace())) { 1043 if (Assume) { 1044 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 1045 IsNoWrapAddRec = true; 1046 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n" 1047 << "LAA: Pointer: " << *Ptr << "\n" 1048 << "LAA: SCEV: " << *AR << "\n" 1049 << "LAA: Added an overflow assumption\n"); 1050 } else { 1051 LLVM_DEBUG( 1052 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space " 1053 << *Ptr << " SCEV: " << *AR << "\n"); 1054 return 0; 1055 } 1056 } 1057 1058 // Check the step is constant. 1059 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 1060 1061 // Calculate the pointer stride and check if it is constant. 1062 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 1063 if (!C) { 1064 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr 1065 << " SCEV: " << *AR << "\n"); 1066 return 0; 1067 } 1068 1069 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 1070 int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 1071 const APInt &APStepVal = C->getAPInt(); 1072 1073 // Huge step value - give up. 1074 if (APStepVal.getBitWidth() > 64) 1075 return 0; 1076 1077 int64_t StepVal = APStepVal.getSExtValue(); 1078 1079 // Strided access. 1080 int64_t Stride = StepVal / Size; 1081 int64_t Rem = StepVal % Size; 1082 if (Rem) 1083 return 0; 1084 1085 // If the SCEV could wrap but we have an inbounds gep with a unit stride we 1086 // know we can't "wrap around the address space". In case of address space 1087 // zero we know that this won't happen without triggering undefined behavior. 1088 if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 && 1089 (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(), 1090 PtrTy->getAddressSpace()))) { 1091 if (Assume) { 1092 // We can avoid this case by adding a run-time check. 1093 LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either " 1094 << "inbounds or in address space 0 may wrap:\n" 1095 << "LAA: Pointer: " << *Ptr << "\n" 1096 << "LAA: SCEV: " << *AR << "\n" 1097 << "LAA: Added an overflow assumption\n"); 1098 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 1099 } else 1100 return 0; 1101 } 1102 1103 return Stride; 1104 } 1105 1106 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL, 1107 ScalarEvolution &SE, 1108 SmallVectorImpl<unsigned> &SortedIndices) { 1109 assert(llvm::all_of( 1110 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 1111 "Expected list of pointer operands."); 1112 SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs; 1113 OffValPairs.reserve(VL.size()); 1114 1115 // Walk over the pointers, and map each of them to an offset relative to 1116 // first pointer in the array. 1117 Value *Ptr0 = VL[0]; 1118 const SCEV *Scev0 = SE.getSCEV(Ptr0); 1119 Value *Obj0 = GetUnderlyingObject(Ptr0, DL); 1120 1121 llvm::SmallSet<int64_t, 4> Offsets; 1122 for (auto *Ptr : VL) { 1123 // TODO: Outline this code as a special, more time consuming, version of 1124 // computeConstantDifference() function. 
1125 if (Ptr->getType()->getPointerAddressSpace() != 1126 Ptr0->getType()->getPointerAddressSpace()) 1127 return false; 1128 // If a pointer refers to a different underlying object, bail - the 1129 // pointers are by definition incomparable. 1130 Value *CurrObj = GetUnderlyingObject(Ptr, DL); 1131 if (CurrObj != Obj0) 1132 return false; 1133 1134 const SCEV *Scev = SE.getSCEV(Ptr); 1135 const auto *Diff = dyn_cast<SCEVConstant>(SE.getMinusSCEV(Scev, Scev0)); 1136 // The pointers may not have a constant offset from each other, or SCEV 1137 // may just not be smart enough to figure out they do. Regardless, 1138 // there's nothing we can do. 1139 if (!Diff) 1140 return false; 1141 1142 // Check if the pointer with the same offset is found. 1143 int64_t Offset = Diff->getAPInt().getSExtValue(); 1144 if (!Offsets.insert(Offset).second) 1145 return false; 1146 OffValPairs.emplace_back(Offset, Ptr); 1147 } 1148 SortedIndices.clear(); 1149 SortedIndices.resize(VL.size()); 1150 std::iota(SortedIndices.begin(), SortedIndices.end(), 0); 1151 1152 // Sort the memory accesses and keep the order of their uses in UseOrder. 1153 llvm::stable_sort(SortedIndices, [&](unsigned Left, unsigned Right) { 1154 return OffValPairs[Left].first < OffValPairs[Right].first; 1155 }); 1156 1157 // Check if the order is consecutive already. 1158 if (llvm::all_of(SortedIndices, [&SortedIndices](const unsigned I) { 1159 return I == SortedIndices[I]; 1160 })) 1161 SortedIndices.clear(); 1162 1163 return true; 1164 } 1165 1166 /// Take the address space operand from the Load/Store instruction. 1167 /// Returns -1 if this is not a valid Load/Store instruction. 1168 static unsigned getAddressSpaceOperand(Value *I) { 1169 if (LoadInst *L = dyn_cast<LoadInst>(I)) 1170 return L->getPointerAddressSpace(); 1171 if (StoreInst *S = dyn_cast<StoreInst>(I)) 1172 return S->getPointerAddressSpace(); 1173 return -1; 1174 } 1175 1176 /// Returns true if the memory operations \p A and \p B are consecutive. 1177 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, 1178 ScalarEvolution &SE, bool CheckType) { 1179 Value *PtrA = getLoadStorePointerOperand(A); 1180 Value *PtrB = getLoadStorePointerOperand(B); 1181 unsigned ASA = getAddressSpaceOperand(A); 1182 unsigned ASB = getAddressSpaceOperand(B); 1183 1184 // Check that the address spaces match and that the pointers are valid. 1185 if (!PtrA || !PtrB || (ASA != ASB)) 1186 return false; 1187 1188 // Make sure that A and B are different pointers. 1189 if (PtrA == PtrB) 1190 return false; 1191 1192 // Make sure that A and B have the same type if required. 1193 if (CheckType && PtrA->getType() != PtrB->getType()) 1194 return false; 1195 1196 unsigned IdxWidth = DL.getIndexSizeInBits(ASA); 1197 Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); 1198 1199 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0); 1200 PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); 1201 PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); 1202 1203 // Retrieve the address space again as pointer stripping now tracks through 1204 // `addrspacecast`. 1205 ASA = cast<PointerType>(PtrA->getType())->getAddressSpace(); 1206 ASB = cast<PointerType>(PtrB->getType())->getAddressSpace(); 1207 // Check that the address spaces match and that the pointers are valid. 
1208 if (ASA != ASB) 1209 return false; 1210 1211 IdxWidth = DL.getIndexSizeInBits(ASA); 1212 OffsetA = OffsetA.sextOrTrunc(IdxWidth); 1213 OffsetB = OffsetB.sextOrTrunc(IdxWidth); 1214 1215 APInt Size(IdxWidth, DL.getTypeStoreSize(Ty)); 1216 1217 // OffsetDelta = OffsetB - OffsetA; 1218 const SCEV *OffsetSCEVA = SE.getConstant(OffsetA); 1219 const SCEV *OffsetSCEVB = SE.getConstant(OffsetB); 1220 const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA); 1221 const APInt &OffsetDelta = cast<SCEVConstant>(OffsetDeltaSCEV)->getAPInt(); 1222 1223 // Check if they are based on the same pointer. That makes the offsets 1224 // sufficient. 1225 if (PtrA == PtrB) 1226 return OffsetDelta == Size; 1227 1228 // Compute the necessary base pointer delta to have the necessary final delta 1229 // equal to the size. 1230 // BaseDelta = Size - OffsetDelta; 1231 const SCEV *SizeSCEV = SE.getConstant(Size); 1232 const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV); 1233 1234 // Otherwise compute the distance with SCEV between the base pointers. 1235 const SCEV *PtrSCEVA = SE.getSCEV(PtrA); 1236 const SCEV *PtrSCEVB = SE.getSCEV(PtrB); 1237 const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta); 1238 return X == PtrSCEVB; 1239 } 1240 1241 MemoryDepChecker::VectorizationSafetyStatus 1242 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) { 1243 switch (Type) { 1244 case NoDep: 1245 case Forward: 1246 case BackwardVectorizable: 1247 return VectorizationSafetyStatus::Safe; 1248 1249 case Unknown: 1250 return VectorizationSafetyStatus::PossiblySafeWithRtChecks; 1251 case ForwardButPreventsForwarding: 1252 case Backward: 1253 case BackwardVectorizableButPreventsForwarding: 1254 return VectorizationSafetyStatus::Unsafe; 1255 } 1256 llvm_unreachable("unexpected DepType!"); 1257 } 1258 1259 bool MemoryDepChecker::Dependence::isBackward() const { 1260 switch (Type) { 1261 case NoDep: 1262 case Forward: 1263 case ForwardButPreventsForwarding: 1264 case Unknown: 1265 return false; 1266 1267 case BackwardVectorizable: 1268 case Backward: 1269 case BackwardVectorizableButPreventsForwarding: 1270 return true; 1271 } 1272 llvm_unreachable("unexpected DepType!"); 1273 } 1274 1275 bool MemoryDepChecker::Dependence::isPossiblyBackward() const { 1276 return isBackward() || Type == Unknown; 1277 } 1278 1279 bool MemoryDepChecker::Dependence::isForward() const { 1280 switch (Type) { 1281 case Forward: 1282 case ForwardButPreventsForwarding: 1283 return true; 1284 1285 case NoDep: 1286 case Unknown: 1287 case BackwardVectorizable: 1288 case Backward: 1289 case BackwardVectorizableButPreventsForwarding: 1290 return false; 1291 } 1292 llvm_unreachable("unexpected DepType!"); 1293 } 1294 1295 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance, 1296 uint64_t TypeByteSize) { 1297 // If loads occur at a distance that is not a multiple of a feasible vector 1298 // factor store-load forwarding does not take place. 1299 // Positive dependences might cause troubles because vectorizing them might 1300 // prevent store-load forwarding making vectorized code run a lot slower. 1301 // a[i] = a[i-3] ^ a[i-8]; 1302 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and 1303 // hence on your typical architecture store-load forwarding does not take 1304 // place. Vectorizing in such cases does not make sense. 1305 // Store-load forwarding distance. 1306 1307 // After this many iterations store-to-load forwarding conflicts should not 1308 // cause any slowdowns. 
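  // Worked example (a sketch): with TypeByteSize = 4 and Distance = 12 bytes,
  // the loop below first tries VF = 8 (in bytes): 12 % 8 != 0 and
  // 12 / 8 = 1 < 32, so the maximum conflict-free VF drops to 4, which is
  // below the 2 * TypeByteSize floor, and the distance is reported as a
  // forwarding hazard.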
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small, we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
  if (Status < S)
    Status = S;
}

/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it
/// is possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///    for (i = 0; i < D; ++i) {
///                = out[i];
///      out[i+D]  =
///    }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {

  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
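  // E.g. (a sketch) for the PR31098 loop above with 4-byte elements and unit
  // stride: Dist = 4*D bytes, Step = 4 and BackedgeTakenCount = D - 1, so
  // Dist - BackedgeTakenCount * Step = 4*D - 4*(D - 1) = 4 > 0, proving (**).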
1377 // 1378 const uint64_t ByteStride = Stride * TypeByteSize; 1379 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride); 1380 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step); 1381 1382 const SCEV *CastedDist = &Dist; 1383 const SCEV *CastedProduct = Product; 1384 uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType()); 1385 uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType()); 1386 1387 // The dependence distance can be positive/negative, so we sign extend Dist; 1388 // The multiplication of the absolute stride in bytes and the 1389 // backedgeTakenCount is non-negative, so we zero extend Product. 1390 if (DistTypeSize > ProductTypeSize) 1391 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType()); 1392 else 1393 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType()); 1394 1395 // Is Dist - (BackedgeTakenCount * Step) > 0 ? 1396 // (If so, then we have proven (**) because |Dist| >= Dist) 1397 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct); 1398 if (SE.isKnownPositive(Minus)) 1399 return true; 1400 1401 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ? 1402 // (If so, then we have proven (**) because |Dist| >= -1*Dist) 1403 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist); 1404 Minus = SE.getMinusSCEV(NegDist, CastedProduct); 1405 if (SE.isKnownPositive(Minus)) 1406 return true; 1407 1408 return false; 1409 } 1410 1411 /// Check the dependence for two accesses with the same stride \p Stride. 1412 /// \p Distance is the positive distance and \p TypeByteSize is type size in 1413 /// bytes. 1414 /// 1415 /// \returns true if they are independent. 1416 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, 1417 uint64_t TypeByteSize) { 1418 assert(Stride > 1 && "The stride must be greater than 1"); 1419 assert(TypeByteSize > 0 && "The type size in byte must be non-zero"); 1420 assert(Distance > 0 && "The distance must be non-zero"); 1421 1422 // Skip if the distance is not multiple of type byte size. 1423 if (Distance % TypeByteSize) 1424 return false; 1425 1426 uint64_t ScaledDist = Distance / TypeByteSize; 1427 1428 // No dependence if the scaled distance is not multiple of the stride. 1429 // E.g. 1430 // for (i = 0; i < 1024 ; i += 4) 1431 // A[i+2] = A[i] + 1; 1432 // 1433 // Two accesses in memory (scaled distance is 2, stride is 4): 1434 // | A[0] | | | | A[4] | | | | 1435 // | | | A[2] | | | | A[6] | | 1436 // 1437 // E.g. 1438 // for (i = 0; i < 1024 ; i += 3) 1439 // A[i+4] = A[i] + 1; 1440 // 1441 // Two accesses in memory (scaled distance is 4, stride is 3): 1442 // | A[0] | | | A[3] | | | A[6] | | | 1443 // | | | | | A[4] | | | A[7] | | 1444 return ScaledDist % Stride; 1445 } 1446 1447 MemoryDepChecker::Dependence::DepType 1448 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, 1449 const MemAccessInfo &B, unsigned BIdx, 1450 const ValueToValueMap &Strides) { 1451 assert (AIdx < BIdx && "Must pass arguments in program order"); 1452 1453 Value *APtr = A.getPointer(); 1454 Value *BPtr = B.getPointer(); 1455 bool AIsWrite = A.getInt(); 1456 bool BIsWrite = B.getInt(); 1457 1458 // Two reads are independent. 1459 if (!AIsWrite && !BIsWrite) 1460 return Dependence::NoDep; 1461 1462 // We cannot check pointers in different address spaces. 
1463 if (APtr->getType()->getPointerAddressSpace() != 1464 BPtr->getType()->getPointerAddressSpace()) 1465 return Dependence::Unknown; 1466 1467 int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true); 1468 int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true); 1469 1470 const SCEV *Src = PSE.getSCEV(APtr); 1471 const SCEV *Sink = PSE.getSCEV(BPtr); 1472 1473 // If the induction step is negative we have to invert source and sink of the 1474 // dependence. 1475 if (StrideAPtr < 0) { 1476 std::swap(APtr, BPtr); 1477 std::swap(Src, Sink); 1478 std::swap(AIsWrite, BIsWrite); 1479 std::swap(AIdx, BIdx); 1480 std::swap(StrideAPtr, StrideBPtr); 1481 } 1482 1483 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src); 1484 1485 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink 1486 << "(Induction step: " << StrideAPtr << ")\n"); 1487 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to " 1488 << *InstMap[BIdx] << ": " << *Dist << "\n"); 1489 1490 // Need accesses with constant stride. We don't want to vectorize 1491 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in 1492 // the address space. 1493 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){ 1494 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n"); 1495 return Dependence::Unknown; 1496 } 1497 1498 Type *ATy = APtr->getType()->getPointerElementType(); 1499 Type *BTy = BPtr->getType()->getPointerElementType(); 1500 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); 1501 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy); 1502 uint64_t Stride = std::abs(StrideAPtr); 1503 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist); 1504 if (!C) { 1505 if (TypeByteSize == DL.getTypeAllocSize(BTy) && 1506 isSafeDependenceDistance(DL, *(PSE.getSE()), 1507 *(PSE.getBackedgeTakenCount()), *Dist, Stride, 1508 TypeByteSize)) 1509 return Dependence::NoDep; 1510 1511 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); 1512 FoundNonConstantDistanceDependence = true; 1513 return Dependence::Unknown; 1514 } 1515 1516 const APInt &Val = C->getAPInt(); 1517 int64_t Distance = Val.getSExtValue(); 1518 1519 // Attempt to prove strided accesses independent. 1520 if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy && 1521 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) { 1522 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); 1523 return Dependence::NoDep; 1524 } 1525 1526 // Negative distances are not plausible dependencies. 1527 if (Val.isNegative()) { 1528 bool IsTrueDataDependence = (AIsWrite && !BIsWrite); 1529 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1530 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) || 1531 ATy != BTy)) { 1532 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); 1533 return Dependence::ForwardButPreventsForwarding; 1534 } 1535 1536 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n"); 1537 return Dependence::Forward; 1538 } 1539 1540 // Write to the same location with the same size. 1541 // Could be improved to assert type sizes are the same (i32 == float, etc). 
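  // (E.g. "a[i] = a[i] + 1": the load and store hit the same address in the
  // same iteration; with matching types this is a forward dependence that is
  // safe to vectorize.)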
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    LLVM_DEBUG(
        dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    LLVM_DEBUG(
        dbgs()
        << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (there is no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // The distance needed for vectorizing all iterations except the last is
  // 4 * 2 * (MinNumIter - 1); the last iteration needs 4. So the minimum
  // distance needed is 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than the distance of 14.
  //
  // If MinNumIter is 4 (say, if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than the
  // distance. It is not safe to do vectorization.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
                      << Distance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which could
  // not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2.
  // Then we analyze the accesses on array A: the minimum distance needed
  // is 8, which is greater than 2, so vectorization is forbidden. But
  // actually both A and B could be vectorized with a factor of 2.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeRegisterWidth = std::min(MaxSafeRegisterWidth, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoList &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      bool AIIsWrite = AI->getInt();
      // Check loads only against the next equivalence class, but stores also
      // against other stores in the same equivalence class - to the same
      // address.
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
          (AIIsWrite ? AI : std::next(AI));
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          // Scan all accesses of another equivalence class, but only the next
          // accesses of the same equivalence class.
          for (std::vector<unsigned>::iterator
               I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
               I2E = (OI == AI ? I1E : Accesses[*OI].end());
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we have accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
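            // Note that once the cap is hit, the dependence list is dropped
            // and only the aggregate safety status is kept up to date.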
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
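  // E.g. for a canonical "for (i = 0; i < n; ++i)" loop, SCEV can typically
  // compute a backedge-taken count of "n - 1"; loops with opaque exit
  // conditions yield SCEVCouldNotCompute and are rejected below.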
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // If we have found both a non-vectorizable memory instruction and a
      // convergent operation in this loop, there is no reason to continue
      // the search.
      if (HasComplexMemInst && HasConvergentOp) {
        CanVecMem = false;
        return;
      }

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
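      // (For instance, a memset or a call that may write memory is not a
      // StoreInst, so it is flagged as a complex memory instruction below.)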
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioning)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst) {
    CanVecMem = false;
    return;
  }

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (Stores.empty()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  ValueSet UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isUniform(Ptr))
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the index is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load from a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds")
        << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: May be able to perform a memory runtime check if "
                "needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
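      // (The trailing 'true' asks canCheckPtrAtRT to also verify that the
      // pointers do not wrap; if even this retry cannot bound every pointer,
      // we give up below.)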
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    CanVecMem = false;
    return;
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If the instruction has a debug location, use it; otherwise fall back
    // to the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
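  // (For example, if SCEV proves Stride >= TripCount, a specialized loop
  // guarded by "Stride == 1" could only ever run a trip count of at most 1,
  // so versioning cannot pay off.)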
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations. For example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *StrideExpr = PSE->getSCEV(Stride);
  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // the backedgeTakenCount is non-negative, so we zero extend BETakenCount.
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
  uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSize >= StrideTypeSize)
    CastedStride =
        SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount =
        SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - BETakenCount > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      HasConvergentOp(false),
      HasDependenceInvolvingLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2,
                DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) {
  initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
}

const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI;
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}

bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

AnalysisKey LoopAccessAnalysis::Key;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}

namespace llvm {

Pass *createLAAPass() {
  return new LoopAccessLegacyAnalysis();
}

} // end namespace llvm