1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // The implementation for the loop memory dependence that was originally 10 // developed for the loop vectorizer. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Analysis/LoopAccessAnalysis.h" 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/DepthFirstIterator.h" 18 #include "llvm/ADT/EquivalenceClasses.h" 19 #include "llvm/ADT/PointerIntPair.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/iterator_range.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Analysis/AliasSetTracker.h" 28 #include "llvm/Analysis/LoopAnalysisManager.h" 29 #include "llvm/Analysis/LoopInfo.h" 30 #include "llvm/Analysis/MemoryLocation.h" 31 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 32 #include "llvm/Analysis/ScalarEvolution.h" 33 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 34 #include "llvm/Analysis/TargetLibraryInfo.h" 35 #include "llvm/Analysis/ValueTracking.h" 36 #include "llvm/Analysis/VectorUtils.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/Constants.h" 39 #include "llvm/IR/DataLayout.h" 40 #include "llvm/IR/DebugLoc.h" 41 #include "llvm/IR/DerivedTypes.h" 42 #include "llvm/IR/DiagnosticInfo.h" 43 #include "llvm/IR/Dominators.h" 44 #include "llvm/IR/Function.h" 45 #include "llvm/IR/InstrTypes.h" 46 #include "llvm/IR/Instruction.h" 47 #include "llvm/IR/Instructions.h" 48 #include "llvm/IR/Operator.h" 49 #include "llvm/IR/PassManager.h" 50 #include "llvm/IR/Type.h" 51 #include "llvm/IR/Value.h" 52 #include "llvm/IR/ValueHandle.h" 53 #include "llvm/InitializePasses.h" 54 #include "llvm/Pass.h" 55 #include "llvm/Support/Casting.h" 56 #include "llvm/Support/CommandLine.h" 57 #include "llvm/Support/Debug.h" 58 #include "llvm/Support/ErrorHandling.h" 59 #include "llvm/Support/raw_ostream.h" 60 #include <algorithm> 61 #include <cassert> 62 #include <cstdint> 63 #include <iterator> 64 #include <utility> 65 #include <vector> 66 67 using namespace llvm; 68 69 #define DEBUG_TYPE "loop-accesses" 70 71 static cl::opt<unsigned, true> 72 VectorizationFactor("force-vector-width", cl::Hidden, 73 cl::desc("Sets the SIMD width. Zero is autoselect."), 74 cl::location(VectorizerParams::VectorizationFactor)); 75 unsigned VectorizerParams::VectorizationFactor; 76 77 static cl::opt<unsigned, true> 78 VectorizationInterleave("force-vector-interleave", cl::Hidden, 79 cl::desc("Sets the vectorization interleave count. 
" 80 "Zero is autoselect."), 81 cl::location( 82 VectorizerParams::VectorizationInterleave)); 83 unsigned VectorizerParams::VectorizationInterleave; 84 85 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 86 "runtime-memory-check-threshold", cl::Hidden, 87 cl::desc("When performing memory disambiguation checks at runtime do not " 88 "generate more than this number of comparisons (default = 8)."), 89 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 90 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 91 92 /// The maximum iterations used to merge memory checks 93 static cl::opt<unsigned> MemoryCheckMergeThreshold( 94 "memory-check-merge-threshold", cl::Hidden, 95 cl::desc("Maximum number of comparisons done when trying to merge " 96 "runtime memory checks. (default = 100)"), 97 cl::init(100)); 98 99 /// Maximum SIMD width. 100 const unsigned VectorizerParams::MaxVectorWidth = 64; 101 102 /// We collect dependences up to this threshold. 103 static cl::opt<unsigned> 104 MaxDependences("max-dependences", cl::Hidden, 105 cl::desc("Maximum number of dependences collected by " 106 "loop-access analysis (default = 100)"), 107 cl::init(100)); 108 109 /// This enables versioning on the strides of symbolically striding memory 110 /// accesses in code like the following. 111 /// for (i = 0; i < N; ++i) 112 /// A[i * Stride1] += B[i * Stride2] ... 113 /// 114 /// Will be roughly translated to 115 /// if (Stride1 == 1 && Stride2 == 1) { 116 /// for (i = 0; i < N; i+=4) 117 /// A[i:i+3] += ... 118 /// } else 119 /// ... 120 static cl::opt<bool> EnableMemAccessVersioning( 121 "enable-mem-access-versioning", cl::init(true), cl::Hidden, 122 cl::desc("Enable symbolic stride memory access versioning")); 123 124 /// Enable store-to-load forwarding conflict detection. This option can 125 /// be disabled for correctness testing. 126 static cl::opt<bool> EnableForwardingConflictDetection( 127 "store-to-load-forwarding-conflict-detection", cl::Hidden, 128 cl::desc("Enable conflict detection in loop-access analysis"), 129 cl::init(true)); 130 131 bool VectorizerParams::isInterleaveForced() { 132 return ::VectorizationInterleave.getNumOccurrences() > 0; 133 } 134 135 Value *llvm::stripIntegerCast(Value *V) { 136 if (auto *CI = dyn_cast<CastInst>(V)) 137 if (CI->getOperand(0)->getType()->isIntegerTy()) 138 return CI->getOperand(0); 139 return V; 140 } 141 142 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 143 const ValueToValueMap &PtrToStride, 144 Value *Ptr) { 145 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 146 147 // If there is an entry in the map return the SCEV of the pointer with the 148 // symbolic stride replaced by one. 149 ValueToValueMap::const_iterator SI = PtrToStride.find(Ptr); 150 if (SI == PtrToStride.end()) 151 // For a non-symbolic stride, just return the original expression. 
152 return OrigSCEV; 153 154 Value *StrideVal = stripIntegerCast(SI->second); 155 156 ScalarEvolution *SE = PSE.getSE(); 157 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 158 const auto *CT = 159 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 160 161 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 162 auto *Expr = PSE.getSCEV(Ptr); 163 164 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV 165 << " by: " << *Expr << "\n"); 166 return Expr; 167 } 168 169 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup( 170 unsigned Index, RuntimePointerChecking &RtCheck) 171 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start), 172 AddressSpace(RtCheck.Pointers[Index] 173 .PointerValue->getType() 174 ->getPointerAddressSpace()) { 175 Members.push_back(Index); 176 } 177 178 /// Calculate Start and End points of memory access. 179 /// Let's assume A is the first access and B is a memory access on N-th loop 180 /// iteration. Then B is calculated as: 181 /// B = A + Step*N . 182 /// Step value may be positive or negative. 183 /// N is a calculated back-edge taken count: 184 /// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0 185 /// Start and End points are calculated in the following way: 186 /// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt, 187 /// where SizeOfElt is the size of single memory access in bytes. 188 /// 189 /// There is no conflict when the intervals are disjoint: 190 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End) 191 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, Type *AccessTy, 192 bool WritePtr, unsigned DepSetId, 193 unsigned ASId, 194 const ValueToValueMap &Strides, 195 PredicatedScalarEvolution &PSE) { 196 // Get the stride replaced scev. 197 const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 198 ScalarEvolution *SE = PSE.getSE(); 199 200 const SCEV *ScStart; 201 const SCEV *ScEnd; 202 203 if (SE->isLoopInvariant(Sc, Lp)) { 204 ScStart = ScEnd = Sc; 205 } else { 206 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); 207 assert(AR && "Invalid addrec expression"); 208 const SCEV *Ex = PSE.getBackedgeTakenCount(); 209 210 ScStart = AR->getStart(); 211 ScEnd = AR->evaluateAtIteration(Ex, *SE); 212 const SCEV *Step = AR->getStepRecurrence(*SE); 213 214 // For expressions with negative step, the upper bound is ScStart and the 215 // lower bound is ScEnd. 216 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) { 217 if (CStep->getValue()->isNegative()) 218 std::swap(ScStart, ScEnd); 219 } else { 220 // Fallback case: the step is not constant, but we can still 221 // get the upper and lower bounds of the interval by using min/max 222 // expressions. 223 ScStart = SE->getUMinExpr(ScStart, ScEnd); 224 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd); 225 } 226 } 227 // Add the size of the pointed element to ScEnd. 
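// Making ScEnd exclusive (one byte past the last accessed address) keeps the
// disjointness test from the comment above, e.g. (P2.Start >= P1.End), valid
// even when the two accesses touch adjacent elements.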
228 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 229 Type *IdxTy = DL.getIndexType(Ptr->getType()); 230 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy); 231 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV); 232 233 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc); 234 } 235 236 SmallVector<RuntimePointerCheck, 4> 237 RuntimePointerChecking::generateChecks() const { 238 SmallVector<RuntimePointerCheck, 4> Checks; 239 240 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 241 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) { 242 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I]; 243 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J]; 244 245 if (needsChecking(CGI, CGJ)) 246 Checks.push_back(std::make_pair(&CGI, &CGJ)); 247 } 248 } 249 return Checks; 250 } 251 252 void RuntimePointerChecking::generateChecks( 253 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 254 assert(Checks.empty() && "Checks is not empty"); 255 groupChecks(DepCands, UseDependencies); 256 Checks = generateChecks(); 257 } 258 259 bool RuntimePointerChecking::needsChecking( 260 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const { 261 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I) 262 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J) 263 if (needsChecking(M.Members[I], N.Members[J])) 264 return true; 265 return false; 266 } 267 268 /// Compare \p I and \p J and return the minimum. 269 /// Return nullptr in case we couldn't find an answer. 270 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J, 271 ScalarEvolution *SE) { 272 const SCEV *Diff = SE->getMinusSCEV(J, I); 273 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff); 274 275 if (!C) 276 return nullptr; 277 if (C->getValue()->isNegative()) 278 return J; 279 return I; 280 } 281 282 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, 283 RuntimePointerChecking &RtCheck) { 284 return addPointer( 285 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End, 286 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(), 287 *RtCheck.SE); 288 } 289 290 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start, 291 const SCEV *End, unsigned AS, 292 ScalarEvolution &SE) { 293 assert(AddressSpace == AS && 294 "all pointers in a checking group must be in the same address space"); 295 296 // Compare the starts and ends with the known minimum and maximum 297 // of this set. We need to know how we compare against the min/max 298 // of the set in order to be able to emit memchecks. 299 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE); 300 if (!Min0) 301 return false; 302 303 const SCEV *Min1 = getMinFromExprs(End, High, &SE); 304 if (!Min1) 305 return false; 306 307 // Update the low bound expression if we've found a new min value. 308 if (Min0 == Start) 309 Low = Start; 310 311 // Update the high bound expression if we've found a new max value. 
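// (getMinFromExprs showed High to be the smaller of {End, High}, i.e. End is
// provably greater than or equal to the current High.)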
312 if (Min1 != End) 313 High = End; 314 315 Members.push_back(Index); 316 return true; 317 } 318 319 void RuntimePointerChecking::groupChecks( 320 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 321 // We build the groups from dependency candidates equivalence classes 322 // because: 323 // - We know that pointers in the same equivalence class share 324 // the same underlying object and therefore there is a chance 325 // that we can compare pointers 326 // - We wouldn't be able to merge two pointers for which we need 327 // to emit a memcheck. The classes in DepCands are already 328 // conveniently built such that no two pointers in the same 329 // class need checking against each other. 330 331 // We use the following (greedy) algorithm to construct the groups 332 // For every pointer in the equivalence class: 333 // For each existing group: 334 // - if the difference between this pointer and the min/max bounds 335 // of the group is a constant, then make the pointer part of the 336 // group and update the min/max bounds of that group as required. 337 338 CheckingGroups.clear(); 339 340 // If we need to check two pointers to the same underlying object 341 // with a non-constant difference, we shouldn't perform any pointer 342 // grouping with those pointers. This is because we can easily get 343 // into cases where the resulting check would return false, even when 344 // the accesses are safe. 345 // 346 // The following example shows this: 347 // for (i = 0; i < 1000; ++i) 348 // a[5000 + i * m] = a[i] + a[i + 9000] 349 // 350 // Here grouping gives a check of (5000, 5000 + 1000 * m) against 351 // (0, 10000) which is always false. However, if m is 1, there is no 352 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows 353 // us to perform an accurate check in this case. 354 // 355 // The above case requires that we have an UnknownDependence between 356 // accesses to the same underlying object. This cannot happen unless 357 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies 358 // is also false. In this case we will use the fallback path and create 359 // separate checking groups for all pointers. 360 361 // If we don't have the dependency partitions, construct a new 362 // checking pointer group for each pointer. This is also required 363 // for correctness, because in this case we can have checking between 364 // pointers to the same underlying object. 365 if (!UseDependencies) { 366 for (unsigned I = 0; I < Pointers.size(); ++I) 367 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this)); 368 return; 369 } 370 371 unsigned TotalComparisons = 0; 372 373 DenseMap<Value *, unsigned> PositionMap; 374 for (unsigned Index = 0; Index < Pointers.size(); ++Index) 375 PositionMap[Pointers[Index].PointerValue] = Index; 376 377 // We need to keep track of what pointers we've already seen so we 378 // don't process them twice. 379 SmallSet<unsigned, 2> Seen; 380 381 // Go through all equivalence classes, get the "pointer check groups" 382 // and add them to the overall solution. We use the order in which accesses 383 // appear in 'Pointers' to enforce determinism. 384 for (unsigned I = 0; I < Pointers.size(); ++I) { 385 // We've seen this pointer before, and therefore already processed 386 // its equivalence class. 
387 if (Seen.count(I)) 388 continue; 389 390 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue, 391 Pointers[I].IsWritePtr); 392 393 SmallVector<RuntimeCheckingPtrGroup, 2> Groups; 394 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access)); 395 396 // Because DepCands is constructed by visiting accesses in the order in 397 // which they appear in alias sets (which is deterministic) and the 398 // iteration order within an equivalence class member is only dependent on 399 // the order in which unions and insertions are performed on the 400 // equivalence class, the iteration order is deterministic. 401 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end(); 402 MI != ME; ++MI) { 403 auto PointerI = PositionMap.find(MI->getPointer()); 404 assert(PointerI != PositionMap.end() && 405 "pointer in equivalence class not found in PositionMap"); 406 unsigned Pointer = PointerI->second; 407 bool Merged = false; 408 // Mark this pointer as seen. 409 Seen.insert(Pointer); 410 411 // Go through all the existing sets and see if we can find one 412 // which can include this pointer. 413 for (RuntimeCheckingPtrGroup &Group : Groups) { 414 // Don't perform more than a certain amount of comparisons. 415 // This should limit the cost of grouping the pointers to something 416 // reasonable. If we do end up hitting this threshold, the algorithm 417 // will create separate groups for all remaining pointers. 418 if (TotalComparisons > MemoryCheckMergeThreshold) 419 break; 420 421 TotalComparisons++; 422 423 if (Group.addPointer(Pointer, *this)) { 424 Merged = true; 425 break; 426 } 427 } 428 429 if (!Merged) 430 // We couldn't add this pointer to any existing set or the threshold 431 // for the number of comparisons has been reached. Create a new group 432 // to hold the current pointer. 433 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this)); 434 } 435 436 // We've computed the grouped checks for this partition. 437 // Save the results and continue with the next one. 438 llvm::copy(Groups, std::back_inserter(CheckingGroups)); 439 } 440 } 441 442 bool RuntimePointerChecking::arePointersInSamePartition( 443 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1, 444 unsigned PtrIdx2) { 445 return (PtrToPartition[PtrIdx1] != -1 && 446 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]); 447 } 448 449 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const { 450 const PointerInfo &PointerI = Pointers[I]; 451 const PointerInfo &PointerJ = Pointers[J]; 452 453 // No need to check if two readonly pointers intersect. 454 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr) 455 return false; 456 457 // Only need to check pointers between two different dependency sets. 458 if (PointerI.DependencySetId == PointerJ.DependencySetId) 459 return false; 460 461 // Only need to check pointers in the same alias set. 
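// Pointers placed in different alias sets were already proven not to alias by
// the AliasSetTracker, so no runtime check is needed for such a pair.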
462 if (PointerI.AliasSetId != PointerJ.AliasSetId)
463 return false;
464
465 return true;
466 }
467
468 void RuntimePointerChecking::printChecks(
469 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
470 unsigned Depth) const {
471 unsigned N = 0;
472 for (const auto &Check : Checks) {
473 const auto &First = Check.first->Members, &Second = Check.second->Members;
474
475 OS.indent(Depth) << "Check " << N++ << ":\n";
476
477 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
478 for (unsigned K = 0; K < First.size(); ++K)
479 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
480
481 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
482 for (unsigned K = 0; K < Second.size(); ++K)
483 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
484 }
485 }
486
487 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
488
489 OS.indent(Depth) << "Run-time memory checks:\n";
490 printChecks(OS, Checks, Depth);
491
492 OS.indent(Depth) << "Grouped accesses:\n";
493 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
494 const auto &CG = CheckingGroups[I];
495
496 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
497 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
498 << ")\n";
499 for (unsigned J = 0; J < CG.Members.size(); ++J) {
500 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
501 << "\n";
502 }
503 }
504 }
505
506 namespace {
507
508 /// Analyses memory accesses in a loop.
509 ///
510 /// Checks whether run-time pointer checks are needed and builds sets for data
511 /// dependence checking.
512 class AccessAnalysis {
513 public:
514 /// Read or write access location.
515 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
516 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
517
518 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
519 MemoryDepChecker::DepCandidates &DA,
520 PredicatedScalarEvolution &PSE)
521 : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA), PSE(PSE) {}
522
523 /// Register a load and whether it is only read from.
524 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
525 Value *Ptr = const_cast<Value*>(Loc.Ptr);
526 AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
527 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
528 if (IsReadOnly)
529 ReadOnlyPtr.insert(Ptr);
530 }
531
532 /// Register a store.
533 void addStore(MemoryLocation &Loc, Type *AccessTy) {
534 Value *Ptr = const_cast<Value*>(Loc.Ptr);
535 AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
536 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
537 }
538
539 /// Check if we can emit a run-time no-alias check for \p Access.
540 ///
541 /// Returns true if we can emit a run-time no-alias check for \p Access.
542 /// If we can check this access, this also adds it to a dependence set and
543 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
544 /// we will attempt to use additional run-time checks in order to get
545 /// the bounds of the pointer.
546 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
547 MemAccessInfo Access, Type *AccessTy,
548 const ValueToValueMap &Strides,
549 DenseMap<Value *, unsigned> &DepSetId,
550 Loop *TheLoop, unsigned &RunningDepId,
551 unsigned ASId, bool ShouldCheckStride, bool Assume);
552
553 /// Check whether we can check the pointers at runtime for
554 /// non-intersection.
555 ///
556 /// Returns true if we need no check or if we do and we can generate them
557 /// (i.e. the pointers have computable bounds).
558 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
559 Loop *TheLoop, const ValueToValueMap &Strides,
560 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
561
562 /// Goes over all memory accesses, checks whether an RT check is needed
563 /// and builds sets of dependent accesses.
564 void buildDependenceSets() {
565 processMemAccesses();
566 }
567
568 /// Initial processing of memory accesses determined that we need to
569 /// perform dependency checking.
570 ///
571 /// Note that this can later be cleared if we retry memcheck analysis without
572 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
573 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
574
575 /// We decided that no dependence analysis would be used. Reset the state.
576 void resetDepChecks(MemoryDepChecker &DepChecker) {
577 CheckDeps.clear();
578 DepChecker.clearDependences();
579 }
580
581 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
582
583 private:
584 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
585
586 /// Go over all memory accesses and check whether runtime pointer checks
587 /// are needed and build sets of dependency check candidates.
588 void processMemAccesses();
589
590 /// Map of all accesses. Values are the types used to access memory pointed to
591 /// by the pointer.
592 PtrAccessMap Accesses;
593
594 /// The loop being checked.
595 const Loop *TheLoop;
596
597 /// List of accesses that need a further dependence check.
598 MemAccessInfoList CheckDeps;
599
600 /// Set of pointers that are read only.
601 SmallPtrSet<Value*, 16> ReadOnlyPtr;
602
603 /// An alias set tracker to partition the access set by underlying object and
604 /// intrinsic property (such as TBAA metadata).
605 AliasSetTracker AST;
606
607 LoopInfo *LI;
608
609 /// Sets of potentially dependent accesses - members of one set share an
610 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
611 /// dependence check.
612 MemoryDepChecker::DepCandidates &DepCands;
613
614 /// Initial processing of memory accesses determined that we may need
615 /// to add memchecks. Perform the analysis to determine the necessary checks.
616 ///
617 /// Note that this is different from isDependencyCheckNeeded. When we retry
618 /// memcheck analysis without dependency checking
619 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
620 /// cleared while this remains set if we have potentially dependent accesses.
621 bool IsRTCheckAnalysisNeeded = false;
622
623 /// The SCEV predicate containing all the SCEV-related assumptions.
624 PredicatedScalarEvolution &PSE;
625 };
626
627 } // end anonymous namespace
628
629 /// Check whether a pointer can participate in a runtime bounds check.
630 /// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
631 /// by adding run-time checks (overflow checks) if necessary.
632 static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
633 const ValueToValueMap &Strides, Value *Ptr,
634 Loop *L, bool Assume) {
635 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
636
637 // The bounds for a loop-invariant pointer are trivial.
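// (It addresses a single location for the entire loop, so no AddRec analysis
// is needed to bound it.)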
638 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 639 return true; 640 641 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 642 643 if (!AR && Assume) 644 AR = PSE.getAsAddRec(Ptr); 645 646 if (!AR) 647 return false; 648 649 return AR->isAffine(); 650 } 651 652 /// Check whether a pointer address cannot wrap. 653 static bool isNoWrap(PredicatedScalarEvolution &PSE, 654 const ValueToValueMap &Strides, Value *Ptr, Type *AccessTy, 655 Loop *L) { 656 const SCEV *PtrScev = PSE.getSCEV(Ptr); 657 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 658 return true; 659 660 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides); 661 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW)) 662 return true; 663 664 return false; 665 } 666 667 static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, 668 function_ref<void(Value *)> AddPointer) { 669 SmallPtrSet<Value *, 8> Visited; 670 SmallVector<Value *> WorkList; 671 WorkList.push_back(StartPtr); 672 673 while (!WorkList.empty()) { 674 Value *Ptr = WorkList.pop_back_val(); 675 if (!Visited.insert(Ptr).second) 676 continue; 677 auto *PN = dyn_cast<PHINode>(Ptr); 678 // SCEV does not look through non-header PHIs inside the loop. Such phis 679 // can be analyzed by adding separate accesses for each incoming pointer 680 // value. 681 if (PN && InnermostLoop.contains(PN->getParent()) && 682 PN->getParent() != InnermostLoop.getHeader()) { 683 for (const Use &Inc : PN->incoming_values()) 684 WorkList.push_back(Inc); 685 } else 686 AddPointer(Ptr); 687 } 688 } 689 690 bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck, 691 MemAccessInfo Access, Type *AccessTy, 692 const ValueToValueMap &StridesMap, 693 DenseMap<Value *, unsigned> &DepSetId, 694 Loop *TheLoop, unsigned &RunningDepId, 695 unsigned ASId, bool ShouldCheckWrap, 696 bool Assume) { 697 Value *Ptr = Access.getPointer(); 698 699 if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume)) 700 return false; 701 702 // When we run after a failing dependency check we have to make sure 703 // we don't have wrapping pointers. 704 if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) { 705 auto *Expr = PSE.getSCEV(Ptr); 706 if (!Assume || !isa<SCEVAddRecExpr>(Expr)) 707 return false; 708 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 709 } 710 711 // The id of the dependence set. 712 unsigned DepId; 713 714 if (isDependencyCheckNeeded()) { 715 Value *Leader = DepCands.getLeaderValue(Access).getPointer(); 716 unsigned &LeaderId = DepSetId[Leader]; 717 if (!LeaderId) 718 LeaderId = RunningDepId++; 719 DepId = LeaderId; 720 } else 721 // Each access has its own dependence set. 722 DepId = RunningDepId++; 723 724 bool IsWrite = Access.getInt(); 725 RtCheck.insert(TheLoop, Ptr, AccessTy, IsWrite, DepId, ASId, StridesMap, PSE); 726 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n'); 727 728 return true; 729 } 730 731 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck, 732 ScalarEvolution *SE, Loop *TheLoop, 733 const ValueToValueMap &StridesMap, 734 Value *&UncomputablePtr, bool ShouldCheckWrap) { 735 // Find pointers with computable bounds. We are going to use this information 736 // to place a runtime bound check. 737 bool CanDoRT = true; 738 739 bool MayNeedRTCheck = false; 740 if (!IsRTCheckAnalysisNeeded) return true; 741 742 bool IsDepCheckNeeded = isDependencyCheckNeeded(); 743 744 // We assign a consecutive id to access from different alias sets. 
745 // Accesses between different groups don't need to be checked.
746 unsigned ASId = 0;
747 for (auto &AS : AST) {
748 int NumReadPtrChecks = 0;
749 int NumWritePtrChecks = 0;
750 bool CanDoAliasSetRT = true;
751 ++ASId;
752
753 // We assign consecutive ids to accesses from different dependence sets.
754 // Accesses within the same set don't need a runtime check.
755 unsigned RunningDepId = 1;
756 DenseMap<Value *, unsigned> DepSetId;
757
758 SmallVector<MemAccessInfo, 4> Retries;
759
760 // First, count how many write and read accesses are in the alias set. Also
761 // collect MemAccessInfos for later.
762 SmallVector<MemAccessInfo, 4> AccessInfos;
763 for (const auto &A : AS) {
764 Value *Ptr = A.getValue();
765 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
766
767 if (IsWrite)
768 ++NumWritePtrChecks;
769 else
770 ++NumReadPtrChecks;
771 AccessInfos.emplace_back(Ptr, IsWrite);
772 }
773
774 // We do not need runtime checks for this alias set if there are no writes
775 // or a single write and no reads.
776 if (NumWritePtrChecks == 0 ||
777 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
778 assert((AS.size() <= 1 ||
779 all_of(AS,
780 [this](auto AC) {
781 MemAccessInfo AccessWrite(AC.getValue(), true);
782 return DepCands.findValue(AccessWrite) == DepCands.end();
783 })) &&
784 "Can only skip updating CanDoRT below, if all entries in AS "
785 "are reads or there is at most 1 entry");
786 continue;
787 }
788
789 for (auto &Access : AccessInfos) {
790 for (auto &AccessTy : Accesses[Access]) {
791 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
792 DepSetId, TheLoop, RunningDepId, ASId,
793 ShouldCheckWrap, false)) {
794 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
795 << *Access.getPointer() << '\n');
796 Retries.push_back(Access);
797 CanDoAliasSetRT = false;
798 }
799 }
800 }
801
802 // Note that this function computes CanDoRT and MayNeedRTCheck
803 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
804 // we have a pointer for which we couldn't find the bounds but we don't
805 // actually need to emit any checks so it does not matter.
806 //
807 // We need runtime checks for this alias set if there are at least 2
808 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
809 // any bound checks (because in that case the number of dependence sets is
810 // incomplete).
811 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
812
813 // We need to perform run-time alias checks, but some pointers had bounds
814 // that couldn't be checked.
815 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
816 // Reset the CanDoAliasSetRT flag and retry all accesses that have failed.
817 // We know that we need these checks, so we can now be more aggressive
818 // and add further checks if required (overflow checks).
819 CanDoAliasSetRT = true;
820 for (auto Access : Retries) {
821 for (auto &AccessTy : Accesses[Access]) {
822 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
823 DepSetId, TheLoop, RunningDepId, ASId,
824 ShouldCheckWrap, /*Assume=*/true)) {
825 CanDoAliasSetRT = false;
826 UncomputablePtr = Access.getPointer();
827 break;
828 }
829 }
830 }
831 }
832
833 CanDoRT &= CanDoAliasSetRT;
834 MayNeedRTCheck |= NeedsAliasSetRTCheck;
835 ++ASId;
836 }
837
838 // If the pointers that we would use for the bounds comparison have different
839 // address spaces, assume the values aren't directly comparable, so we can't
840 // use them for the runtime check.
We also have to assume they could 841 // overlap. In the future there should be metadata for whether address spaces 842 // are disjoint. 843 unsigned NumPointers = RtCheck.Pointers.size(); 844 for (unsigned i = 0; i < NumPointers; ++i) { 845 for (unsigned j = i + 1; j < NumPointers; ++j) { 846 // Only need to check pointers between two different dependency sets. 847 if (RtCheck.Pointers[i].DependencySetId == 848 RtCheck.Pointers[j].DependencySetId) 849 continue; 850 // Only need to check pointers in the same alias set. 851 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId) 852 continue; 853 854 Value *PtrI = RtCheck.Pointers[i].PointerValue; 855 Value *PtrJ = RtCheck.Pointers[j].PointerValue; 856 857 unsigned ASi = PtrI->getType()->getPointerAddressSpace(); 858 unsigned ASj = PtrJ->getType()->getPointerAddressSpace(); 859 if (ASi != ASj) { 860 LLVM_DEBUG( 861 dbgs() << "LAA: Runtime check would require comparison between" 862 " different address spaces\n"); 863 return false; 864 } 865 } 866 } 867 868 if (MayNeedRTCheck && CanDoRT) 869 RtCheck.generateChecks(DepCands, IsDepCheckNeeded); 870 871 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks() 872 << " pointer comparisons.\n"); 873 874 // If we can do run-time checks, but there are no checks, no runtime checks 875 // are needed. This can happen when all pointers point to the same underlying 876 // object for example. 877 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck; 878 879 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT; 880 if (!CanDoRTIfNeeded) 881 RtCheck.reset(); 882 return CanDoRTIfNeeded; 883 } 884 885 void AccessAnalysis::processMemAccesses() { 886 // We process the set twice: first we process read-write pointers, last we 887 // process read-only pointers. This allows us to skip dependence tests for 888 // read-only pointers. 889 890 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n"); 891 LLVM_DEBUG(dbgs() << " AST: "; AST.dump()); 892 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n"); 893 LLVM_DEBUG({ 894 for (auto A : Accesses) 895 dbgs() << "\t" << *A.first.getPointer() << " (" 896 << (A.first.getInt() 897 ? "write" 898 : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only" 899 : "read")) 900 << ")\n"; 901 }); 902 903 // The AliasSetTracker has nicely partitioned our pointers by metadata 904 // compatibility and potential for underlying-object overlap. As a result, we 905 // only need to check for potential pointer dependencies within each alias 906 // set. 907 for (const auto &AS : AST) { 908 // Note that both the alias-set tracker and the alias sets themselves used 909 // linked lists internally and so the iteration order here is deterministic 910 // (matching the original instruction order within each set). 911 912 bool SetHasWrite = false; 913 914 // Map of pointers to last access encountered. 915 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap; 916 UnderlyingObjToAccessMap ObjToLastAccess; 917 918 // Set of access to check after all writes have been processed. 919 PtrAccessMap DeferredAccesses; 920 921 // Iterate over each alias set twice, once to process read/write pointers, 922 // and then to process read-only pointers. 923 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) { 924 bool UseDeferred = SetIteration > 0; 925 PtrAccessMap &S = UseDeferred ? 
DeferredAccesses : Accesses;
926
927 for (const auto &AV : AS) {
928 Value *Ptr = AV.getValue();
929
930 // For a single memory access in AliasSetTracker, Accesses may contain
931 // both read and write, and they both need to be handled for CheckDeps.
932 for (const auto &AC : S) {
933 if (AC.first.getPointer() != Ptr)
934 continue;
935
936 bool IsWrite = AC.first.getInt();
937
938 // If we're using the deferred access set, then it contains only
939 // reads.
940 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
941 if (UseDeferred && !IsReadOnlyPtr)
942 continue;
943 // Otherwise, the pointer must be in the PtrAccessSet, either as a
944 // read or a write.
945 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
946 S.count(MemAccessInfo(Ptr, false))) &&
947 "Alias-set pointer not in the access set?");
948
949 MemAccessInfo Access(Ptr, IsWrite);
950 DepCands.insert(Access);
951
952 // Memorize read-only pointers for later processing and skip them in
953 // the first round (they need to be checked after we have seen all
954 // write pointers). Note: we also mark pointers that are not
955 // consecutive as "read-only" pointers (so that we check
956 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
957 if (!UseDeferred && IsReadOnlyPtr) {
958 // We only use the pointer keys, the types vector values don't
959 // matter.
960 DeferredAccesses.insert({Access, {}});
961 continue;
962 }
963
964 // If this is a write, check other reads and writes for conflicts. If
965 // this is a read, only check other writes for conflicts (but only if
966 // there is no other write to the ptr - this is an optimization to
967 // catch "a[i] = a[i] + " without having to do a dependence check).
968 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
969 CheckDeps.push_back(Access);
970 IsRTCheckAnalysisNeeded = true;
971 }
972
973 if (IsWrite)
974 SetHasWrite = true;
975
976 // Create sets of pointers connected by a shared alias set and
977 // underlying object.
978 typedef SmallVector<const Value *, 16> ValueVector;
979 ValueVector TempObjects;
980
981 getUnderlyingObjects(Ptr, TempObjects, LI);
982 LLVM_DEBUG(dbgs()
983 << "Underlying objects for pointer " << *Ptr << "\n");
984 for (const Value *UnderlyingObj : TempObjects) {
985 // nullptr never aliases anything; don't join sets for pointers that
986 // have "null" in their UnderlyingObjects list.
987 if (isa<ConstantPointerNull>(UnderlyingObj) &&
988 !NullPointerIsDefined(
989 TheLoop->getHeader()->getParent(),
990 UnderlyingObj->getType()->getPointerAddressSpace()))
991 continue;
992
993 UnderlyingObjToAccessMap::iterator Prev =
994 ObjToLastAccess.find(UnderlyingObj);
995 if (Prev != ObjToLastAccess.end())
996 DepCands.unionSets(Access, Prev->second);
997
998 ObjToLastAccess[UnderlyingObj] = Access;
999 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1000 }
1001 }
1002 }
1003 }
1004 }
1005 }
1006
1007 static bool isInBoundsGep(Value *Ptr) {
1008 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
1009 return GEP->isInBounds();
1010 return false;
1011 }
1012
1013 /// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1014 /// i.e. monotonically increasing/decreasing.
1015 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1016 PredicatedScalarEvolution &PSE, const Loop *L) {
1017 // FIXME: This should probably only return true for NUW.
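// NoWrapMask matches any of the no-wrap flags (NW, NUW or NSW), hence the
// FIXME above.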
1018 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1019 return true;
1020
1021 // Scalar evolution does not propagate the non-wrapping flags to values that
1022 // are derived from a non-wrapping induction variable because non-wrapping
1023 // could be flow-sensitive.
1024 //
1025 // Look through the potentially overflowing instruction to try to prove
1026 // non-wrapping for the *specific* value of Ptr.
1027
1028 // The arithmetic implied by an inbounds GEP can't overflow.
1029 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1030 if (!GEP || !GEP->isInBounds())
1031 return false;
1032
1033 // Make sure there is only one non-const index and analyze that.
1034 Value *NonConstIndex = nullptr;
1035 for (Value *Index : GEP->indices())
1036 if (!isa<ConstantInt>(Index)) {
1037 if (NonConstIndex)
1038 return false;
1039 NonConstIndex = Index;
1040 }
1041 if (!NonConstIndex)
1042 // The recurrence is on the pointer, ignore for now.
1043 return false;
1044
1045 // The index in GEP is signed. It is non-wrapping if it's derived from an NSW
1046 // AddRec using an NSW operation.
1047 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1048 if (OBO->hasNoSignedWrap() &&
1049 // Assume the other operand is constant so that the AddRec can be
1050 // easily found.
1051 isa<ConstantInt>(OBO->getOperand(1))) {
1052 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1053
1054 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1055 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1056 }
1057
1058 return false;
1059 }
1060
1061 /// Check whether the access through \p Ptr has a constant stride.
1062 int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
1063 Value *Ptr, const Loop *Lp,
1064 const ValueToValueMap &StridesMap, bool Assume,
1065 bool ShouldCheckWrap) {
1066 Type *Ty = Ptr->getType();
1067 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1068
1069 if (isa<ScalableVectorType>(AccessTy)) {
1070 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1071 << "\n");
1072 return 0;
1073 }
1074
1075 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1076
1077 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1078 if (Assume && !AR)
1079 AR = PSE.getAsAddRec(Ptr);
1080
1081 if (!AR) {
1082 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1083 << " SCEV: " << *PtrScev << "\n");
1084 return 0;
1085 }
1086
1087 // The access function must stride over the innermost loop.
1088 if (Lp != AR->getLoop()) {
1089 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1090 << *Ptr << " SCEV: " << *AR << "\n");
1091 return 0;
1092 }
1093
1094 // The address calculation must not wrap. Otherwise, a dependence could be
1095 // inverted.
1096 // An inbounds getelementptr that is an AddRec with a unit stride
1097 // cannot wrap by definition. The unit stride requirement is checked later.
1098 // A getelementptr without an inbounds attribute and with a unit stride would
1099 // have to access the pointer value "0", which is undefined behavior in address
1100 // space 0, therefore we can also vectorize this case.
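// Only give up (or, if Assume is set, add an overflow predicate) when none of
// the arguments above applies: the AddRec may wrap, the GEP is not inbounds,
// and null is a defined address in this address space.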
1101 unsigned AddrSpace = Ty->getPointerAddressSpace(); 1102 bool IsInBoundsGEP = isInBoundsGep(Ptr); 1103 bool IsNoWrapAddRec = !ShouldCheckWrap || 1104 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) || 1105 isNoWrapAddRec(Ptr, AR, PSE, Lp); 1106 if (!IsNoWrapAddRec && !IsInBoundsGEP && 1107 NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace)) { 1108 if (Assume) { 1109 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 1110 IsNoWrapAddRec = true; 1111 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n" 1112 << "LAA: Pointer: " << *Ptr << "\n" 1113 << "LAA: SCEV: " << *AR << "\n" 1114 << "LAA: Added an overflow assumption\n"); 1115 } else { 1116 LLVM_DEBUG( 1117 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space " 1118 << *Ptr << " SCEV: " << *AR << "\n"); 1119 return 0; 1120 } 1121 } 1122 1123 // Check the step is constant. 1124 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 1125 1126 // Calculate the pointer stride and check if it is constant. 1127 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 1128 if (!C) { 1129 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr 1130 << " SCEV: " << *AR << "\n"); 1131 return 0; 1132 } 1133 1134 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 1135 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy); 1136 int64_t Size = AllocSize.getFixedSize(); 1137 const APInt &APStepVal = C->getAPInt(); 1138 1139 // Huge step value - give up. 1140 if (APStepVal.getBitWidth() > 64) 1141 return 0; 1142 1143 int64_t StepVal = APStepVal.getSExtValue(); 1144 1145 // Strided access. 1146 int64_t Stride = StepVal / Size; 1147 int64_t Rem = StepVal % Size; 1148 if (Rem) 1149 return 0; 1150 1151 // If the SCEV could wrap but we have an inbounds gep with a unit stride we 1152 // know we can't "wrap around the address space". In case of address space 1153 // zero we know that this won't happen without triggering undefined behavior. 1154 if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 && 1155 (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(), 1156 AddrSpace))) { 1157 if (Assume) { 1158 // We can avoid this case by adding a run-time check. 1159 LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either " 1160 << "inbounds or in address space 0 may wrap:\n" 1161 << "LAA: Pointer: " << *Ptr << "\n" 1162 << "LAA: SCEV: " << *AR << "\n" 1163 << "LAA: Added an overflow assumption\n"); 1164 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 1165 } else 1166 return 0; 1167 } 1168 1169 return Stride; 1170 } 1171 1172 Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, 1173 Value *PtrB, const DataLayout &DL, 1174 ScalarEvolution &SE, bool StrictCheck, 1175 bool CheckType) { 1176 assert(PtrA && PtrB && "Expected non-nullptr pointers."); 1177 assert(cast<PointerType>(PtrA->getType()) 1178 ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type"); 1179 assert(cast<PointerType>(PtrB->getType()) 1180 ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type"); 1181 1182 // Make sure that A and B are different pointers. 1183 if (PtrA == PtrB) 1184 return 0; 1185 1186 // Make sure that the element types are the same if required. 1187 if (CheckType && ElemTyA != ElemTyB) 1188 return None; 1189 1190 unsigned ASA = PtrA->getType()->getPointerAddressSpace(); 1191 unsigned ASB = PtrB->getType()->getPointerAddressSpace(); 1192 1193 // Check that the address spaces match. 
1194 if (ASA != ASB) 1195 return None; 1196 unsigned IdxWidth = DL.getIndexSizeInBits(ASA); 1197 1198 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0); 1199 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); 1200 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); 1201 1202 int Val; 1203 if (PtrA1 == PtrB1) { 1204 // Retrieve the address space again as pointer stripping now tracks through 1205 // `addrspacecast`. 1206 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace(); 1207 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace(); 1208 // Check that the address spaces match and that the pointers are valid. 1209 if (ASA != ASB) 1210 return None; 1211 1212 IdxWidth = DL.getIndexSizeInBits(ASA); 1213 OffsetA = OffsetA.sextOrTrunc(IdxWidth); 1214 OffsetB = OffsetB.sextOrTrunc(IdxWidth); 1215 1216 OffsetB -= OffsetA; 1217 Val = OffsetB.getSExtValue(); 1218 } else { 1219 // Otherwise compute the distance with SCEV between the base pointers. 1220 const SCEV *PtrSCEVA = SE.getSCEV(PtrA); 1221 const SCEV *PtrSCEVB = SE.getSCEV(PtrB); 1222 const auto *Diff = 1223 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA)); 1224 if (!Diff) 1225 return None; 1226 Val = Diff->getAPInt().getSExtValue(); 1227 } 1228 int Size = DL.getTypeStoreSize(ElemTyA); 1229 int Dist = Val / Size; 1230 1231 // Ensure that the calculated distance matches the type-based one after all 1232 // the bitcasts removal in the provided pointers. 1233 if (!StrictCheck || Dist * Size == Val) 1234 return Dist; 1235 return None; 1236 } 1237 1238 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, 1239 const DataLayout &DL, ScalarEvolution &SE, 1240 SmallVectorImpl<unsigned> &SortedIndices) { 1241 assert(llvm::all_of( 1242 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 1243 "Expected list of pointer operands."); 1244 // Walk over the pointers, and map each of them to an offset relative to 1245 // first pointer in the array. 1246 Value *Ptr0 = VL[0]; 1247 1248 using DistOrdPair = std::pair<int64_t, int>; 1249 auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) { 1250 return L.first < R.first; 1251 }; 1252 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare); 1253 Offsets.emplace(0, 0); 1254 int Cnt = 1; 1255 bool IsConsecutive = true; 1256 for (auto *Ptr : VL.drop_front()) { 1257 Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE, 1258 /*StrictCheck=*/true); 1259 if (!Diff) 1260 return false; 1261 1262 // Check if the pointer with the same offset is found. 1263 int64_t Offset = *Diff; 1264 auto Res = Offsets.emplace(Offset, Cnt); 1265 if (!Res.second) 1266 return false; 1267 // Consecutive order if the inserted element is the last one. 1268 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end(); 1269 ++Cnt; 1270 } 1271 SortedIndices.clear(); 1272 if (!IsConsecutive) { 1273 // Fill SortedIndices array only if it is non-consecutive. 1274 SortedIndices.resize(VL.size()); 1275 Cnt = 0; 1276 for (const std::pair<int64_t, int> &Pair : Offsets) { 1277 SortedIndices[Cnt] = Pair.second; 1278 ++Cnt; 1279 } 1280 } 1281 return true; 1282 } 1283 1284 /// Returns true if the memory operations \p A and \p B are consecutive. 
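/// That is, \p B accesses the element immediately after \p A: the pointer
/// difference is exactly one element (e.g. loads of A[i] and A[i+1]).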
1285 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, 1286 ScalarEvolution &SE, bool CheckType) { 1287 Value *PtrA = getLoadStorePointerOperand(A); 1288 Value *PtrB = getLoadStorePointerOperand(B); 1289 if (!PtrA || !PtrB) 1290 return false; 1291 Type *ElemTyA = getLoadStoreType(A); 1292 Type *ElemTyB = getLoadStoreType(B); 1293 Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE, 1294 /*StrictCheck=*/true, CheckType); 1295 return Diff && *Diff == 1; 1296 } 1297 1298 void MemoryDepChecker::addAccess(StoreInst *SI) { 1299 visitPointers(SI->getPointerOperand(), *InnermostLoop, 1300 [this, SI](Value *Ptr) { 1301 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx); 1302 InstMap.push_back(SI); 1303 ++AccessIdx; 1304 }); 1305 } 1306 1307 void MemoryDepChecker::addAccess(LoadInst *LI) { 1308 visitPointers(LI->getPointerOperand(), *InnermostLoop, 1309 [this, LI](Value *Ptr) { 1310 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx); 1311 InstMap.push_back(LI); 1312 ++AccessIdx; 1313 }); 1314 } 1315 1316 MemoryDepChecker::VectorizationSafetyStatus 1317 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) { 1318 switch (Type) { 1319 case NoDep: 1320 case Forward: 1321 case BackwardVectorizable: 1322 return VectorizationSafetyStatus::Safe; 1323 1324 case Unknown: 1325 return VectorizationSafetyStatus::PossiblySafeWithRtChecks; 1326 case ForwardButPreventsForwarding: 1327 case Backward: 1328 case BackwardVectorizableButPreventsForwarding: 1329 return VectorizationSafetyStatus::Unsafe; 1330 } 1331 llvm_unreachable("unexpected DepType!"); 1332 } 1333 1334 bool MemoryDepChecker::Dependence::isBackward() const { 1335 switch (Type) { 1336 case NoDep: 1337 case Forward: 1338 case ForwardButPreventsForwarding: 1339 case Unknown: 1340 return false; 1341 1342 case BackwardVectorizable: 1343 case Backward: 1344 case BackwardVectorizableButPreventsForwarding: 1345 return true; 1346 } 1347 llvm_unreachable("unexpected DepType!"); 1348 } 1349 1350 bool MemoryDepChecker::Dependence::isPossiblyBackward() const { 1351 return isBackward() || Type == Unknown; 1352 } 1353 1354 bool MemoryDepChecker::Dependence::isForward() const { 1355 switch (Type) { 1356 case Forward: 1357 case ForwardButPreventsForwarding: 1358 return true; 1359 1360 case NoDep: 1361 case Unknown: 1362 case BackwardVectorizable: 1363 case Backward: 1364 case BackwardVectorizableButPreventsForwarding: 1365 return false; 1366 } 1367 llvm_unreachable("unexpected DepType!"); 1368 } 1369 1370 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance, 1371 uint64_t TypeByteSize) { 1372 // If loads occur at a distance that is not a multiple of a feasible vector 1373 // factor store-load forwarding does not take place. 1374 // Positive dependences might cause troubles because vectorizing them might 1375 // prevent store-load forwarding making vectorized code run a lot slower. 1376 // a[i] = a[i-3] ^ a[i-8]; 1377 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and 1378 // hence on your typical architecture store-load forwarding does not take 1379 // place. Vectorizing in such cases does not make sense. 1380 // Store-load forwarding distance. 1381 1382 // After this many iterations store-to-load forwarding conflicts should not 1383 // cause any slowdowns. 1384 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize; 1385 // Maximum vector factor. 
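// Start from the largest candidate: the widest vector of this element type in
// bytes, capped by the maximum safe dependence distance found so far.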
1386 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1387 VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);
1388
1389 // Compute the smallest VF at which the store and load would be misaligned.
1390 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1391 VF *= 2) {
1392 // If the number of vector iterations between the store and the load is
1393 // small, we could incur conflicts.
1394 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1395 MaxVFWithoutSLForwardIssues = (VF >> 1);
1396 break;
1397 }
1398 }
1399
1400 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1401 LLVM_DEBUG(
1402 dbgs() << "LAA: Distance " << Distance
1403 << " that could cause a store-load forwarding conflict\n");
1404 return true;
1405 }
1406
1407 if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
1408 MaxVFWithoutSLForwardIssues !=
1409 VectorizerParams::MaxVectorWidth * TypeByteSize)
1410 MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
1411 return false;
1412 }
1413
1414 void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1415 if (Status < S)
1416 Status = S;
1417 }
1418
1419 /// Given a non-constant (unknown) dependence-distance \p Dist between two
1420 /// memory accesses that have the same stride whose absolute value is given
1421 /// in \p Stride, and that have the same type size \p TypeByteSize,
1422 /// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it
1423 /// is possible to prove statically that the dependence distance is larger
1424 /// than the range that the accesses will travel through the execution of
1425 /// the loop. If so, return true; false otherwise. This is useful for
1426 /// example in loops such as the following (PR31098):
1427 /// for (i = 0; i < D; ++i) {
1428 /// = out[i];
1429 /// out[i+D] =
1430 /// }
1431 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1432 const SCEV &BackedgeTakenCount,
1433 const SCEV &Dist, uint64_t Stride,
1434 uint64_t TypeByteSize) {
1435
1436 // If we can prove that
1437 // (**) |Dist| > BackedgeTakenCount * Step
1438 // where Step is the absolute stride of the memory accesses in bytes,
1439 // then there is no dependence.
1440 //
1441 // Rationale:
1442 // We basically want to check if the absolute distance (|Dist/Step|)
1443 // is >= the loop iteration count (or > BackedgeTakenCount).
1444 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1445 // Section 4.2.1). Note that for vectorization it is sufficient to prove
1446 // that the dependence distance is >= VF; this is checked elsewhere.
1447 // But in some cases we can prune unknown dependence distances early, and
1448 // even before selecting the VF, and without a runtime test, by comparing
1449 // the distance against the loop iteration count. Since the vectorized code
1450 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1451 // also guarantees that distance >= VF.
1452 // 1453 const uint64_t ByteStride = Stride * TypeByteSize; 1454 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride); 1455 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step); 1456 1457 const SCEV *CastedDist = &Dist; 1458 const SCEV *CastedProduct = Product; 1459 uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType()); 1460 uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType()); 1461 1462 // The dependence distance can be positive/negative, so we sign extend Dist; 1463 // The multiplication of the absolute stride in bytes and the 1464 // backedgeTakenCount is non-negative, so we zero extend Product. 1465 if (DistTypeSize > ProductTypeSize) 1466 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType()); 1467 else 1468 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType()); 1469 1470 // Is Dist - (BackedgeTakenCount * Step) > 0 ? 1471 // (If so, then we have proven (**) because |Dist| >= Dist) 1472 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct); 1473 if (SE.isKnownPositive(Minus)) 1474 return true; 1475 1476 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ? 1477 // (If so, then we have proven (**) because |Dist| >= -1*Dist) 1478 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist); 1479 Minus = SE.getMinusSCEV(NegDist, CastedProduct); 1480 if (SE.isKnownPositive(Minus)) 1481 return true; 1482 1483 return false; 1484 } 1485 1486 /// Check the dependence for two accesses with the same stride \p Stride. 1487 /// \p Distance is the positive distance and \p TypeByteSize is type size in 1488 /// bytes. 1489 /// 1490 /// \returns true if they are independent. 1491 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, 1492 uint64_t TypeByteSize) { 1493 assert(Stride > 1 && "The stride must be greater than 1"); 1494 assert(TypeByteSize > 0 && "The type size in byte must be non-zero"); 1495 assert(Distance > 0 && "The distance must be non-zero"); 1496 1497 // Skip if the distance is not multiple of type byte size. 1498 if (Distance % TypeByteSize) 1499 return false; 1500 1501 uint64_t ScaledDist = Distance / TypeByteSize; 1502 1503 // No dependence if the scaled distance is not multiple of the stride. 1504 // E.g. 1505 // for (i = 0; i < 1024 ; i += 4) 1506 // A[i+2] = A[i] + 1; 1507 // 1508 // Two accesses in memory (scaled distance is 2, stride is 4): 1509 // | A[0] | | | | A[4] | | | | 1510 // | | | A[2] | | | | A[6] | | 1511 // 1512 // E.g. 1513 // for (i = 0; i < 1024 ; i += 3) 1514 // A[i+4] = A[i] + 1; 1515 // 1516 // Two accesses in memory (scaled distance is 4, stride is 3): 1517 // | A[0] | | | A[3] | | | A[6] | | | 1518 // | | | | | A[4] | | | A[7] | | 1519 return ScaledDist % Stride; 1520 } 1521 1522 MemoryDepChecker::Dependence::DepType 1523 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, 1524 const MemAccessInfo &B, unsigned BIdx, 1525 const ValueToValueMap &Strides) { 1526 assert (AIdx < BIdx && "Must pass arguments in program order"); 1527 1528 Value *APtr = A.getPointer(); 1529 Value *BPtr = B.getPointer(); 1530 bool AIsWrite = A.getInt(); 1531 bool BIsWrite = B.getInt(); 1532 Type *ATy = getLoadStoreType(InstMap[AIdx]); 1533 Type *BTy = getLoadStoreType(InstMap[BIdx]); 1534 1535 // Two reads are independent. 1536 if (!AIsWrite && !BIsWrite) 1537 return Dependence::NoDep; 1538 1539 // We cannot check pointers in different address spaces. 
1540 if (APtr->getType()->getPointerAddressSpace() != 1541 BPtr->getType()->getPointerAddressSpace()) 1542 return Dependence::Unknown; 1543 1544 int64_t StrideAPtr = 1545 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true); 1546 int64_t StrideBPtr = 1547 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true); 1548 1549 const SCEV *Src = PSE.getSCEV(APtr); 1550 const SCEV *Sink = PSE.getSCEV(BPtr); 1551 1552 // If the induction step is negative we have to invert source and sink of the 1553 // dependence. 1554 if (StrideAPtr < 0) { 1555 std::swap(APtr, BPtr); 1556 std::swap(ATy, BTy); 1557 std::swap(Src, Sink); 1558 std::swap(AIsWrite, BIsWrite); 1559 std::swap(AIdx, BIdx); 1560 std::swap(StrideAPtr, StrideBPtr); 1561 } 1562 1563 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src); 1564 1565 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink 1566 << "(Induction step: " << StrideAPtr << ")\n"); 1567 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to " 1568 << *InstMap[BIdx] << ": " << *Dist << "\n"); 1569 1570 // Need accesses with constant stride. We don't want to vectorize 1571 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in 1572 // the address space. 1573 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){ 1574 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n"); 1575 return Dependence::Unknown; 1576 } 1577 1578 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); 1579 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy); 1580 bool HasSameSize = 1581 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy); 1582 uint64_t Stride = std::abs(StrideAPtr); 1583 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist); 1584 if (!C) { 1585 if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize && 1586 isSafeDependenceDistance(DL, *(PSE.getSE()), 1587 *(PSE.getBackedgeTakenCount()), *Dist, Stride, 1588 TypeByteSize)) 1589 return Dependence::NoDep; 1590 1591 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); 1592 FoundNonConstantDistanceDependence = true; 1593 return Dependence::Unknown; 1594 } 1595 1596 const APInt &Val = C->getAPInt(); 1597 int64_t Distance = Val.getSExtValue(); 1598 1599 // Attempt to prove strided accesses independent. 1600 if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize && 1601 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) { 1602 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); 1603 return Dependence::NoDep; 1604 } 1605 1606 // Negative distances are not plausible dependencies. 1607 if (Val.isNegative()) { 1608 bool IsTrueDataDependence = (AIsWrite && !BIsWrite); 1609 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1610 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) || 1611 !HasSameSize)) { 1612 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); 1613 return Dependence::ForwardButPreventsForwarding; 1614 } 1615 1616 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n"); 1617 return Dependence::Forward; 1618 } 1619 1620 // Write to the same location with the same size. 
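  // For illustration (hypothetical loop): in
  //   for (i = 0; i < n; i++) A[i] = A[i] + 1;
  // the load and the store of A[i] have distance zero. With equal access
  // sizes this is a loop-independent dependence and is classified as Forward
  // below.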
1621   if (Val == 0) {
1622     if (HasSameSize)
1623       return Dependence::Forward;
1624     LLVM_DEBUG(
1625         dbgs() << "LAA: Zero dependence difference but different type sizes\n");
1626     return Dependence::Unknown;
1627   }
1628
1629   assert(Val.isStrictlyPositive() && "Expect a positive value");
1630
1631   if (!HasSameSize) {
1632     LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
1633                          "different type sizes\n");
1634     return Dependence::Unknown;
1635   }
1636
1637   // Bail out early if passed-in parameters make vectorization not feasible.
1638   unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1639                            VectorizerParams::VectorizationFactor : 1);
1640   unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1641                            VectorizerParams::VectorizationInterleave : 1);
1642   // The minimum number of iterations for a vectorized/unrolled version.
1643   unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1644
1645   // It's not vectorizable if the distance is smaller than the minimum distance
1646   // needed for a vectorized/unrolled version. Each vectorized iteration other
1647   // than the last needs TypeByteSize * Stride of distance; the last iteration
1648   // needs only TypeByteSize (there is no need to add the trailing gap).
1649   //
1650   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1651   //      foo(int *A) {
1652   //        int *B = (int *)((char *)A + 14);
1653   //        for (i = 0 ; i < 1024 ; i += 2)
1654   //          B[i] = A[i] + 1;
1655   //      }
1656   //
1657   // Two accesses in memory (stride is 2):
1658   //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
1659   //                          | B[0] |      | B[2] |      | B[4] |
1660   //
1661   // Distance needed for vectorizing iterations except the last iteration:
1662   // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
1663   // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1664   //
1665   // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1666   // 12, which is less than the distance of 14.
1667   //
1668   // If MinNumIter is 4 (say, if a user forces the vectorization factor to be
1669   // 4), the minimum distance needed is 28, which is greater than the distance
1670   // of 14. It is not safe to do vectorization.
1671   uint64_t MinDistanceNeeded =
1672       TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1673   if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1674     LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
1675                       << Distance << '\n');
1676     return Dependence::Backward;
1677   }
1678
1679   // Unsafe if the minimum distance needed is greater than max safe distance.
1680   if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1681     LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
1682                       << MinDistanceNeeded << " bytes of dependence distance");
1683     return Dependence::Backward;
1684   }
1685
1686   // Positive distance bigger than max vectorization factor.
1687   // FIXME: Should use max factor instead of max distance in bytes, which
1688   // cannot handle different types.
1689   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1690   //      void foo (int *A, char *B) {
1691   //        for (unsigned i = 0; i < 1024; i++) {
1692   //          A[i+2] = A[i] + 1;
1693   //          B[i+2] = B[i] + 1;
1694   //        }
1695   //      }
1696   //
1697   // This case is currently unsafe according to the max safe distance. If we
1698   // analyze the two accesses on array B, the max safe dependence distance
1699   // is 2. Then we analyze the accesses on array A, the minimum distance needed
1700   // is 8, which is greater than 2, so vectorization is forbidden. But in fact
1701   // both A and B could be vectorized by 2 iterations.
1702   MaxSafeDepDistBytes =
1703       std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);
1704
1705   bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
1706   if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1707       couldPreventStoreLoadForward(Distance, TypeByteSize))
1708     return Dependence::BackwardVectorizableButPreventsForwarding;
1709
1710   uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
1711   LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
1712                     << " with max VF = " << MaxVF << '\n');
1713   uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1714   MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
1715   return Dependence::BackwardVectorizable;
1716 }
1717
1718 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
1719                                    MemAccessInfoList &CheckDeps,
1720                                    const ValueToValueMap &Strides) {
1721
1722   MaxSafeDepDistBytes = -1;
1723   SmallPtrSet<MemAccessInfo, 8> Visited;
1724   for (MemAccessInfo CurAccess : CheckDeps) {
1725     if (Visited.count(CurAccess))
1726       continue;
1727
1728     // Get the relevant memory access set.
1729     EquivalenceClasses<MemAccessInfo>::iterator I =
1730         AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
1731
1732     // Check accesses within this set.
1733     EquivalenceClasses<MemAccessInfo>::member_iterator AI =
1734         AccessSets.member_begin(I);
1735     EquivalenceClasses<MemAccessInfo>::member_iterator AE =
1736         AccessSets.member_end();
1737
1738     // Check every access pair.
1739     while (AI != AE) {
1740       Visited.insert(*AI);
1741       bool AIIsWrite = AI->getInt();
1742       // Check loads only against the next equivalence class, but stores also
1743       // against other stores in the same equivalence class - to the same address.
1744       EquivalenceClasses<MemAccessInfo>::member_iterator OI =
1745           (AIIsWrite ? AI : std::next(AI));
1746       while (OI != AE) {
1747         // Check every accessing instruction pair in program order.
1748         for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
1749              I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
1750           // Scan all accesses of another equivalence class, but only the
1751           // subsequent accesses of the same equivalence class.
1752           for (std::vector<unsigned>::iterator
1753                I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
1754                I2E = (OI == AI ? I1E : Accesses[*OI].end());
1755                I2 != I2E; ++I2) {
1756             auto A = std::make_pair(&*AI, *I1);
1757             auto B = std::make_pair(&*OI, *I2);
1758
1759             assert(*I1 != *I2);
1760             if (*I1 > *I2)
1761               std::swap(A, B);
1762
1763             Dependence::DepType Type =
1764                 isDependent(*A.first, A.second, *B.first, B.second, Strides);
1765             mergeInStatus(Dependence::isSafeForVectorization(Type));
1766
1767             // Gather dependences unless we have accumulated MaxDependences
1768             // dependences. In that case return as soon as we find the first
1769             // unsafe dependence. This puts a limit on this quadratic
1770             // algorithm.
1771 if (RecordDependences) { 1772 if (Type != Dependence::NoDep) 1773 Dependences.push_back(Dependence(A.second, B.second, Type)); 1774 1775 if (Dependences.size() >= MaxDependences) { 1776 RecordDependences = false; 1777 Dependences.clear(); 1778 LLVM_DEBUG(dbgs() 1779 << "Too many dependences, stopped recording\n"); 1780 } 1781 } 1782 if (!RecordDependences && !isSafeForVectorization()) 1783 return false; 1784 } 1785 ++OI; 1786 } 1787 AI++; 1788 } 1789 } 1790 1791 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n"); 1792 return isSafeForVectorization(); 1793 } 1794 1795 SmallVector<Instruction *, 4> 1796 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const { 1797 MemAccessInfo Access(Ptr, isWrite); 1798 auto &IndexVector = Accesses.find(Access)->second; 1799 1800 SmallVector<Instruction *, 4> Insts; 1801 transform(IndexVector, 1802 std::back_inserter(Insts), 1803 [&](unsigned Idx) { return this->InstMap[Idx]; }); 1804 return Insts; 1805 } 1806 1807 const char *MemoryDepChecker::Dependence::DepName[] = { 1808 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward", 1809 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"}; 1810 1811 void MemoryDepChecker::Dependence::print( 1812 raw_ostream &OS, unsigned Depth, 1813 const SmallVectorImpl<Instruction *> &Instrs) const { 1814 OS.indent(Depth) << DepName[Type] << ":\n"; 1815 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n"; 1816 OS.indent(Depth + 2) << *Instrs[Destination] << "\n"; 1817 } 1818 1819 bool LoopAccessInfo::canAnalyzeLoop() { 1820 // We need to have a loop header. 1821 LLVM_DEBUG(dbgs() << "LAA: Found a loop in " 1822 << TheLoop->getHeader()->getParent()->getName() << ": " 1823 << TheLoop->getHeader()->getName() << '\n'); 1824 1825 // We can only analyze innermost loops. 1826 if (!TheLoop->isInnermost()) { 1827 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n"); 1828 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop"; 1829 return false; 1830 } 1831 1832 // We must have a single backedge. 1833 if (TheLoop->getNumBackEdges() != 1) { 1834 LLVM_DEBUG( 1835 dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1836 recordAnalysis("CFGNotUnderstood") 1837 << "loop control flow is not understood by analyzer"; 1838 return false; 1839 } 1840 1841 // ScalarEvolution needs to be able to find the exit count. 1842 const SCEV *ExitCount = PSE->getBackedgeTakenCount(); 1843 if (isa<SCEVCouldNotCompute>(ExitCount)) { 1844 recordAnalysis("CantComputeNumberOfIterations") 1845 << "could not determine number of loop iterations"; 1846 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n"); 1847 return false; 1848 } 1849 1850 return true; 1851 } 1852 1853 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI, 1854 const TargetLibraryInfo *TLI, 1855 DominatorTree *DT) { 1856 // Holds the Load and Store instructions. 1857 SmallVector<LoadInst *, 16> Loads; 1858 SmallVector<StoreInst *, 16> Stores; 1859 1860 // Holds all the different accesses in the loop. 1861 unsigned NumReads = 0; 1862 unsigned NumReadWrites = 0; 1863 1864 bool HasComplexMemInst = false; 1865 1866 // A runtime check is only legal to insert if there are no convergent calls. 
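  // (For instance, a loop body containing a convergent call, such as a GPU
  // barrier or shuffle intrinsic, must not gain a new control dependency, and
  // that is exactly what guarding the loop with runtime memory checks would
  // introduce; the example intrinsics are illustrative only.)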
1867 HasConvergentOp = false; 1868 1869 PtrRtChecking->Pointers.clear(); 1870 PtrRtChecking->Need = false; 1871 1872 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 1873 1874 const bool EnableMemAccessVersioningOfLoop = 1875 EnableMemAccessVersioning && 1876 !TheLoop->getHeader()->getParent()->hasOptSize(); 1877 1878 // For each block. 1879 for (BasicBlock *BB : TheLoop->blocks()) { 1880 // Scan the BB and collect legal loads and stores. Also detect any 1881 // convergent instructions. 1882 for (Instruction &I : *BB) { 1883 if (auto *Call = dyn_cast<CallBase>(&I)) { 1884 if (Call->isConvergent()) 1885 HasConvergentOp = true; 1886 } 1887 1888 // With both a non-vectorizable memory instruction and a convergent 1889 // operation, found in this loop, no reason to continue the search. 1890 if (HasComplexMemInst && HasConvergentOp) { 1891 CanVecMem = false; 1892 return; 1893 } 1894 1895 // Avoid hitting recordAnalysis multiple times. 1896 if (HasComplexMemInst) 1897 continue; 1898 1899 // If this is a load, save it. If this instruction can read from memory 1900 // but is not a load, then we quit. Notice that we don't handle function 1901 // calls that read or write. 1902 if (I.mayReadFromMemory()) { 1903 // Many math library functions read the rounding mode. We will only 1904 // vectorize a loop if it contains known function calls that don't set 1905 // the flag. Therefore, it is safe to ignore this read from memory. 1906 auto *Call = dyn_cast<CallInst>(&I); 1907 if (Call && getVectorIntrinsicIDForCall(Call, TLI)) 1908 continue; 1909 1910 // If the function has an explicit vectorized counterpart, we can safely 1911 // assume that it can be vectorized. 1912 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() && 1913 !VFDatabase::getMappings(*Call).empty()) 1914 continue; 1915 1916 auto *Ld = dyn_cast<LoadInst>(&I); 1917 if (!Ld) { 1918 recordAnalysis("CantVectorizeInstruction", Ld) 1919 << "instruction cannot be vectorized"; 1920 HasComplexMemInst = true; 1921 continue; 1922 } 1923 if (!Ld->isSimple() && !IsAnnotatedParallel) { 1924 recordAnalysis("NonSimpleLoad", Ld) 1925 << "read with atomic ordering or volatile read"; 1926 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n"); 1927 HasComplexMemInst = true; 1928 continue; 1929 } 1930 NumLoads++; 1931 Loads.push_back(Ld); 1932 DepChecker->addAccess(Ld); 1933 if (EnableMemAccessVersioningOfLoop) 1934 collectStridedAccess(Ld); 1935 continue; 1936 } 1937 1938 // Save 'store' instructions. Abort if other instructions write to memory. 1939 if (I.mayWriteToMemory()) { 1940 auto *St = dyn_cast<StoreInst>(&I); 1941 if (!St) { 1942 recordAnalysis("CantVectorizeInstruction", St) 1943 << "instruction cannot be vectorized"; 1944 HasComplexMemInst = true; 1945 continue; 1946 } 1947 if (!St->isSimple() && !IsAnnotatedParallel) { 1948 recordAnalysis("NonSimpleStore", St) 1949 << "write with atomic ordering or volatile write"; 1950 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n"); 1951 HasComplexMemInst = true; 1952 continue; 1953 } 1954 NumStores++; 1955 Stores.push_back(St); 1956 DepChecker->addAccess(St); 1957 if (EnableMemAccessVersioningOfLoop) 1958 collectStridedAccess(St); 1959 } 1960 } // Next instr. 1961 } // Next block. 1962 1963 if (HasComplexMemInst) { 1964 CanVecMem = false; 1965 return; 1966 } 1967 1968 // Now we have two lists that hold the loads and the stores. 1969 // Next, we find the pointers that they use. 1970 1971 // Check if we see any stores. 
If there are no stores, then we don't 1972 // care if the pointers are *restrict*. 1973 if (!Stores.size()) { 1974 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n"); 1975 CanVecMem = true; 1976 return; 1977 } 1978 1979 MemoryDepChecker::DepCandidates DependentAccesses; 1980 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE); 1981 1982 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects 1983 // multiple times on the same object. If the ptr is accessed twice, once 1984 // for read and once for write, it will only appear once (on the write 1985 // list). This is okay, since we are going to check for conflicts between 1986 // writes and between reads and writes, but not between reads and reads. 1987 SmallSet<std::pair<Value *, Type *>, 16> Seen; 1988 1989 // Record uniform store addresses to identify if we have multiple stores 1990 // to the same address. 1991 SmallPtrSet<Value *, 16> UniformStores; 1992 1993 for (StoreInst *ST : Stores) { 1994 Value *Ptr = ST->getPointerOperand(); 1995 1996 if (isUniform(Ptr)) 1997 HasDependenceInvolvingLoopInvariantAddress |= 1998 !UniformStores.insert(Ptr).second; 1999 2000 // If we did *not* see this pointer before, insert it to the read-write 2001 // list. At this phase it is only a 'write' list. 2002 Type *AccessTy = getLoadStoreType(ST); 2003 if (Seen.insert({Ptr, AccessTy}).second) { 2004 ++NumReadWrites; 2005 2006 MemoryLocation Loc = MemoryLocation::get(ST); 2007 // The TBAA metadata could have a control dependency on the predication 2008 // condition, so we cannot rely on it when determining whether or not we 2009 // need runtime pointer checks. 2010 if (blockNeedsPredication(ST->getParent(), TheLoop, DT)) 2011 Loc.AATags.TBAA = nullptr; 2012 2013 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop, 2014 [&Accesses, AccessTy, Loc](Value *Ptr) { 2015 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr); 2016 Accesses.addStore(NewLoc, AccessTy); 2017 }); 2018 } 2019 } 2020 2021 if (IsAnnotatedParallel) { 2022 LLVM_DEBUG( 2023 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency " 2024 << "checks.\n"); 2025 CanVecMem = true; 2026 return; 2027 } 2028 2029 for (LoadInst *LD : Loads) { 2030 Value *Ptr = LD->getPointerOperand(); 2031 // If we did *not* see this pointer before, insert it to the 2032 // read list. If we *did* see it before, then it is already in 2033 // the read-write list. This allows us to vectorize expressions 2034 // such as A[i] += x; Because the address of A[i] is a read-write 2035 // pointer. This only works if the index of A[i] is consecutive. 2036 // If the address of i is unknown (for example A[B[i]]) then we may 2037 // read a few words, modify, and write a few words, and some of the 2038 // words may be written to the same address. 2039 bool IsReadOnlyPtr = false; 2040 Type *AccessTy = getLoadStoreType(LD); 2041 if (Seen.insert({Ptr, AccessTy}).second || 2042 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)) { 2043 ++NumReads; 2044 IsReadOnlyPtr = true; 2045 } 2046 2047 // See if there is an unsafe dependency between a load to a uniform address and 2048 // store to the same uniform address. 
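    // For illustration (hypothetical loop, with p loop-invariant):
    //   for (i = 0; i < n; i++) { s = *p; *p = s + A[i]; }
    // Here the load and the store both use the invariant address p, so there
    // is a loop-carried dependence through a loop-invariant address, which is
    // what the flag set below records.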
2049     if (UniformStores.count(Ptr)) {
2050       LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2051                            "load and uniform store to the same address!\n");
2052       HasDependenceInvolvingLoopInvariantAddress = true;
2053     }
2054
2055     MemoryLocation Loc = MemoryLocation::get(LD);
2056     // The TBAA metadata could have a control dependency on the predication
2057     // condition, so we cannot rely on it when determining whether or not we
2058     // need runtime pointer checks.
2059     if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2060       Loc.AATags.TBAA = nullptr;
2061
2062     visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2063                   [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2064                     MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2065                     Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2066                   });
2067   }
2068
2069   // If we write (or read-write) to a single destination and there are no
2070   // other reads in this loop then it is safe to vectorize.
2071   if (NumReadWrites == 1 && NumReads == 0) {
2072     LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2073     CanVecMem = true;
2074     return;
2075   }
2076
2077   // Build dependence sets and check whether we need a runtime pointer bounds
2078   // check.
2079   Accesses.buildDependenceSets();
2080
2081   // Find pointers with computable bounds. We are going to use this information
2082   // to place a runtime bound check.
2083   Value *UncomputablePtr = nullptr;
2084   bool CanDoRTIfNeeded =
2085       Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2086                                SymbolicStrides, UncomputablePtr, false);
2087   if (!CanDoRTIfNeeded) {
2088     auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2089     recordAnalysis("CantIdentifyArrayBounds", I)
2090         << "cannot identify array bounds";
2091     LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2092                       << "the array bounds.\n");
2093     CanVecMem = false;
2094     return;
2095   }
2096
2097   LLVM_DEBUG(
2098       dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2099
2100   CanVecMem = true;
2101   if (Accesses.isDependencyCheckNeeded()) {
2102     LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2103     CanVecMem = DepChecker->areDepsSafe(
2104         DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2105     MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
2106
2107     if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2108       LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2109
2110       // Clear the dependency checks. We assume they are not needed.
2111       Accesses.resetDepChecks(*DepChecker);
2112
2113       PtrRtChecking->reset();
2114       PtrRtChecking->Need = true;
2115
2116       auto *SE = PSE->getSE();
2117       UncomputablePtr = nullptr;
2118       CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2119           *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2120
2121       // Check that we found the bounds for the pointer.
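      // (Roughly speaking, "bounds" means a computable SCEV start/end range
      // for the pointer over the loop, which the emitted runtime checks
      // compare pairwise for overlap; this description is a simplification of
      // what canCheckPtrAtRT requires.)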
2122 if (!CanDoRTIfNeeded) { 2123 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr); 2124 recordAnalysis("CantCheckMemDepsAtRunTime", I) 2125 << "cannot check memory dependencies at runtime"; 2126 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n"); 2127 CanVecMem = false; 2128 return; 2129 } 2130 2131 CanVecMem = true; 2132 } 2133 } 2134 2135 if (HasConvergentOp) { 2136 recordAnalysis("CantInsertRuntimeCheckWithConvergent") 2137 << "cannot add control dependency to convergent operation"; 2138 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check " 2139 "would be needed with a convergent operation\n"); 2140 CanVecMem = false; 2141 return; 2142 } 2143 2144 if (CanVecMem) 2145 LLVM_DEBUG( 2146 dbgs() << "LAA: No unsafe dependent memory operations in loop. We" 2147 << (PtrRtChecking->Need ? "" : " don't") 2148 << " need runtime memory checks.\n"); 2149 else 2150 emitUnsafeDependenceRemark(); 2151 } 2152 2153 void LoopAccessInfo::emitUnsafeDependenceRemark() { 2154 auto Deps = getDepChecker().getDependences(); 2155 if (!Deps) 2156 return; 2157 auto Found = std::find_if( 2158 Deps->begin(), Deps->end(), [](const MemoryDepChecker::Dependence &D) { 2159 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) != 2160 MemoryDepChecker::VectorizationSafetyStatus::Safe; 2161 }); 2162 if (Found == Deps->end()) 2163 return; 2164 MemoryDepChecker::Dependence Dep = *Found; 2165 2166 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n"); 2167 2168 // Emit remark for first unsafe dependence 2169 OptimizationRemarkAnalysis &R = 2170 recordAnalysis("UnsafeDep", Dep.getDestination(*this)) 2171 << "unsafe dependent memory operations in loop. Use " 2172 "#pragma loop distribute(enable) to allow loop distribution " 2173 "to attempt to isolate the offending operations into a separate " 2174 "loop"; 2175 2176 switch (Dep.Type) { 2177 case MemoryDepChecker::Dependence::NoDep: 2178 case MemoryDepChecker::Dependence::Forward: 2179 case MemoryDepChecker::Dependence::BackwardVectorizable: 2180 llvm_unreachable("Unexpected dependence"); 2181 case MemoryDepChecker::Dependence::Backward: 2182 R << "\nBackward loop carried data dependence."; 2183 break; 2184 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding: 2185 R << "\nForward loop carried data dependence that prevents " 2186 "store-to-load forwarding."; 2187 break; 2188 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding: 2189 R << "\nBackward loop carried data dependence that prevents " 2190 "store-to-load forwarding."; 2191 break; 2192 case MemoryDepChecker::Dependence::Unknown: 2193 R << "\nUnknown data dependence."; 2194 break; 2195 } 2196 2197 if (Instruction *I = Dep.getSource(*this)) { 2198 DebugLoc SourceLoc = I->getDebugLoc(); 2199 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I))) 2200 SourceLoc = DD->getDebugLoc(); 2201 if (SourceLoc) 2202 R << " Memory location is the same as accessed at " 2203 << ore::NV("Location", SourceLoc); 2204 } 2205 } 2206 2207 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, 2208 DominatorTree *DT) { 2209 assert(TheLoop->contains(BB) && "Unknown block used"); 2210 2211 // Blocks that do not dominate the latch need predication. 
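  // For example (hypothetical CFG): in
  //   for (...) { if (c[i]) A[i] = 0; }
  // the block holding "A[i] = 0" does not dominate the loop latch, so its
  // memory accesses execute only conditionally and need predication when the
  // loop is vectorized.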
2212 BasicBlock* Latch = TheLoop->getLoopLatch(); 2213 return !DT->dominates(BB, Latch); 2214 } 2215 2216 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName, 2217 Instruction *I) { 2218 assert(!Report && "Multiple reports generated"); 2219 2220 Value *CodeRegion = TheLoop->getHeader(); 2221 DebugLoc DL = TheLoop->getStartLoc(); 2222 2223 if (I) { 2224 CodeRegion = I->getParent(); 2225 // If there is no debug location attached to the instruction, revert back to 2226 // using the loop's. 2227 if (I->getDebugLoc()) 2228 DL = I->getDebugLoc(); 2229 } 2230 2231 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL, 2232 CodeRegion); 2233 return *Report; 2234 } 2235 2236 bool LoopAccessInfo::isUniform(Value *V) const { 2237 auto *SE = PSE->getSE(); 2238 // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is 2239 // never considered uniform. 2240 // TODO: Is this really what we want? Even without FP SCEV, we may want some 2241 // trivially loop-invariant FP values to be considered uniform. 2242 if (!SE->isSCEVable(V->getType())) 2243 return false; 2244 return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop)); 2245 } 2246 2247 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) { 2248 Value *Ptr = getLoadStorePointerOperand(MemAccess); 2249 if (!Ptr) 2250 return; 2251 2252 Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop); 2253 if (!Stride) 2254 return; 2255 2256 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for " 2257 "versioning:"); 2258 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 2259 2260 // Avoid adding the "Stride == 1" predicate when we know that 2261 // Stride >= Trip-Count. Such a predicate will effectively optimize a single 2262 // or zero iteration loop, as Trip-Count <= Stride == 1. 2263 // 2264 // TODO: We are currently not making a very informed decision on when it is 2265 // beneficial to apply stride versioning. It might make more sense that the 2266 // users of this analysis (such as the vectorizer) will trigger it, based on 2267 // their specific cost considerations; For example, in cases where stride 2268 // versioning does not help resolving memory accesses/dependences, the 2269 // vectorizer should evaluate the cost of the runtime test, and the benefit 2270 // of various possible stride specializations, considering the alternatives 2271 // of using gather/scatters (if available). 2272 2273 const SCEV *StrideExpr = PSE->getSCEV(Stride); 2274 const SCEV *BETakenCount = PSE->getBackedgeTakenCount(); 2275 2276 // Match the types so we can compare the stride and the BETakenCount. 2277 // The Stride can be positive/negative, so we sign extend Stride; 2278 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount. 
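  //
  // Illustrative numbers (hypothetical): if BETakenCount == 7 (TripCount == 8)
  // and Stride is only known to satisfy Stride >= 8, then
  // Stride - BETakenCount >= 1 is known positive, the check below succeeds,
  // and the "Stride == 1" predicate is not added, since the versioned loop
  // could execute at most once.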
2279 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 2280 uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType()); 2281 uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType()); 2282 const SCEV *CastedStride = StrideExpr; 2283 const SCEV *CastedBECount = BETakenCount; 2284 ScalarEvolution *SE = PSE->getSE(); 2285 if (BETypeSize >= StrideTypeSize) 2286 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType()); 2287 else 2288 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType()); 2289 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount); 2290 // Since TripCount == BackEdgeTakenCount + 1, checking: 2291 // "Stride >= TripCount" is equivalent to checking: 2292 // Stride - BETakenCount > 0 2293 if (SE->isKnownPositive(StrideMinusBETaken)) { 2294 LLVM_DEBUG( 2295 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the " 2296 "Stride==1 predicate will imply that the loop executes " 2297 "at most once.\n"); 2298 return; 2299 } 2300 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n"); 2301 2302 SymbolicStrides[Ptr] = Stride; 2303 StrideSet.insert(Stride); 2304 } 2305 2306 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE, 2307 const TargetLibraryInfo *TLI, AAResults *AA, 2308 DominatorTree *DT, LoopInfo *LI) 2309 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)), 2310 PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)), 2311 DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) { 2312 if (canAnalyzeLoop()) 2313 analyzeLoop(AA, LI, TLI, DT); 2314 } 2315 2316 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const { 2317 if (CanVecMem) { 2318 OS.indent(Depth) << "Memory dependences are safe"; 2319 if (MaxSafeDepDistBytes != -1ULL) 2320 OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes 2321 << " bytes"; 2322 if (PtrRtChecking->Need) 2323 OS << " with run-time checks"; 2324 OS << "\n"; 2325 } 2326 2327 if (HasConvergentOp) 2328 OS.indent(Depth) << "Has convergent operation in loop\n"; 2329 2330 if (Report) 2331 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n"; 2332 2333 if (auto *Dependences = DepChecker->getDependences()) { 2334 OS.indent(Depth) << "Dependences:\n"; 2335 for (auto &Dep : *Dependences) { 2336 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions()); 2337 OS << "\n"; 2338 } 2339 } else 2340 OS.indent(Depth) << "Too many dependences, not recorded\n"; 2341 2342 // List the pair of accesses need run-time checks to prove independence. 2343 PtrRtChecking->print(OS, Depth); 2344 OS << "\n"; 2345 2346 OS.indent(Depth) << "Non vectorizable stores to invariant address were " 2347 << (HasDependenceInvolvingLoopInvariantAddress ? 
"" : "not ") 2348 << "found in loop.\n"; 2349 2350 OS.indent(Depth) << "SCEV assumptions:\n"; 2351 PSE->getPredicate().print(OS, Depth); 2352 2353 OS << "\n"; 2354 2355 OS.indent(Depth) << "Expressions re-written:\n"; 2356 PSE->print(OS, Depth); 2357 } 2358 2359 LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) { 2360 initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry()); 2361 } 2362 2363 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) { 2364 auto &LAI = LoopAccessInfoMap[L]; 2365 2366 if (!LAI) 2367 LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI); 2368 2369 return *LAI.get(); 2370 } 2371 2372 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const { 2373 LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this); 2374 2375 for (Loop *TopLevelLoop : *LI) 2376 for (Loop *L : depth_first(TopLevelLoop)) { 2377 OS.indent(2) << L->getHeader()->getName() << ":\n"; 2378 auto &LAI = LAA.getInfo(L); 2379 LAI.print(OS, 4); 2380 } 2381 } 2382 2383 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) { 2384 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2385 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2386 TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2387 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2388 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2389 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2390 2391 return false; 2392 } 2393 2394 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 2395 AU.addRequiredTransitive<ScalarEvolutionWrapperPass>(); 2396 AU.addRequiredTransitive<AAResultsWrapperPass>(); 2397 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 2398 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 2399 2400 AU.setPreservesAll(); 2401 } 2402 2403 char LoopAccessLegacyAnalysis::ID = 0; 2404 static const char laa_name[] = "Loop Access Analysis"; 2405 #define LAA_NAME "loop-accesses" 2406 2407 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2408 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2409 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 2410 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2411 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 2412 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2413 2414 AnalysisKey LoopAccessAnalysis::Key; 2415 2416 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM, 2417 LoopStandardAnalysisResults &AR) { 2418 return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI); 2419 } 2420 2421 namespace llvm { 2422 2423 Pass *createLAAPass() { 2424 return new LoopAccessLegacyAnalysis(); 2425 } 2426 2427 } // end namespace llvm 2428