//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation for the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
    VectorizationInterleave("force-vector-interleave", cl::Hidden,
                            cl::desc("Sets the vectorization interleave count. "
" 83 "Zero is autoselect."), 84 cl::location( 85 VectorizerParams::VectorizationInterleave)); 86 unsigned VectorizerParams::VectorizationInterleave; 87 88 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 89 "runtime-memory-check-threshold", cl::Hidden, 90 cl::desc("When performing memory disambiguation checks at runtime do not " 91 "generate more than this number of comparisons (default = 8)."), 92 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 93 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 94 95 /// The maximum iterations used to merge memory checks 96 static cl::opt<unsigned> MemoryCheckMergeThreshold( 97 "memory-check-merge-threshold", cl::Hidden, 98 cl::desc("Maximum number of comparisons done when trying to merge " 99 "runtime memory checks. (default = 100)"), 100 cl::init(100)); 101 102 /// Maximum SIMD width. 103 const unsigned VectorizerParams::MaxVectorWidth = 64; 104 105 /// We collect dependences up to this threshold. 106 static cl::opt<unsigned> 107 MaxDependences("max-dependences", cl::Hidden, 108 cl::desc("Maximum number of dependences collected by " 109 "loop-access analysis (default = 100)"), 110 cl::init(100)); 111 112 /// This enables versioning on the strides of symbolically striding memory 113 /// accesses in code like the following. 114 /// for (i = 0; i < N; ++i) 115 /// A[i * Stride1] += B[i * Stride2] ... 116 /// 117 /// Will be roughly translated to 118 /// if (Stride1 == 1 && Stride2 == 1) { 119 /// for (i = 0; i < N; i+=4) 120 /// A[i:i+3] += ... 121 /// } else 122 /// ... 123 static cl::opt<bool> EnableMemAccessVersioning( 124 "enable-mem-access-versioning", cl::init(true), cl::Hidden, 125 cl::desc("Enable symbolic stride memory access versioning")); 126 127 /// Enable store-to-load forwarding conflict detection. This option can 128 /// be disabled for correctness testing. 129 static cl::opt<bool> EnableForwardingConflictDetection( 130 "store-to-load-forwarding-conflict-detection", cl::Hidden, 131 cl::desc("Enable conflict detection in loop-access analysis"), 132 cl::init(true)); 133 134 bool VectorizerParams::isInterleaveForced() { 135 return ::VectorizationInterleave.getNumOccurrences() > 0; 136 } 137 138 Value *llvm::stripIntegerCast(Value *V) { 139 if (auto *CI = dyn_cast<CastInst>(V)) 140 if (CI->getOperand(0)->getType()->isIntegerTy()) 141 return CI->getOperand(0); 142 return V; 143 } 144 145 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 146 const ValueToValueMap &PtrToStride, 147 Value *Ptr, Value *OrigPtr) { 148 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 149 150 // If there is an entry in the map return the SCEV of the pointer with the 151 // symbolic stride replaced by one. 152 ValueToValueMap::const_iterator SI = 153 PtrToStride.find(OrigPtr ? OrigPtr : Ptr); 154 if (SI != PtrToStride.end()) { 155 Value *StrideVal = SI->second; 156 157 // Strip casts. 158 StrideVal = stripIntegerCast(StrideVal); 159 160 ScalarEvolution *SE = PSE.getSE(); 161 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 162 const auto *CT = 163 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 164 165 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 166 auto *Expr = PSE.getSCEV(Ptr); 167 168 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV 169 << " by: " << *Expr << "\n"); 170 return Expr; 171 } 172 173 // Otherwise, just return the SCEV of the original pointer. 174 return OrigSCEV; 175 } 176 177 /// Calculate Start and End points of memory access. 

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
    // Add the size of the pointed element to ScEnd.
    unsigned EltSize =
        Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
    const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
    ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}
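
// E.g. (illustrative): a PointerCheck pair (&CGI, &CGJ) collected above is
// conceptually the disjointness test from the comment on insert():
//   NoConflict = (CGJ.Low >= CGI.High) || (CGI.Low >= CGJ.High)
// so the number of emitted comparisons scales with the number of group
// pairs, not with the number of individual pointers.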

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.
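
  // E.g. (illustrative): accesses a[i], a[i+1] and a[i+2] fall into one
  // equivalence class, and their Start/End expressions differ by the
  // constants 4 and 8 (for 4-byte elements), so addPointer merges all three
  // into a single group whose Low/High cover the union of their intervals.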

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable.  If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;
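
  // E.g. (illustrative): in "c[i] = a[i] + b[i]" the loads from a and b are
  // both read-only, so the (a, b) pair is rejected here and only pairs
  // involving the store to c survive the filters below.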

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(const DataLayout &Dl, Loop *TheLoop, AliasAnalysis *AA,
                 LoopInfo *LI, MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false), PSE(PSE) {}

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }
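
  // E.g. (illustrative): for "a[i] = a[i] + 1" the pointer %a is registered
  // twice, once via addLoad (with IsReadOnly=false, since %a is also stored
  // to) and once via addStore, yielding the two entries
  // MemAccessInfo(%a, /*IsWrite=*/false) and MemAccessInfo(%a, true).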

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access,
                            const ValueToValueMap &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckWrap,
                            bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object
  /// and intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really
  /// need a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks.  Perform the analysis to determine the necessary
  /// checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.  When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace
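
// E.g. (illustrative): a pointer whose SCEV is the affine AddRec
// {%a,+,4}<%loop> has computable bounds, while the pointer feeding a gather
// like "A[B[i]]" does not, because its SCEV is not an AddRec in %loop.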

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, bool Assume) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
    return false;

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
    auto *Expr = PSE.getSCEV(Ptr);
    if (!Assume || !isa<SCEVAddRecExpr>(Expr))
      return false;
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
  }

  // The id of the dependence set.
  unsigned DepId;

  if (isDependencyCheckNeeded()) {
    Value *Leader = DepCands.getLeaderValue(Access).getPointer();
    unsigned &LeaderId = DepSetId[Leader];
    if (!LeaderId)
      LeaderId = RunningDepId++;
    DepId = LeaderId;
  } else
    // Each access has its own dependence set.
    DepId = RunningDepId++;

  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
  LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                RunningDepId, ASId, ShouldCheckWrap, false)) {
        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        Retries.push_back(Access);
        CanDoAliasSetRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them.  But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a
    // pointer for which we couldn't find the bounds but we don't actually need
    // to emit any checks so it does not matter.
    bool NeedsAliasSetRTCheck = false;
    if (!(IsDepCheckNeeded && CanDoAliasSetRT && RunningDepId == 2))
      NeedsAliasSetRTCheck = (NumWritePtrChecks >= 2 ||
                             (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Access : Retries)
        if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
                                  TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          break;
        }
    }

    CanDoRT &= CanDoAliasSetRT;
    NeedRTCheck |= NeedsAliasSetRTCheck;
    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read, only check other writes for conflicts (but
          // only if there is no other write to the ptr - this is an
          // optimization to catch "a[i] = a[i] + " without having to do a
          // dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                      << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  if (!IsNoWrapAddRec && !IsInBoundsGEP &&
      NullPointerIsDefined(Lp->getHeader()->getParent(),
                           PtrTy->getAddressSpace())) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                        << "LAA: Pointer: " << *Ptr << "\n"
                        << "LAA: SCEV: " << *AR << "\n"
                        << "LAA: Added an overflow assumption\n");
    } else {
      LLVM_DEBUG(
          dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
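
  // E.g. (illustrative): for "int A[...]; ... A[2 * i]" the AddRec step is
  // 8 bytes and the element size is 4 bytes, so the computation below yields
  // a stride of 2 with no remainder.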

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
      (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
                                              PtrTy->getAddressSpace()))) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                        << "inbounds or in address space 0 may wrap:\n"
                        << "LAA: Pointer: " << *Ptr << "\n"
                        << "LAA: SCEV: " << *AR << "\n"
                        << "LAA: Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}

bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                           ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
  OffValPairs.reserve(VL.size());

  // Walk over the pointers, and map each of them to an offset relative to
  // first pointer in the array.
  Value *Ptr0 = VL[0];
  const SCEV *Scev0 = SE.getSCEV(Ptr0);
  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);

  llvm::SmallSet<int64_t, 4> Offsets;
  for (auto *Ptr : VL) {
    // TODO: Outline this code as a special, more time consuming, version of
    // computeConstantDifference() function.
    if (Ptr->getType()->getPointerAddressSpace() !=
        Ptr0->getType()->getPointerAddressSpace())
      return false;
    // If a pointer refers to a different underlying object, bail - the
    // pointers are by definition incomparable.
    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
    if (CurrObj != Obj0)
      return false;

    const SCEV *Scev = SE.getSCEV(Ptr);
    const auto *Diff = dyn_cast<SCEVConstant>(SE.getMinusSCEV(Scev, Scev0));
    // The pointers may not have a constant offset from each other, or SCEV
    // may just not be smart enough to figure out they do. Regardless,
    // there's nothing we can do.
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = Diff->getAPInt().getSExtValue();
    if (!Offsets.insert(Offset).second)
      return false;
    OffValPairs.emplace_back(Offset, Ptr);
  }
  SortedIndices.clear();
  SortedIndices.resize(VL.size());
  std::iota(SortedIndices.begin(), SortedIndices.end(), 0);
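
  // E.g. (illustrative): for VL = {%a+4, %a, %a+8} the collected offsets
  // relative to VL[0] are {0, -4, 4}, so the stable sort below produces
  // SortedIndices = {1, 0, 2}.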

  // Sort the memory accesses and keep the order of their uses in UseOrder.
  std::stable_sort(SortedIndices.begin(), SortedIndices.end(),
                   [&OffValPairs](unsigned Left, unsigned Right) {
                     return OffValPairs[Left].first < OffValPairs[Right].first;
                   });

  // Check if the order is consecutive already.
  if (llvm::all_of(SortedIndices, [&SortedIndices](const unsigned I) {
        return I == SortedIndices[I];
      }))
    SortedIndices.clear();

  return true;
}

/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(IdxWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  // OffsetDelta = OffsetB - OffsetA;
  // Both offsets are constants, so their difference always folds to a
  // SCEVConstant.
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const SCEVConstant *OffsetDeltaC = cast<SCEVConstant>(OffsetDeltaSCEV);
  const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}

MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  // hence on your typical architecture store-load forwarding does not take
  // place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);
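
  // E.g. (illustrative): Distance = 6, TypeByteSize = 2, and assuming
  // MaxSafeDepDistBytes is still large. The loop below starts at VF = 4;
  // since 6 % 4 != 0 and 6 / 4 = 1 lies within the forwarding window of 16
  // iterations, MaxVFWithoutSLForwardIssues drops to 2, which is smaller
  // than 2 * TypeByteSize, so we report a potential conflict.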

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
  if (Status < S)
    Status = S;
}

/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
/// possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {

  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
  //
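  // E.g. (illustrative): in the PR31098 loop above, with 4-byte elements,
  // Dist = 4*D, Step = 4 and BackedgeTakenCount = D-1, so
  // 4*D > 4*(D-1) holds and the checks below prove independence without a
  // runtime test.
  //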
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSize > ProductTypeSize)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  return false;
}

/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |       |       |       | A[4] |       |       |       |
  //     |      |       | A[2] |       |      |       | A[6] |       |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |       |       | A[3] |       |       | A[6] |       |       |
  //     |      |       |       |      | A[4] |       |      | A[7] |       |
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
  int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);

  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
                    << "(Induction step: " << StrideAPtr << ")\n");
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
                    << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap
  // in the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
  uint64_t Stride = std::abs(StrideAPtr);
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
        isSafeDependenceDistance(DL, *(PSE.getSE()),
                                 *(PSE.getBackedgeTakenCount()), *Dist, Stride,
                                 TypeByteSize))
      return Dependence::NoDep;

    LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }

  const APInt &Val = C->getAPInt();
  int64_t Distance = Val.getSExtValue();

  // Attempt to prove strided accesses independent.
  if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
      areStridedAccessesIndependent(std::abs(Distance), Stride,
                                    TypeByteSize)) {
    LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(),
                                      TypeByteSize) ||
         ATy != BTy)) {
      LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    LLVM_DEBUG(
        dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    LLVM_DEBUG(
        dbgs()
        << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.

  // It's not vectorizable if the distance is smaller than the minimum
  // distance needed for a vectorized/unrolled version. Vectorizing one
  // iteration in front needs TypeByteSize * Stride. Vectorizing the last
  // iteration needs TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // The distance needed to vectorize all but the last iteration is
  // 4 * 2 * (MinNumIter - 1); the last iteration needs another 4 bytes.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than the distance of 14.
  //
  // If MinNumIter is 4 (say, if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than the distance
  // of 14. It is not safe to do vectorization.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
                      << Distance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " bytes\n");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which
  // cannot handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, where the minimum distance
  // needed is 8, which is greater than 2, so vectorization is forbidden. But
  // in fact both A and B could be vectorized by 2 iterations.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeRegisterWidth = std::min(MaxSafeRegisterWidth, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}
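
// To illustrate the enumeration done below (a sketch): if one alias set
// contains the accesses {(%a, write), (%b, read)}, where %a is accessed by
// instructions 0 and 2 and %b by instruction 1, then isDependent is called
// on the cross pairs (0,1) and (1,2), with the smaller index always passed
// first so the arguments stay in program order. Instructions on the same
// (pointer, is-write) access are never compared against each other.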

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoList &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Counts of the different kinds of accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores.
    for (Instruction &I : *BB) {
      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;
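
        // For example, a call mapped to the @llvm.sqrt.* intrinsic may
        // formally read the rounding mode but is known not to change it, so
        // the check above lets it through (an illustration; the exact set of
        // calls depends on the TLI).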

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", &I)
              << "instruction cannot be vectorized";
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioning)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (Stores.empty()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  ValueSet UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isUniform(Ptr))
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;

    // If we did *not* see this pointer before, insert it into the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignoring memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it into the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the index is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load from a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
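
      // This situation arises, for example (a sketch), for
      //      void f(int *A, int *B, int N) {
      //        for (int i = 0; i < N; ++i)
      //          A[i] = B[i] + 1;
      //      }
      // when nothing is known about how A and B relate: the dependence
      // distance (A - B) is not a compile-time constant, so isDependent
      // returned Unknown and set ShouldRetryWithRuntimeCheck. We now version
      // the loop on a runtime test that the two accessed ranges do not
      // overlap.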
      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock* Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
                                                   CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

namespace {

/// IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};

} // end anonymous namespace
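
// E.g. (a sketch): for a pointer whose SCEV in the loop is {%A,+,4} and a
// backedge-taken count of 99, the expanded bounds cover the half-open byte
// range [%A, %A + 400): Start is %A and End points one past the last
// accessed byte.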

/// Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  unsigned AS = Ptr->getType()->getPointerAddressSpace();
  LLVMContext &Ctx = Loc->getContext();

  // Use this type for pointer arithmetic.
  Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    LLVM_DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr: "
                      << *Ptr << "\n");
    // Ptr could be in the loop body. If so, expand a new one at the correct
    // location.
    Instruction *Inst = dyn_cast<Instruction>(Ptr);
    Value *NewPtr = (Inst && TheLoop->contains(Inst))
                        ? Exp.expandCodeFor(Sc, PtrArithTy, Loc)
                        : Ptr;
    // We must return a half-open range, which means incrementing Sc.
    const SCEV *ScPlusOne = SE->getAddExpr(Sc, SE->getOne(PtrArithTy));
    Value *NewPtrPlusOne = Exp.expandCodeFor(ScPlusOne, PtrArithTy, Loc);
    return {NewPtr, NewPtrPlusOne};
  } else {
    Value *Start = nullptr, *End = nullptr;
    LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    LLVM_DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High
                      << "\n");
    return {Start, End};
  }
}

/// Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  transform(
      PointerChecks, std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
          First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
          Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  auto *SE = PSE->getSE();
  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
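
  // For a single pair, the emitted check looks roughly like (a sketch):
  //   %bound0 = icmp ult i8* %a.start, %b.end
  //   %bound1 = icmp ult i8* %b.start, %a.end
  //   %found.conflict = and i1 %bound0, %bound1
  // With several pairs, the per-pair results are OR'ed together into
  // %conflict.rdx, which is true iff any pair of ranges overlaps.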
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) < end(B) && start(B) < end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    // [A|B].Start points to the first accessed byte under base [A|B].
    // [A|B].End points to the last accessed byte, plus one.
    // There is no conflict when the intervals are disjoint:
    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
    //
    // bound0 = (B.Start < A.End)
    // bound1 = (A.Start < B.End)
    // IsConflict = bound0 & bound1
    Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check
  // to a constant expression in which case there is no Instruction anchored
  // in the block.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking->Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking->getChecks());
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations; for example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *StrideExpr = PSE->getSCEV(Stride);
  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // the BackedgeTakenCount is non-negative, so we zero extend BETakenCount.
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
  uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSize >= StrideTypeSize)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackedgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - BETakenCount > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
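
  // E.g. (a sketch): with a backedge-taken count of 2 (a trip count of 3) and
  // a stride that SCEV can prove to be at least 3, Stride - BETakenCount > 0
  // holds and the early return above fires: the "Stride == 1" specialization
  // could run at most one iteration, so versioning is pointless.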
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      HasDependenceInvolvingLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non-vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI;
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}

bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;

static const char laa_name[] = "Loop Access Analysis";

#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

AnalysisKey LoopAccessAnalysis::Key;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}

namespace llvm {

Pass *createLAAPass() {
  return new LoopAccessLegacyAnalysis();
}

} // end namespace llvm
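
// Example client usage (a sketch; "MyPass" is hypothetical): a legacy pass
// can require this analysis and query it per loop, e.g.
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LoopAccessLegacyAnalysis>();
//   }
//   ...
//   const LoopAccessInfo &LAI =
//       getAnalysis<LoopAccessLegacyAnalysis>().getInfo(L);
//   if (LAI.canVectorizeMemory()) {
//     // Safe, possibly after emitting the runtime checks collected above.
//   }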