1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // The implementation for the loop memory dependence that was originally 11 // developed for the loop vectorizer. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Analysis/LoopAccessAnalysis.h" 16 #include "llvm/ADT/APInt.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/DepthFirstIterator.h" 19 #include "llvm/ADT/EquivalenceClasses.h" 20 #include "llvm/ADT/PointerIntPair.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/SetVector.h" 23 #include "llvm/ADT/SmallPtrSet.h" 24 #include "llvm/ADT/SmallSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/iterator_range.h" 27 #include "llvm/Analysis/AliasAnalysis.h" 28 #include "llvm/Analysis/AliasSetTracker.h" 29 #include "llvm/Analysis/LoopAnalysisManager.h" 30 #include "llvm/Analysis/LoopInfo.h" 31 #include "llvm/Analysis/MemoryLocation.h" 32 #include "llvm/Analysis/OptimizationDiagnosticInfo.h" 33 #include "llvm/Analysis/ScalarEvolution.h" 34 #include "llvm/Analysis/ScalarEvolutionExpander.h" 35 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 36 #include "llvm/Analysis/TargetLibraryInfo.h" 37 #include "llvm/Analysis/ValueTracking.h" 38 #include "llvm/Analysis/VectorUtils.h" 39 #include "llvm/IR/BasicBlock.h" 40 #include "llvm/IR/Constants.h" 41 #include "llvm/IR/DataLayout.h" 42 #include "llvm/IR/DebugLoc.h" 43 #include "llvm/IR/DerivedTypes.h" 44 #include "llvm/IR/DiagnosticInfo.h" 45 #include "llvm/IR/Dominators.h" 46 #include "llvm/IR/Function.h" 47 #include "llvm/IR/IRBuilder.h" 48 #include "llvm/IR/InstrTypes.h" 49 #include "llvm/IR/Instruction.h" 50 #include "llvm/IR/Instructions.h" 51 #include "llvm/IR/Operator.h" 52 #include "llvm/IR/PassManager.h" 53 #include "llvm/IR/Type.h" 54 #include "llvm/IR/Value.h" 55 #include "llvm/IR/ValueHandle.h" 56 #include "llvm/Pass.h" 57 #include "llvm/Support/Casting.h" 58 #include "llvm/Support/CommandLine.h" 59 #include "llvm/Support/Debug.h" 60 #include "llvm/Support/ErrorHandling.h" 61 #include "llvm/Support/raw_ostream.h" 62 #include <algorithm> 63 #include <cassert> 64 #include <cstdint> 65 #include <cstdlib> 66 #include <iterator> 67 #include <utility> 68 #include <vector> 69 70 using namespace llvm; 71 72 #define DEBUG_TYPE "loop-accesses" 73 74 static cl::opt<unsigned, true> 75 VectorizationFactor("force-vector-width", cl::Hidden, 76 cl::desc("Sets the SIMD width. Zero is autoselect."), 77 cl::location(VectorizerParams::VectorizationFactor)); 78 unsigned VectorizerParams::VectorizationFactor; 79 80 static cl::opt<unsigned, true> 81 VectorizationInterleave("force-vector-interleave", cl::Hidden, 82 cl::desc("Sets the vectorization interleave count. " 83 "Zero is autoselect."), 84 cl::location( 85 VectorizerParams::VectorizationInterleave)); 86 unsigned VectorizerParams::VectorizationInterleave; 87 88 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 89 "runtime-memory-check-threshold", cl::Hidden, 90 cl::desc("When performing memory disambiguation checks at runtime do not " 91 "generate more than this number of comparisons (default = 8)."), 92 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 93 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 94 95 /// \brief The maximum iterations used to merge memory checks 96 static cl::opt<unsigned> MemoryCheckMergeThreshold( 97 "memory-check-merge-threshold", cl::Hidden, 98 cl::desc("Maximum number of comparisons done when trying to merge " 99 "runtime memory checks. (default = 100)"), 100 cl::init(100)); 101 102 /// Maximum SIMD width. 103 const unsigned VectorizerParams::MaxVectorWidth = 64; 104 105 /// \brief We collect dependences up to this threshold. 106 static cl::opt<unsigned> 107 MaxDependences("max-dependences", cl::Hidden, 108 cl::desc("Maximum number of dependences collected by " 109 "loop-access analysis (default = 100)"), 110 cl::init(100)); 111 112 /// This enables versioning on the strides of symbolically striding memory 113 /// accesses in code like the following. 114 /// for (i = 0; i < N; ++i) 115 /// A[i * Stride1] += B[i * Stride2] ... 116 /// 117 /// Will be roughly translated to 118 /// if (Stride1 == 1 && Stride2 == 1) { 119 /// for (i = 0; i < N; i+=4) 120 /// A[i:i+3] += ... 121 /// } else 122 /// ... 123 static cl::opt<bool> EnableMemAccessVersioning( 124 "enable-mem-access-versioning", cl::init(true), cl::Hidden, 125 cl::desc("Enable symbolic stride memory access versioning")); 126 127 /// \brief Enable store-to-load forwarding conflict detection. This option can 128 /// be disabled for correctness testing. 129 static cl::opt<bool> EnableForwardingConflictDetection( 130 "store-to-load-forwarding-conflict-detection", cl::Hidden, 131 cl::desc("Enable conflict detection in loop-access analysis"), 132 cl::init(true)); 133 134 bool VectorizerParams::isInterleaveForced() { 135 return ::VectorizationInterleave.getNumOccurrences() > 0; 136 } 137 138 void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message, 139 const Loop *TheLoop, const char *PassName, 140 OptimizationRemarkEmitter &ORE) { 141 DebugLoc DL = TheLoop->getStartLoc(); 142 const Value *V = TheLoop->getHeader(); 143 if (const Instruction *I = Message.getInstr()) { 144 // If there is no debug location attached to the instruction, revert back to 145 // using the loop's. 146 if (I->getDebugLoc()) 147 DL = I->getDebugLoc(); 148 V = I->getParent(); 149 } 150 ORE.emitOptimizationRemarkAnalysis(PassName, DL, V, Message.str()); 151 } 152 153 Value *llvm::stripIntegerCast(Value *V) { 154 if (auto *CI = dyn_cast<CastInst>(V)) 155 if (CI->getOperand(0)->getType()->isIntegerTy()) 156 return CI->getOperand(0); 157 return V; 158 } 159 160 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 161 const ValueToValueMap &PtrToStride, 162 Value *Ptr, Value *OrigPtr) { 163 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 164 165 // If there is an entry in the map return the SCEV of the pointer with the 166 // symbolic stride replaced by one. 167 ValueToValueMap::const_iterator SI = 168 PtrToStride.find(OrigPtr ? OrigPtr : Ptr); 169 if (SI != PtrToStride.end()) { 170 Value *StrideVal = SI->second; 171 172 // Strip casts. 173 StrideVal = stripIntegerCast(StrideVal); 174 175 // Replace symbolic stride by one. 176 Value *One = ConstantInt::get(StrideVal->getType(), 1); 177 ValueToValueMap RewriteMap; 178 RewriteMap[StrideVal] = One; 179 180 ScalarEvolution *SE = PSE.getSE(); 181 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 182 const auto *CT = 183 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 184 185 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 186 auto *Expr = PSE.getSCEV(Ptr); 187 188 DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr 189 << "\n"); 190 return Expr; 191 } 192 193 // Otherwise, just return the SCEV of the original pointer. 194 return OrigSCEV; 195 } 196 197 /// Calculate Start and End points of memory access. 198 /// Let's assume A is the first access and B is a memory access on N-th loop 199 /// iteration. Then B is calculated as: 200 /// B = A + Step*N . 201 /// Step value may be positive or negative. 202 /// N is a calculated back-edge taken count: 203 /// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0 204 /// Start and End points are calculated in the following way: 205 /// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt, 206 /// where SizeOfElt is the size of single memory access in bytes. 207 /// 208 /// There is no conflict when the intervals are disjoint: 209 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End) 210 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr, 211 unsigned DepSetId, unsigned ASId, 212 const ValueToValueMap &Strides, 213 PredicatedScalarEvolution &PSE) { 214 // Get the stride replaced scev. 215 const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 216 ScalarEvolution *SE = PSE.getSE(); 217 218 const SCEV *ScStart; 219 const SCEV *ScEnd; 220 221 if (SE->isLoopInvariant(Sc, Lp)) 222 ScStart = ScEnd = Sc; 223 else { 224 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); 225 assert(AR && "Invalid addrec expression"); 226 const SCEV *Ex = PSE.getBackedgeTakenCount(); 227 228 ScStart = AR->getStart(); 229 ScEnd = AR->evaluateAtIteration(Ex, *SE); 230 const SCEV *Step = AR->getStepRecurrence(*SE); 231 232 // For expressions with negative step, the upper bound is ScStart and the 233 // lower bound is ScEnd. 234 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) { 235 if (CStep->getValue()->isNegative()) 236 std::swap(ScStart, ScEnd); 237 } else { 238 // Fallback case: the step is not constant, but we can still 239 // get the upper and lower bounds of the interval by using min/max 240 // expressions. 241 ScStart = SE->getUMinExpr(ScStart, ScEnd); 242 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd); 243 } 244 // Add the size of the pointed element to ScEnd. 245 unsigned EltSize = 246 Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8; 247 const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize); 248 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV); 249 } 250 251 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc); 252 } 253 254 SmallVector<RuntimePointerChecking::PointerCheck, 4> 255 RuntimePointerChecking::generateChecks() const { 256 SmallVector<PointerCheck, 4> Checks; 257 258 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 259 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) { 260 const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I]; 261 const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J]; 262 263 if (needsChecking(CGI, CGJ)) 264 Checks.push_back(std::make_pair(&CGI, &CGJ)); 265 } 266 } 267 return Checks; 268 } 269 270 void RuntimePointerChecking::generateChecks( 271 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 272 assert(Checks.empty() && "Checks is not empty"); 273 groupChecks(DepCands, UseDependencies); 274 Checks = generateChecks(); 275 } 276 277 bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M, 278 const CheckingPtrGroup &N) const { 279 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I) 280 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J) 281 if (needsChecking(M.Members[I], N.Members[J])) 282 return true; 283 return false; 284 } 285 286 /// Compare \p I and \p J and return the minimum. 287 /// Return nullptr in case we couldn't find an answer. 288 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J, 289 ScalarEvolution *SE) { 290 const SCEV *Diff = SE->getMinusSCEV(J, I); 291 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff); 292 293 if (!C) 294 return nullptr; 295 if (C->getValue()->isNegative()) 296 return J; 297 return I; 298 } 299 300 bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) { 301 const SCEV *Start = RtCheck.Pointers[Index].Start; 302 const SCEV *End = RtCheck.Pointers[Index].End; 303 304 // Compare the starts and ends with the known minimum and maximum 305 // of this set. We need to know how we compare against the min/max 306 // of the set in order to be able to emit memchecks. 307 const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE); 308 if (!Min0) 309 return false; 310 311 const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE); 312 if (!Min1) 313 return false; 314 315 // Update the low bound expression if we've found a new min value. 316 if (Min0 == Start) 317 Low = Start; 318 319 // Update the high bound expression if we've found a new max value. 320 if (Min1 != End) 321 High = End; 322 323 Members.push_back(Index); 324 return true; 325 } 326 327 void RuntimePointerChecking::groupChecks( 328 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 329 // We build the groups from dependency candidates equivalence classes 330 // because: 331 // - We know that pointers in the same equivalence class share 332 // the same underlying object and therefore there is a chance 333 // that we can compare pointers 334 // - We wouldn't be able to merge two pointers for which we need 335 // to emit a memcheck. The classes in DepCands are already 336 // conveniently built such that no two pointers in the same 337 // class need checking against each other. 338 339 // We use the following (greedy) algorithm to construct the groups 340 // For every pointer in the equivalence class: 341 // For each existing group: 342 // - if the difference between this pointer and the min/max bounds 343 // of the group is a constant, then make the pointer part of the 344 // group and update the min/max bounds of that group as required. 345 346 CheckingGroups.clear(); 347 348 // If we need to check two pointers to the same underlying object 349 // with a non-constant difference, we shouldn't perform any pointer 350 // grouping with those pointers. This is because we can easily get 351 // into cases where the resulting check would return false, even when 352 // the accesses are safe. 353 // 354 // The following example shows this: 355 // for (i = 0; i < 1000; ++i) 356 // a[5000 + i * m] = a[i] + a[i + 9000] 357 // 358 // Here grouping gives a check of (5000, 5000 + 1000 * m) against 359 // (0, 10000) which is always false. However, if m is 1, there is no 360 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows 361 // us to perform an accurate check in this case. 362 // 363 // The above case requires that we have an UnknownDependence between 364 // accesses to the same underlying object. This cannot happen unless 365 // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies 366 // is also false. In this case we will use the fallback path and create 367 // separate checking groups for all pointers. 368 369 // If we don't have the dependency partitions, construct a new 370 // checking pointer group for each pointer. This is also required 371 // for correctness, because in this case we can have checking between 372 // pointers to the same underlying object. 373 if (!UseDependencies) { 374 for (unsigned I = 0; I < Pointers.size(); ++I) 375 CheckingGroups.push_back(CheckingPtrGroup(I, *this)); 376 return; 377 } 378 379 unsigned TotalComparisons = 0; 380 381 DenseMap<Value *, unsigned> PositionMap; 382 for (unsigned Index = 0; Index < Pointers.size(); ++Index) 383 PositionMap[Pointers[Index].PointerValue] = Index; 384 385 // We need to keep track of what pointers we've already seen so we 386 // don't process them twice. 387 SmallSet<unsigned, 2> Seen; 388 389 // Go through all equivalence classes, get the "pointer check groups" 390 // and add them to the overall solution. We use the order in which accesses 391 // appear in 'Pointers' to enforce determinism. 392 for (unsigned I = 0; I < Pointers.size(); ++I) { 393 // We've seen this pointer before, and therefore already processed 394 // its equivalence class. 395 if (Seen.count(I)) 396 continue; 397 398 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue, 399 Pointers[I].IsWritePtr); 400 401 SmallVector<CheckingPtrGroup, 2> Groups; 402 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access)); 403 404 // Because DepCands is constructed by visiting accesses in the order in 405 // which they appear in alias sets (which is deterministic) and the 406 // iteration order within an equivalence class member is only dependent on 407 // the order in which unions and insertions are performed on the 408 // equivalence class, the iteration order is deterministic. 409 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end(); 410 MI != ME; ++MI) { 411 unsigned Pointer = PositionMap[MI->getPointer()]; 412 bool Merged = false; 413 // Mark this pointer as seen. 414 Seen.insert(Pointer); 415 416 // Go through all the existing sets and see if we can find one 417 // which can include this pointer. 418 for (CheckingPtrGroup &Group : Groups) { 419 // Don't perform more than a certain amount of comparisons. 420 // This should limit the cost of grouping the pointers to something 421 // reasonable. If we do end up hitting this threshold, the algorithm 422 // will create separate groups for all remaining pointers. 423 if (TotalComparisons > MemoryCheckMergeThreshold) 424 break; 425 426 TotalComparisons++; 427 428 if (Group.addPointer(Pointer)) { 429 Merged = true; 430 break; 431 } 432 } 433 434 if (!Merged) 435 // We couldn't add this pointer to any existing set or the threshold 436 // for the number of comparisons has been reached. Create a new group 437 // to hold the current pointer. 438 Groups.push_back(CheckingPtrGroup(Pointer, *this)); 439 } 440 441 // We've computed the grouped checks for this partition. 442 // Save the results and continue with the next one. 443 std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups)); 444 } 445 } 446 447 bool RuntimePointerChecking::arePointersInSamePartition( 448 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1, 449 unsigned PtrIdx2) { 450 return (PtrToPartition[PtrIdx1] != -1 && 451 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]); 452 } 453 454 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const { 455 const PointerInfo &PointerI = Pointers[I]; 456 const PointerInfo &PointerJ = Pointers[J]; 457 458 // No need to check if two readonly pointers intersect. 459 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr) 460 return false; 461 462 // Only need to check pointers between two different dependency sets. 463 if (PointerI.DependencySetId == PointerJ.DependencySetId) 464 return false; 465 466 // Only need to check pointers in the same alias set. 467 if (PointerI.AliasSetId != PointerJ.AliasSetId) 468 return false; 469 470 return true; 471 } 472 473 void RuntimePointerChecking::printChecks( 474 raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks, 475 unsigned Depth) const { 476 unsigned N = 0; 477 for (const auto &Check : Checks) { 478 const auto &First = Check.first->Members, &Second = Check.second->Members; 479 480 OS.indent(Depth) << "Check " << N++ << ":\n"; 481 482 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n"; 483 for (unsigned K = 0; K < First.size(); ++K) 484 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n"; 485 486 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n"; 487 for (unsigned K = 0; K < Second.size(); ++K) 488 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n"; 489 } 490 } 491 492 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const { 493 494 OS.indent(Depth) << "Run-time memory checks:\n"; 495 printChecks(OS, Checks, Depth); 496 497 OS.indent(Depth) << "Grouped accesses:\n"; 498 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 499 const auto &CG = CheckingGroups[I]; 500 501 OS.indent(Depth + 2) << "Group " << &CG << ":\n"; 502 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High 503 << ")\n"; 504 for (unsigned J = 0; J < CG.Members.size(); ++J) { 505 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr 506 << "\n"; 507 } 508 } 509 } 510 511 namespace { 512 513 /// \brief Analyses memory accesses in a loop. 514 /// 515 /// Checks whether run time pointer checks are needed and builds sets for data 516 /// dependence checking. 517 class AccessAnalysis { 518 public: 519 /// \brief Read or write access location. 520 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo; 521 typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet; 522 523 AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI, 524 MemoryDepChecker::DepCandidates &DA, 525 PredicatedScalarEvolution &PSE) 526 : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false), 527 PSE(PSE) {} 528 529 /// \brief Register a load and whether it is only read from. 530 void addLoad(MemoryLocation &Loc, bool IsReadOnly) { 531 Value *Ptr = const_cast<Value*>(Loc.Ptr); 532 AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); 533 Accesses.insert(MemAccessInfo(Ptr, false)); 534 if (IsReadOnly) 535 ReadOnlyPtr.insert(Ptr); 536 } 537 538 /// \brief Register a store. 539 void addStore(MemoryLocation &Loc) { 540 Value *Ptr = const_cast<Value*>(Loc.Ptr); 541 AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); 542 Accesses.insert(MemAccessInfo(Ptr, true)); 543 } 544 545 /// \brief Check whether we can check the pointers at runtime for 546 /// non-intersection. 547 /// 548 /// Returns true if we need no check or if we do and we can generate them 549 /// (i.e. the pointers have computable bounds). 550 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE, 551 Loop *TheLoop, const ValueToValueMap &Strides, 552 bool ShouldCheckWrap = false); 553 554 /// \brief Goes over all memory accesses, checks whether a RT check is needed 555 /// and builds sets of dependent accesses. 556 void buildDependenceSets() { 557 processMemAccesses(); 558 } 559 560 /// \brief Initial processing of memory accesses determined that we need to 561 /// perform dependency checking. 562 /// 563 /// Note that this can later be cleared if we retry memcheck analysis without 564 /// dependency checking (i.e. ShouldRetryWithRuntimeCheck). 565 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); } 566 567 /// We decided that no dependence analysis would be used. Reset the state. 568 void resetDepChecks(MemoryDepChecker &DepChecker) { 569 CheckDeps.clear(); 570 DepChecker.clearDependences(); 571 } 572 573 MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; } 574 575 private: 576 typedef SetVector<MemAccessInfo> PtrAccessSet; 577 578 /// \brief Go over all memory access and check whether runtime pointer checks 579 /// are needed and build sets of dependency check candidates. 580 void processMemAccesses(); 581 582 /// Set of all accesses. 583 PtrAccessSet Accesses; 584 585 const DataLayout &DL; 586 587 /// Set of accesses that need a further dependence check. 588 MemAccessInfoSet CheckDeps; 589 590 /// Set of pointers that are read only. 591 SmallPtrSet<Value*, 16> ReadOnlyPtr; 592 593 /// An alias set tracker to partition the access set by underlying object and 594 //intrinsic property (such as TBAA metadata). 595 AliasSetTracker AST; 596 597 LoopInfo *LI; 598 599 /// Sets of potentially dependent accesses - members of one set share an 600 /// underlying pointer. The set "CheckDeps" identfies which sets really need a 601 /// dependence check. 602 MemoryDepChecker::DepCandidates &DepCands; 603 604 /// \brief Initial processing of memory accesses determined that we may need 605 /// to add memchecks. Perform the analysis to determine the necessary checks. 606 /// 607 /// Note that, this is different from isDependencyCheckNeeded. When we retry 608 /// memcheck analysis without dependency checking 609 /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared 610 /// while this remains set if we have potentially dependent accesses. 611 bool IsRTCheckAnalysisNeeded; 612 613 /// The SCEV predicate containing all the SCEV-related assumptions. 614 PredicatedScalarEvolution &PSE; 615 }; 616 617 } // end anonymous namespace 618 619 /// \brief Check whether a pointer can participate in a runtime bounds check. 620 static bool hasComputableBounds(PredicatedScalarEvolution &PSE, 621 const ValueToValueMap &Strides, Value *Ptr, 622 Loop *L) { 623 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 624 625 // The bounds for loop-invariant pointer is trivial. 626 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 627 return true; 628 629 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 630 if (!AR) 631 return false; 632 633 return AR->isAffine(); 634 } 635 636 /// \brief Check whether a pointer address cannot wrap. 637 static bool isNoWrap(PredicatedScalarEvolution &PSE, 638 const ValueToValueMap &Strides, Value *Ptr, Loop *L) { 639 const SCEV *PtrScev = PSE.getSCEV(Ptr); 640 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 641 return true; 642 643 int64_t Stride = getPtrStride(PSE, Ptr, L, Strides); 644 return Stride == 1; 645 } 646 647 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck, 648 ScalarEvolution *SE, Loop *TheLoop, 649 const ValueToValueMap &StridesMap, 650 bool ShouldCheckWrap) { 651 // Find pointers with computable bounds. We are going to use this information 652 // to place a runtime bound check. 653 bool CanDoRT = true; 654 655 bool NeedRTCheck = false; 656 if (!IsRTCheckAnalysisNeeded) return true; 657 658 bool IsDepCheckNeeded = isDependencyCheckNeeded(); 659 660 // We assign a consecutive id to access from different alias sets. 661 // Accesses between different groups doesn't need to be checked. 662 unsigned ASId = 1; 663 for (auto &AS : AST) { 664 int NumReadPtrChecks = 0; 665 int NumWritePtrChecks = 0; 666 667 // We assign consecutive id to access from different dependence sets. 668 // Accesses within the same set don't need a runtime check. 669 unsigned RunningDepId = 1; 670 DenseMap<Value *, unsigned> DepSetId; 671 672 for (auto A : AS) { 673 Value *Ptr = A.getValue(); 674 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true)); 675 MemAccessInfo Access(Ptr, IsWrite); 676 677 if (IsWrite) 678 ++NumWritePtrChecks; 679 else 680 ++NumReadPtrChecks; 681 682 if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) && 683 // When we run after a failing dependency check we have to make sure 684 // we don't have wrapping pointers. 685 (!ShouldCheckWrap || isNoWrap(PSE, StridesMap, Ptr, TheLoop))) { 686 // The id of the dependence set. 687 unsigned DepId; 688 689 if (IsDepCheckNeeded) { 690 Value *Leader = DepCands.getLeaderValue(Access).getPointer(); 691 unsigned &LeaderId = DepSetId[Leader]; 692 if (!LeaderId) 693 LeaderId = RunningDepId++; 694 DepId = LeaderId; 695 } else 696 // Each access has its own dependence set. 697 DepId = RunningDepId++; 698 699 RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE); 700 701 DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n'); 702 } else { 703 DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n'); 704 CanDoRT = false; 705 } 706 } 707 708 // If we have at least two writes or one write and a read then we need to 709 // check them. But there is no need to checks if there is only one 710 // dependence set for this alias set. 711 // 712 // Note that this function computes CanDoRT and NeedRTCheck independently. 713 // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer 714 // for which we couldn't find the bounds but we don't actually need to emit 715 // any checks so it does not matter. 716 if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2)) 717 NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 && 718 NumWritePtrChecks >= 1)); 719 720 ++ASId; 721 } 722 723 // If the pointers that we would use for the bounds comparison have different 724 // address spaces, assume the values aren't directly comparable, so we can't 725 // use them for the runtime check. We also have to assume they could 726 // overlap. In the future there should be metadata for whether address spaces 727 // are disjoint. 728 unsigned NumPointers = RtCheck.Pointers.size(); 729 for (unsigned i = 0; i < NumPointers; ++i) { 730 for (unsigned j = i + 1; j < NumPointers; ++j) { 731 // Only need to check pointers between two different dependency sets. 732 if (RtCheck.Pointers[i].DependencySetId == 733 RtCheck.Pointers[j].DependencySetId) 734 continue; 735 // Only need to check pointers in the same alias set. 736 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId) 737 continue; 738 739 Value *PtrI = RtCheck.Pointers[i].PointerValue; 740 Value *PtrJ = RtCheck.Pointers[j].PointerValue; 741 742 unsigned ASi = PtrI->getType()->getPointerAddressSpace(); 743 unsigned ASj = PtrJ->getType()->getPointerAddressSpace(); 744 if (ASi != ASj) { 745 DEBUG(dbgs() << "LAA: Runtime check would require comparison between" 746 " different address spaces\n"); 747 return false; 748 } 749 } 750 } 751 752 if (NeedRTCheck && CanDoRT) 753 RtCheck.generateChecks(DepCands, IsDepCheckNeeded); 754 755 DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks() 756 << " pointer comparisons.\n"); 757 758 RtCheck.Need = NeedRTCheck; 759 760 bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT; 761 if (!CanDoRTIfNeeded) 762 RtCheck.reset(); 763 return CanDoRTIfNeeded; 764 } 765 766 void AccessAnalysis::processMemAccesses() { 767 // We process the set twice: first we process read-write pointers, last we 768 // process read-only pointers. This allows us to skip dependence tests for 769 // read-only pointers. 770 771 DEBUG(dbgs() << "LAA: Processing memory accesses...\n"); 772 DEBUG(dbgs() << " AST: "; AST.dump()); 773 DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n"); 774 DEBUG({ 775 for (auto A : Accesses) 776 dbgs() << "\t" << *A.getPointer() << " (" << 777 (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ? 778 "read-only" : "read")) << ")\n"; 779 }); 780 781 // The AliasSetTracker has nicely partitioned our pointers by metadata 782 // compatibility and potential for underlying-object overlap. As a result, we 783 // only need to check for potential pointer dependencies within each alias 784 // set. 785 for (auto &AS : AST) { 786 // Note that both the alias-set tracker and the alias sets themselves used 787 // linked lists internally and so the iteration order here is deterministic 788 // (matching the original instruction order within each set). 789 790 bool SetHasWrite = false; 791 792 // Map of pointers to last access encountered. 793 typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap; 794 UnderlyingObjToAccessMap ObjToLastAccess; 795 796 // Set of access to check after all writes have been processed. 797 PtrAccessSet DeferredAccesses; 798 799 // Iterate over each alias set twice, once to process read/write pointers, 800 // and then to process read-only pointers. 801 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) { 802 bool UseDeferred = SetIteration > 0; 803 PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses; 804 805 for (auto AV : AS) { 806 Value *Ptr = AV.getValue(); 807 808 // For a single memory access in AliasSetTracker, Accesses may contain 809 // both read and write, and they both need to be handled for CheckDeps. 810 for (auto AC : S) { 811 if (AC.getPointer() != Ptr) 812 continue; 813 814 bool IsWrite = AC.getInt(); 815 816 // If we're using the deferred access set, then it contains only 817 // reads. 818 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite; 819 if (UseDeferred && !IsReadOnlyPtr) 820 continue; 821 // Otherwise, the pointer must be in the PtrAccessSet, either as a 822 // read or a write. 823 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite || 824 S.count(MemAccessInfo(Ptr, false))) && 825 "Alias-set pointer not in the access set?"); 826 827 MemAccessInfo Access(Ptr, IsWrite); 828 DepCands.insert(Access); 829 830 // Memorize read-only pointers for later processing and skip them in 831 // the first round (they need to be checked after we have seen all 832 // write pointers). Note: we also mark pointer that are not 833 // consecutive as "read-only" pointers (so that we check 834 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite". 835 if (!UseDeferred && IsReadOnlyPtr) { 836 DeferredAccesses.insert(Access); 837 continue; 838 } 839 840 // If this is a write - check other reads and writes for conflicts. If 841 // this is a read only check other writes for conflicts (but only if 842 // there is no other write to the ptr - this is an optimization to 843 // catch "a[i] = a[i] + " without having to do a dependence check). 844 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) { 845 CheckDeps.insert(Access); 846 IsRTCheckAnalysisNeeded = true; 847 } 848 849 if (IsWrite) 850 SetHasWrite = true; 851 852 // Create sets of pointers connected by a shared alias set and 853 // underlying object. 854 typedef SmallVector<Value *, 16> ValueVector; 855 ValueVector TempObjects; 856 857 GetUnderlyingObjects(Ptr, TempObjects, DL, LI); 858 DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n"); 859 for (Value *UnderlyingObj : TempObjects) { 860 // nullptr never alias, don't join sets for pointer that have "null" 861 // in their UnderlyingObjects list. 862 if (isa<ConstantPointerNull>(UnderlyingObj)) 863 continue; 864 865 UnderlyingObjToAccessMap::iterator Prev = 866 ObjToLastAccess.find(UnderlyingObj); 867 if (Prev != ObjToLastAccess.end()) 868 DepCands.unionSets(Access, Prev->second); 869 870 ObjToLastAccess[UnderlyingObj] = Access; 871 DEBUG(dbgs() << " " << *UnderlyingObj << "\n"); 872 } 873 } 874 } 875 } 876 } 877 } 878 879 static bool isInBoundsGep(Value *Ptr) { 880 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) 881 return GEP->isInBounds(); 882 return false; 883 } 884 885 /// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping, 886 /// i.e. monotonically increasing/decreasing. 887 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, 888 PredicatedScalarEvolution &PSE, const Loop *L) { 889 // FIXME: This should probably only return true for NUW. 890 if (AR->getNoWrapFlags(SCEV::NoWrapMask)) 891 return true; 892 893 // Scalar evolution does not propagate the non-wrapping flags to values that 894 // are derived from a non-wrapping induction variable because non-wrapping 895 // could be flow-sensitive. 896 // 897 // Look through the potentially overflowing instruction to try to prove 898 // non-wrapping for the *specific* value of Ptr. 899 900 // The arithmetic implied by an inbounds GEP can't overflow. 901 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr); 902 if (!GEP || !GEP->isInBounds()) 903 return false; 904 905 // Make sure there is only one non-const index and analyze that. 906 Value *NonConstIndex = nullptr; 907 for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end())) 908 if (!isa<ConstantInt>(Index)) { 909 if (NonConstIndex) 910 return false; 911 NonConstIndex = Index; 912 } 913 if (!NonConstIndex) 914 // The recurrence is on the pointer, ignore for now. 915 return false; 916 917 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW 918 // AddRec using a NSW operation. 919 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex)) 920 if (OBO->hasNoSignedWrap() && 921 // Assume constant for other the operand so that the AddRec can be 922 // easily found. 923 isa<ConstantInt>(OBO->getOperand(1))) { 924 auto *OpScev = PSE.getSCEV(OBO->getOperand(0)); 925 926 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev)) 927 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW); 928 } 929 930 return false; 931 } 932 933 /// \brief Check whether the access through \p Ptr has a constant stride. 934 int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, 935 const Loop *Lp, const ValueToValueMap &StridesMap, 936 bool Assume, bool ShouldCheckWrap) { 937 Type *Ty = Ptr->getType(); 938 assert(Ty->isPointerTy() && "Unexpected non-ptr"); 939 940 // Make sure that the pointer does not point to aggregate types. 941 auto *PtrTy = cast<PointerType>(Ty); 942 if (PtrTy->getElementType()->isAggregateType()) { 943 DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr 944 << "\n"); 945 return 0; 946 } 947 948 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr); 949 950 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 951 if (Assume && !AR) 952 AR = PSE.getAsAddRec(Ptr); 953 954 if (!AR) { 955 DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr 956 << " SCEV: " << *PtrScev << "\n"); 957 return 0; 958 } 959 960 // The accesss function must stride over the innermost loop. 961 if (Lp != AR->getLoop()) { 962 DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " << 963 *Ptr << " SCEV: " << *AR << "\n"); 964 return 0; 965 } 966 967 // The address calculation must not wrap. Otherwise, a dependence could be 968 // inverted. 969 // An inbounds getelementptr that is a AddRec with a unit stride 970 // cannot wrap per definition. The unit stride requirement is checked later. 971 // An getelementptr without an inbounds attribute and unit stride would have 972 // to access the pointer value "0" which is undefined behavior in address 973 // space 0, therefore we can also vectorize this case. 974 bool IsInBoundsGEP = isInBoundsGep(Ptr); 975 bool IsNoWrapAddRec = !ShouldCheckWrap || 976 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) || 977 isNoWrapAddRec(Ptr, AR, PSE, Lp); 978 bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0; 979 if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) { 980 if (Assume) { 981 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 982 IsNoWrapAddRec = true; 983 DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n" 984 << "LAA: Pointer: " << *Ptr << "\n" 985 << "LAA: SCEV: " << *AR << "\n" 986 << "LAA: Added an overflow assumption\n"); 987 } else { 988 DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space " 989 << *Ptr << " SCEV: " << *AR << "\n"); 990 return 0; 991 } 992 } 993 994 // Check the step is constant. 995 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 996 997 // Calculate the pointer stride and check if it is constant. 998 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 999 if (!C) { 1000 DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr << 1001 " SCEV: " << *AR << "\n"); 1002 return 0; 1003 } 1004 1005 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 1006 int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 1007 const APInt &APStepVal = C->getAPInt(); 1008 1009 // Huge step value - give up. 1010 if (APStepVal.getBitWidth() > 64) 1011 return 0; 1012 1013 int64_t StepVal = APStepVal.getSExtValue(); 1014 1015 // Strided access. 1016 int64_t Stride = StepVal / Size; 1017 int64_t Rem = StepVal % Size; 1018 if (Rem) 1019 return 0; 1020 1021 // If the SCEV could wrap but we have an inbounds gep with a unit stride we 1022 // know we can't "wrap around the address space". In case of address space 1023 // zero we know that this won't happen without triggering undefined behavior. 1024 if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) && 1025 Stride != 1 && Stride != -1) { 1026 if (Assume) { 1027 // We can avoid this case by adding a run-time check. 1028 DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either " 1029 << "inbouds or in address space 0 may wrap:\n" 1030 << "LAA: Pointer: " << *Ptr << "\n" 1031 << "LAA: SCEV: " << *AR << "\n" 1032 << "LAA: Added an overflow assumption\n"); 1033 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 1034 } else 1035 return 0; 1036 } 1037 1038 return Stride; 1039 } 1040 1041 /// Take the pointer operand from the Load/Store instruction. 1042 /// Returns NULL if this is not a valid Load/Store instruction. 1043 static Value *getPointerOperand(Value *I) { 1044 if (auto *LI = dyn_cast<LoadInst>(I)) 1045 return LI->getPointerOperand(); 1046 if (auto *SI = dyn_cast<StoreInst>(I)) 1047 return SI->getPointerOperand(); 1048 return nullptr; 1049 } 1050 1051 /// Take the address space operand from the Load/Store instruction. 1052 /// Returns -1 if this is not a valid Load/Store instruction. 1053 static unsigned getAddressSpaceOperand(Value *I) { 1054 if (LoadInst *L = dyn_cast<LoadInst>(I)) 1055 return L->getPointerAddressSpace(); 1056 if (StoreInst *S = dyn_cast<StoreInst>(I)) 1057 return S->getPointerAddressSpace(); 1058 return -1; 1059 } 1060 1061 /// Saves the memory accesses after sorting it into vector argument 'Sorted'. 1062 void llvm::sortMemAccesses(ArrayRef<Value *> VL, const DataLayout &DL, 1063 ScalarEvolution &SE, 1064 SmallVectorImpl<Value *> &Sorted) { 1065 SmallVector<std::pair<int, Value *>, 4> OffValPairs; 1066 for (auto *Val : VL) { 1067 // Compute the constant offset from the base pointer of each memory accesses 1068 // and insert into the vector of key,value pair which needs to be sorted. 1069 Value *Ptr = getPointerOperand(Val); 1070 unsigned AS = getAddressSpaceOperand(Val); 1071 unsigned PtrBitWidth = DL.getPointerSizeInBits(AS); 1072 Type *Ty = cast<PointerType>(Ptr->getType())->getElementType(); 1073 APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty)); 1074 1075 // FIXME: Currently the offsets are assumed to be constant.However this not 1076 // always true as offsets can be variables also and we would need to 1077 // consider the difference of the variable offsets. 1078 APInt Offset(PtrBitWidth, 0); 1079 Ptr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset); 1080 OffValPairs.push_back(std::make_pair(Offset.getSExtValue(), Val)); 1081 } 1082 std::sort(OffValPairs.begin(), OffValPairs.end(), 1083 [](const std::pair<int, Value *> &Left, 1084 const std::pair<int, Value *> &Right) { 1085 return Left.first < Right.first; 1086 }); 1087 1088 for (auto& it : OffValPairs) 1089 Sorted.push_back(it.second); 1090 } 1091 1092 /// Returns true if the memory operations \p A and \p B are consecutive. 1093 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, 1094 ScalarEvolution &SE, bool CheckType) { 1095 Value *PtrA = getPointerOperand(A); 1096 Value *PtrB = getPointerOperand(B); 1097 unsigned ASA = getAddressSpaceOperand(A); 1098 unsigned ASB = getAddressSpaceOperand(B); 1099 1100 // Check that the address spaces match and that the pointers are valid. 1101 if (!PtrA || !PtrB || (ASA != ASB)) 1102 return false; 1103 1104 // Make sure that A and B are different pointers. 1105 if (PtrA == PtrB) 1106 return false; 1107 1108 // Make sure that A and B have the same type if required. 1109 if (CheckType && PtrA->getType() != PtrB->getType()) 1110 return false; 1111 1112 unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA); 1113 Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); 1114 APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty)); 1115 1116 APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0); 1117 PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); 1118 PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); 1119 1120 // OffsetDelta = OffsetB - OffsetA; 1121 const SCEV *OffsetSCEVA = SE.getConstant(OffsetA); 1122 const SCEV *OffsetSCEVB = SE.getConstant(OffsetB); 1123 const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA); 1124 const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV); 1125 const APInt &OffsetDelta = OffsetDeltaC->getAPInt(); 1126 // Check if they are based on the same pointer. That makes the offsets 1127 // sufficient. 1128 if (PtrA == PtrB) 1129 return OffsetDelta == Size; 1130 1131 // Compute the necessary base pointer delta to have the necessary final delta 1132 // equal to the size. 1133 // BaseDelta = Size - OffsetDelta; 1134 const SCEV *SizeSCEV = SE.getConstant(Size); 1135 const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV); 1136 1137 // Otherwise compute the distance with SCEV between the base pointers. 1138 const SCEV *PtrSCEVA = SE.getSCEV(PtrA); 1139 const SCEV *PtrSCEVB = SE.getSCEV(PtrB); 1140 const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta); 1141 return X == PtrSCEVB; 1142 } 1143 1144 bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) { 1145 switch (Type) { 1146 case NoDep: 1147 case Forward: 1148 case BackwardVectorizable: 1149 return true; 1150 1151 case Unknown: 1152 case ForwardButPreventsForwarding: 1153 case Backward: 1154 case BackwardVectorizableButPreventsForwarding: 1155 return false; 1156 } 1157 llvm_unreachable("unexpected DepType!"); 1158 } 1159 1160 bool MemoryDepChecker::Dependence::isBackward() const { 1161 switch (Type) { 1162 case NoDep: 1163 case Forward: 1164 case ForwardButPreventsForwarding: 1165 case Unknown: 1166 return false; 1167 1168 case BackwardVectorizable: 1169 case Backward: 1170 case BackwardVectorizableButPreventsForwarding: 1171 return true; 1172 } 1173 llvm_unreachable("unexpected DepType!"); 1174 } 1175 1176 bool MemoryDepChecker::Dependence::isPossiblyBackward() const { 1177 return isBackward() || Type == Unknown; 1178 } 1179 1180 bool MemoryDepChecker::Dependence::isForward() const { 1181 switch (Type) { 1182 case Forward: 1183 case ForwardButPreventsForwarding: 1184 return true; 1185 1186 case NoDep: 1187 case Unknown: 1188 case BackwardVectorizable: 1189 case Backward: 1190 case BackwardVectorizableButPreventsForwarding: 1191 return false; 1192 } 1193 llvm_unreachable("unexpected DepType!"); 1194 } 1195 1196 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance, 1197 uint64_t TypeByteSize) { 1198 // If loads occur at a distance that is not a multiple of a feasible vector 1199 // factor store-load forwarding does not take place. 1200 // Positive dependences might cause troubles because vectorizing them might 1201 // prevent store-load forwarding making vectorized code run a lot slower. 1202 // a[i] = a[i-3] ^ a[i-8]; 1203 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and 1204 // hence on your typical architecture store-load forwarding does not take 1205 // place. Vectorizing in such cases does not make sense. 1206 // Store-load forwarding distance. 1207 1208 // After this many iterations store-to-load forwarding conflicts should not 1209 // cause any slowdowns. 1210 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize; 1211 // Maximum vector factor. 1212 uint64_t MaxVFWithoutSLForwardIssues = std::min( 1213 VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes); 1214 1215 // Compute the smallest VF at which the store and load would be misaligned. 1216 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues; 1217 VF *= 2) { 1218 // If the number of vector iteration between the store and the load are 1219 // small we could incur conflicts. 1220 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) { 1221 MaxVFWithoutSLForwardIssues = (VF >>= 1); 1222 break; 1223 } 1224 } 1225 1226 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) { 1227 DEBUG(dbgs() << "LAA: Distance " << Distance 1228 << " that could cause a store-load forwarding conflict\n"); 1229 return true; 1230 } 1231 1232 if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes && 1233 MaxVFWithoutSLForwardIssues != 1234 VectorizerParams::MaxVectorWidth * TypeByteSize) 1235 MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues; 1236 return false; 1237 } 1238 1239 /// \brief Check the dependence for two accesses with the same stride \p Stride. 1240 /// \p Distance is the positive distance and \p TypeByteSize is type size in 1241 /// bytes. 1242 /// 1243 /// \returns true if they are independent. 1244 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, 1245 uint64_t TypeByteSize) { 1246 assert(Stride > 1 && "The stride must be greater than 1"); 1247 assert(TypeByteSize > 0 && "The type size in byte must be non-zero"); 1248 assert(Distance > 0 && "The distance must be non-zero"); 1249 1250 // Skip if the distance is not multiple of type byte size. 1251 if (Distance % TypeByteSize) 1252 return false; 1253 1254 uint64_t ScaledDist = Distance / TypeByteSize; 1255 1256 // No dependence if the scaled distance is not multiple of the stride. 1257 // E.g. 1258 // for (i = 0; i < 1024 ; i += 4) 1259 // A[i+2] = A[i] + 1; 1260 // 1261 // Two accesses in memory (scaled distance is 2, stride is 4): 1262 // | A[0] | | | | A[4] | | | | 1263 // | | | A[2] | | | | A[6] | | 1264 // 1265 // E.g. 1266 // for (i = 0; i < 1024 ; i += 3) 1267 // A[i+4] = A[i] + 1; 1268 // 1269 // Two accesses in memory (scaled distance is 4, stride is 3): 1270 // | A[0] | | | A[3] | | | A[6] | | | 1271 // | | | | | A[4] | | | A[7] | | 1272 return ScaledDist % Stride; 1273 } 1274 1275 MemoryDepChecker::Dependence::DepType 1276 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, 1277 const MemAccessInfo &B, unsigned BIdx, 1278 const ValueToValueMap &Strides) { 1279 assert (AIdx < BIdx && "Must pass arguments in program order"); 1280 1281 Value *APtr = A.getPointer(); 1282 Value *BPtr = B.getPointer(); 1283 bool AIsWrite = A.getInt(); 1284 bool BIsWrite = B.getInt(); 1285 1286 // Two reads are independent. 1287 if (!AIsWrite && !BIsWrite) 1288 return Dependence::NoDep; 1289 1290 // We cannot check pointers in different address spaces. 1291 if (APtr->getType()->getPointerAddressSpace() != 1292 BPtr->getType()->getPointerAddressSpace()) 1293 return Dependence::Unknown; 1294 1295 int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true); 1296 int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true); 1297 1298 const SCEV *Src = PSE.getSCEV(APtr); 1299 const SCEV *Sink = PSE.getSCEV(BPtr); 1300 1301 // If the induction step is negative we have to invert source and sink of the 1302 // dependence. 1303 if (StrideAPtr < 0) { 1304 std::swap(APtr, BPtr); 1305 std::swap(Src, Sink); 1306 std::swap(AIsWrite, BIsWrite); 1307 std::swap(AIdx, BIdx); 1308 std::swap(StrideAPtr, StrideBPtr); 1309 } 1310 1311 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src); 1312 1313 DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink 1314 << "(Induction step: " << StrideAPtr << ")\n"); 1315 DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to " 1316 << *InstMap[BIdx] << ": " << *Dist << "\n"); 1317 1318 // Need accesses with constant stride. We don't want to vectorize 1319 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in 1320 // the address space. 1321 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){ 1322 DEBUG(dbgs() << "Pointer access with non-constant stride\n"); 1323 return Dependence::Unknown; 1324 } 1325 1326 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist); 1327 if (!C) { 1328 DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); 1329 ShouldRetryWithRuntimeCheck = true; 1330 return Dependence::Unknown; 1331 } 1332 1333 Type *ATy = APtr->getType()->getPointerElementType(); 1334 Type *BTy = BPtr->getType()->getPointerElementType(); 1335 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); 1336 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy); 1337 1338 const APInt &Val = C->getAPInt(); 1339 int64_t Distance = Val.getSExtValue(); 1340 uint64_t Stride = std::abs(StrideAPtr); 1341 1342 // Attempt to prove strided accesses independent. 1343 if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy && 1344 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) { 1345 DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); 1346 return Dependence::NoDep; 1347 } 1348 1349 // Negative distances are not plausible dependencies. 1350 if (Val.isNegative()) { 1351 bool IsTrueDataDependence = (AIsWrite && !BIsWrite); 1352 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1353 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) || 1354 ATy != BTy)) { 1355 DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); 1356 return Dependence::ForwardButPreventsForwarding; 1357 } 1358 1359 DEBUG(dbgs() << "LAA: Dependence is negative\n"); 1360 return Dependence::Forward; 1361 } 1362 1363 // Write to the same location with the same size. 1364 // Could be improved to assert type sizes are the same (i32 == float, etc). 1365 if (Val == 0) { 1366 if (ATy == BTy) 1367 return Dependence::Forward; 1368 DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n"); 1369 return Dependence::Unknown; 1370 } 1371 1372 assert(Val.isStrictlyPositive() && "Expect a positive value"); 1373 1374 if (ATy != BTy) { 1375 DEBUG(dbgs() << 1376 "LAA: ReadWrite-Write positive dependency with different types\n"); 1377 return Dependence::Unknown; 1378 } 1379 1380 // Bail out early if passed-in parameters make vectorization not feasible. 1381 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ? 1382 VectorizerParams::VectorizationFactor : 1); 1383 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ? 1384 VectorizerParams::VectorizationInterleave : 1); 1385 // The minimum number of iterations for a vectorized/unrolled version. 1386 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U); 1387 1388 // It's not vectorizable if the distance is smaller than the minimum distance 1389 // needed for a vectroized/unrolled version. Vectorizing one iteration in 1390 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs 1391 // TypeByteSize (No need to plus the last gap distance). 1392 // 1393 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1394 // foo(int *A) { 1395 // int *B = (int *)((char *)A + 14); 1396 // for (i = 0 ; i < 1024 ; i += 2) 1397 // B[i] = A[i] + 1; 1398 // } 1399 // 1400 // Two accesses in memory (stride is 2): 1401 // | A[0] | | A[2] | | A[4] | | A[6] | | 1402 // | B[0] | | B[2] | | B[4] | 1403 // 1404 // Distance needs for vectorizing iterations except the last iteration: 1405 // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4. 1406 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4. 1407 // 1408 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is 1409 // 12, which is less than distance. 1410 // 1411 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4), 1412 // the minimum distance needed is 28, which is greater than distance. It is 1413 // not safe to do vectorization. 1414 uint64_t MinDistanceNeeded = 1415 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize; 1416 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) { 1417 DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance 1418 << '\n'); 1419 return Dependence::Backward; 1420 } 1421 1422 // Unsafe if the minimum distance needed is greater than max safe distance. 1423 if (MinDistanceNeeded > MaxSafeDepDistBytes) { 1424 DEBUG(dbgs() << "LAA: Failure because it needs at least " 1425 << MinDistanceNeeded << " size in bytes"); 1426 return Dependence::Backward; 1427 } 1428 1429 // Positive distance bigger than max vectorization factor. 1430 // FIXME: Should use max factor instead of max distance in bytes, which could 1431 // not handle different types. 1432 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1433 // void foo (int *A, char *B) { 1434 // for (unsigned i = 0; i < 1024; i++) { 1435 // A[i+2] = A[i] + 1; 1436 // B[i+2] = B[i] + 1; 1437 // } 1438 // } 1439 // 1440 // This case is currently unsafe according to the max safe distance. If we 1441 // analyze the two accesses on array B, the max safe dependence distance 1442 // is 2. Then we analyze the accesses on array A, the minimum distance needed 1443 // is 8, which is less than 2 and forbidden vectorization, But actually 1444 // both A and B could be vectorized by 2 iterations. 1445 MaxSafeDepDistBytes = 1446 std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes); 1447 1448 bool IsTrueDataDependence = (!AIsWrite && BIsWrite); 1449 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1450 couldPreventStoreLoadForward(Distance, TypeByteSize)) 1451 return Dependence::BackwardVectorizableButPreventsForwarding; 1452 1453 DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue() 1454 << " with max VF = " 1455 << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n'); 1456 1457 return Dependence::BackwardVectorizable; 1458 } 1459 1460 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets, 1461 MemAccessInfoSet &CheckDeps, 1462 const ValueToValueMap &Strides) { 1463 1464 MaxSafeDepDistBytes = -1; 1465 while (!CheckDeps.empty()) { 1466 MemAccessInfo CurAccess = *CheckDeps.begin(); 1467 1468 // Get the relevant memory access set. 1469 EquivalenceClasses<MemAccessInfo>::iterator I = 1470 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess)); 1471 1472 // Check accesses within this set. 1473 EquivalenceClasses<MemAccessInfo>::member_iterator AI = 1474 AccessSets.member_begin(I); 1475 EquivalenceClasses<MemAccessInfo>::member_iterator AE = 1476 AccessSets.member_end(); 1477 1478 // Check every access pair. 1479 while (AI != AE) { 1480 CheckDeps.erase(*AI); 1481 EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI); 1482 while (OI != AE) { 1483 // Check every accessing instruction pair in program order. 1484 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(), 1485 I1E = Accesses[*AI].end(); I1 != I1E; ++I1) 1486 for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(), 1487 I2E = Accesses[*OI].end(); I2 != I2E; ++I2) { 1488 auto A = std::make_pair(&*AI, *I1); 1489 auto B = std::make_pair(&*OI, *I2); 1490 1491 assert(*I1 != *I2); 1492 if (*I1 > *I2) 1493 std::swap(A, B); 1494 1495 Dependence::DepType Type = 1496 isDependent(*A.first, A.second, *B.first, B.second, Strides); 1497 SafeForVectorization &= Dependence::isSafeForVectorization(Type); 1498 1499 // Gather dependences unless we accumulated MaxDependences 1500 // dependences. In that case return as soon as we find the first 1501 // unsafe dependence. This puts a limit on this quadratic 1502 // algorithm. 1503 if (RecordDependences) { 1504 if (Type != Dependence::NoDep) 1505 Dependences.push_back(Dependence(A.second, B.second, Type)); 1506 1507 if (Dependences.size() >= MaxDependences) { 1508 RecordDependences = false; 1509 Dependences.clear(); 1510 DEBUG(dbgs() << "Too many dependences, stopped recording\n"); 1511 } 1512 } 1513 if (!RecordDependences && !SafeForVectorization) 1514 return false; 1515 } 1516 ++OI; 1517 } 1518 AI++; 1519 } 1520 } 1521 1522 DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n"); 1523 return SafeForVectorization; 1524 } 1525 1526 SmallVector<Instruction *, 4> 1527 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const { 1528 MemAccessInfo Access(Ptr, isWrite); 1529 auto &IndexVector = Accesses.find(Access)->second; 1530 1531 SmallVector<Instruction *, 4> Insts; 1532 transform(IndexVector, 1533 std::back_inserter(Insts), 1534 [&](unsigned Idx) { return this->InstMap[Idx]; }); 1535 return Insts; 1536 } 1537 1538 const char *MemoryDepChecker::Dependence::DepName[] = { 1539 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward", 1540 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"}; 1541 1542 void MemoryDepChecker::Dependence::print( 1543 raw_ostream &OS, unsigned Depth, 1544 const SmallVectorImpl<Instruction *> &Instrs) const { 1545 OS.indent(Depth) << DepName[Type] << ":\n"; 1546 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n"; 1547 OS.indent(Depth + 2) << *Instrs[Destination] << "\n"; 1548 } 1549 1550 bool LoopAccessInfo::canAnalyzeLoop() { 1551 // We need to have a loop header. 1552 DEBUG(dbgs() << "LAA: Found a loop in " 1553 << TheLoop->getHeader()->getParent()->getName() << ": " 1554 << TheLoop->getHeader()->getName() << '\n'); 1555 1556 // We can only analyze innermost loops. 1557 if (!TheLoop->empty()) { 1558 DEBUG(dbgs() << "LAA: loop is not the innermost loop\n"); 1559 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop"; 1560 return false; 1561 } 1562 1563 // We must have a single backedge. 1564 if (TheLoop->getNumBackEdges() != 1) { 1565 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1566 recordAnalysis("CFGNotUnderstood") 1567 << "loop control flow is not understood by analyzer"; 1568 return false; 1569 } 1570 1571 // We must have a single exiting block. 1572 if (!TheLoop->getExitingBlock()) { 1573 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1574 recordAnalysis("CFGNotUnderstood") 1575 << "loop control flow is not understood by analyzer"; 1576 return false; 1577 } 1578 1579 // We only handle bottom-tested loops, i.e. loop in which the condition is 1580 // checked at the end of each iteration. With that we can assume that all 1581 // instructions in the loop are executed the same number of times. 1582 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 1583 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1584 recordAnalysis("CFGNotUnderstood") 1585 << "loop control flow is not understood by analyzer"; 1586 return false; 1587 } 1588 1589 // ScalarEvolution needs to be able to find the exit count. 1590 const SCEV *ExitCount = PSE->getBackedgeTakenCount(); 1591 if (ExitCount == PSE->getSE()->getCouldNotCompute()) { 1592 recordAnalysis("CantComputeNumberOfIterations") 1593 << "could not determine number of loop iterations"; 1594 DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n"); 1595 return false; 1596 } 1597 1598 return true; 1599 } 1600 1601 void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI, 1602 const TargetLibraryInfo *TLI, 1603 DominatorTree *DT) { 1604 typedef SmallPtrSet<Value*, 16> ValueSet; 1605 1606 // Holds the Load and Store instructions. 1607 SmallVector<LoadInst *, 16> Loads; 1608 SmallVector<StoreInst *, 16> Stores; 1609 1610 // Holds all the different accesses in the loop. 1611 unsigned NumReads = 0; 1612 unsigned NumReadWrites = 0; 1613 1614 PtrRtChecking->Pointers.clear(); 1615 PtrRtChecking->Need = false; 1616 1617 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 1618 1619 // For each block. 1620 for (BasicBlock *BB : TheLoop->blocks()) { 1621 // Scan the BB and collect legal loads and stores. 1622 for (Instruction &I : *BB) { 1623 // If this is a load, save it. If this instruction can read from memory 1624 // but is not a load, then we quit. Notice that we don't handle function 1625 // calls that read or write. 1626 if (I.mayReadFromMemory()) { 1627 // Many math library functions read the rounding mode. We will only 1628 // vectorize a loop if it contains known function calls that don't set 1629 // the flag. Therefore, it is safe to ignore this read from memory. 1630 auto *Call = dyn_cast<CallInst>(&I); 1631 if (Call && getVectorIntrinsicIDForCall(Call, TLI)) 1632 continue; 1633 1634 // If the function has an explicit vectorized counterpart, we can safely 1635 // assume that it can be vectorized. 1636 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() && 1637 TLI->isFunctionVectorizable(Call->getCalledFunction()->getName())) 1638 continue; 1639 1640 auto *Ld = dyn_cast<LoadInst>(&I); 1641 if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) { 1642 recordAnalysis("NonSimpleLoad", Ld) 1643 << "read with atomic ordering or volatile read"; 1644 DEBUG(dbgs() << "LAA: Found a non-simple load.\n"); 1645 CanVecMem = false; 1646 return; 1647 } 1648 NumLoads++; 1649 Loads.push_back(Ld); 1650 DepChecker->addAccess(Ld); 1651 if (EnableMemAccessVersioning) 1652 collectStridedAccess(Ld); 1653 continue; 1654 } 1655 1656 // Save 'store' instructions. Abort if other instructions write to memory. 1657 if (I.mayWriteToMemory()) { 1658 auto *St = dyn_cast<StoreInst>(&I); 1659 if (!St) { 1660 recordAnalysis("CantVectorizeInstruction", St) 1661 << "instruction cannot be vectorized"; 1662 CanVecMem = false; 1663 return; 1664 } 1665 if (!St->isSimple() && !IsAnnotatedParallel) { 1666 recordAnalysis("NonSimpleStore", St) 1667 << "write with atomic ordering or volatile write"; 1668 DEBUG(dbgs() << "LAA: Found a non-simple store.\n"); 1669 CanVecMem = false; 1670 return; 1671 } 1672 NumStores++; 1673 Stores.push_back(St); 1674 DepChecker->addAccess(St); 1675 if (EnableMemAccessVersioning) 1676 collectStridedAccess(St); 1677 } 1678 } // Next instr. 1679 } // Next block. 1680 1681 // Now we have two lists that hold the loads and the stores. 1682 // Next, we find the pointers that they use. 1683 1684 // Check if we see any stores. If there are no stores, then we don't 1685 // care if the pointers are *restrict*. 1686 if (!Stores.size()) { 1687 DEBUG(dbgs() << "LAA: Found a read-only loop!\n"); 1688 CanVecMem = true; 1689 return; 1690 } 1691 1692 MemoryDepChecker::DepCandidates DependentAccesses; 1693 AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(), 1694 AA, LI, DependentAccesses, *PSE); 1695 1696 // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects 1697 // multiple times on the same object. If the ptr is accessed twice, once 1698 // for read and once for write, it will only appear once (on the write 1699 // list). This is okay, since we are going to check for conflicts between 1700 // writes and between reads and writes, but not between reads and reads. 1701 ValueSet Seen; 1702 1703 for (StoreInst *ST : Stores) { 1704 Value *Ptr = ST->getPointerOperand(); 1705 // Check for store to loop invariant address. 1706 StoreToLoopInvariantAddress |= isUniform(Ptr); 1707 // If we did *not* see this pointer before, insert it to the read-write 1708 // list. At this phase it is only a 'write' list. 1709 if (Seen.insert(Ptr).second) { 1710 ++NumReadWrites; 1711 1712 MemoryLocation Loc = MemoryLocation::get(ST); 1713 // The TBAA metadata could have a control dependency on the predication 1714 // condition, so we cannot rely on it when determining whether or not we 1715 // need runtime pointer checks. 1716 if (blockNeedsPredication(ST->getParent(), TheLoop, DT)) 1717 Loc.AATags.TBAA = nullptr; 1718 1719 Accesses.addStore(Loc); 1720 } 1721 } 1722 1723 if (IsAnnotatedParallel) { 1724 DEBUG(dbgs() 1725 << "LAA: A loop annotated parallel, ignore memory dependency " 1726 << "checks.\n"); 1727 CanVecMem = true; 1728 return; 1729 } 1730 1731 for (LoadInst *LD : Loads) { 1732 Value *Ptr = LD->getPointerOperand(); 1733 // If we did *not* see this pointer before, insert it to the 1734 // read list. If we *did* see it before, then it is already in 1735 // the read-write list. This allows us to vectorize expressions 1736 // such as A[i] += x; Because the address of A[i] is a read-write 1737 // pointer. This only works if the index of A[i] is consecutive. 1738 // If the address of i is unknown (for example A[B[i]]) then we may 1739 // read a few words, modify, and write a few words, and some of the 1740 // words may be written to the same address. 1741 bool IsReadOnlyPtr = false; 1742 if (Seen.insert(Ptr).second || 1743 !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) { 1744 ++NumReads; 1745 IsReadOnlyPtr = true; 1746 } 1747 1748 MemoryLocation Loc = MemoryLocation::get(LD); 1749 // The TBAA metadata could have a control dependency on the predication 1750 // condition, so we cannot rely on it when determining whether or not we 1751 // need runtime pointer checks. 1752 if (blockNeedsPredication(LD->getParent(), TheLoop, DT)) 1753 Loc.AATags.TBAA = nullptr; 1754 1755 Accesses.addLoad(Loc, IsReadOnlyPtr); 1756 } 1757 1758 // If we write (or read-write) to a single destination and there are no 1759 // other reads in this loop then is it safe to vectorize. 1760 if (NumReadWrites == 1 && NumReads == 0) { 1761 DEBUG(dbgs() << "LAA: Found a write-only loop!\n"); 1762 CanVecMem = true; 1763 return; 1764 } 1765 1766 // Build dependence sets and check whether we need a runtime pointer bounds 1767 // check. 1768 Accesses.buildDependenceSets(); 1769 1770 // Find pointers with computable bounds. We are going to use this information 1771 // to place a runtime bound check. 1772 bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), 1773 TheLoop, SymbolicStrides); 1774 if (!CanDoRTIfNeeded) { 1775 recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds"; 1776 DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " 1777 << "the array bounds.\n"); 1778 CanVecMem = false; 1779 return; 1780 } 1781 1782 DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n"); 1783 1784 CanVecMem = true; 1785 if (Accesses.isDependencyCheckNeeded()) { 1786 DEBUG(dbgs() << "LAA: Checking memory dependencies\n"); 1787 CanVecMem = DepChecker->areDepsSafe( 1788 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides); 1789 MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes(); 1790 1791 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) { 1792 DEBUG(dbgs() << "LAA: Retrying with memory checks\n"); 1793 1794 // Clear the dependency checks. We assume they are not needed. 1795 Accesses.resetDepChecks(*DepChecker); 1796 1797 PtrRtChecking->reset(); 1798 PtrRtChecking->Need = true; 1799 1800 auto *SE = PSE->getSE(); 1801 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop, 1802 SymbolicStrides, true); 1803 1804 // Check that we found the bounds for the pointer. 1805 if (!CanDoRTIfNeeded) { 1806 recordAnalysis("CantCheckMemDepsAtRunTime") 1807 << "cannot check memory dependencies at runtime"; 1808 DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n"); 1809 CanVecMem = false; 1810 return; 1811 } 1812 1813 CanVecMem = true; 1814 } 1815 } 1816 1817 if (CanVecMem) 1818 DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We" 1819 << (PtrRtChecking->Need ? "" : " don't") 1820 << " need runtime memory checks.\n"); 1821 else { 1822 recordAnalysis("UnsafeMemDep") 1823 << "unsafe dependent memory operations in loop. Use " 1824 "#pragma loop distribute(enable) to allow loop distribution " 1825 "to attempt to isolate the offending operations into a separate " 1826 "loop"; 1827 DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n"); 1828 } 1829 } 1830 1831 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, 1832 DominatorTree *DT) { 1833 assert(TheLoop->contains(BB) && "Unknown block used"); 1834 1835 // Blocks that do not dominate the latch need predication. 1836 BasicBlock* Latch = TheLoop->getLoopLatch(); 1837 return !DT->dominates(BB, Latch); 1838 } 1839 1840 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName, 1841 Instruction *I) { 1842 assert(!Report && "Multiple reports generated"); 1843 1844 Value *CodeRegion = TheLoop->getHeader(); 1845 DebugLoc DL = TheLoop->getStartLoc(); 1846 1847 if (I) { 1848 CodeRegion = I->getParent(); 1849 // If there is no debug location attached to the instruction, revert back to 1850 // using the loop's. 1851 if (I->getDebugLoc()) 1852 DL = I->getDebugLoc(); 1853 } 1854 1855 Report = make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL, 1856 CodeRegion); 1857 return *Report; 1858 } 1859 1860 bool LoopAccessInfo::isUniform(Value *V) const { 1861 auto *SE = PSE->getSE(); 1862 // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is 1863 // never considered uniform. 1864 // TODO: Is this really what we want? Even without FP SCEV, we may want some 1865 // trivially loop-invariant FP values to be considered uniform. 1866 if (!SE->isSCEVable(V->getType())) 1867 return false; 1868 return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop)); 1869 } 1870 1871 // FIXME: this function is currently a duplicate of the one in 1872 // LoopVectorize.cpp. 1873 static Instruction *getFirstInst(Instruction *FirstInst, Value *V, 1874 Instruction *Loc) { 1875 if (FirstInst) 1876 return FirstInst; 1877 if (Instruction *I = dyn_cast<Instruction>(V)) 1878 return I->getParent() == Loc->getParent() ? I : nullptr; 1879 return nullptr; 1880 } 1881 1882 namespace { 1883 1884 /// \brief IR Values for the lower and upper bounds of a pointer evolution. We 1885 /// need to use value-handles because SCEV expansion can invalidate previously 1886 /// expanded values. Thus expansion of a pointer can invalidate the bounds for 1887 /// a previous one. 1888 struct PointerBounds { 1889 TrackingVH<Value> Start; 1890 TrackingVH<Value> End; 1891 }; 1892 1893 } // end anonymous namespace 1894 1895 /// \brief Expand code for the lower and upper bound of the pointer group \p CG 1896 /// in \p TheLoop. \return the values for the bounds. 1897 static PointerBounds 1898 expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop, 1899 Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE, 1900 const RuntimePointerChecking &PtrRtChecking) { 1901 Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue; 1902 const SCEV *Sc = SE->getSCEV(Ptr); 1903 1904 unsigned AS = Ptr->getType()->getPointerAddressSpace(); 1905 LLVMContext &Ctx = Loc->getContext(); 1906 1907 // Use this type for pointer arithmetic. 1908 Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS); 1909 1910 if (SE->isLoopInvariant(Sc, TheLoop)) { 1911 DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr 1912 << "\n"); 1913 // Ptr could be in the loop body. If so, expand a new one at the correct 1914 // location. 1915 Instruction *Inst = dyn_cast<Instruction>(Ptr); 1916 Value *NewPtr = (Inst && TheLoop->contains(Inst)) 1917 ? Exp.expandCodeFor(Sc, PtrArithTy, Loc) 1918 : Ptr; 1919 return {NewPtr, NewPtr}; 1920 } else { 1921 Value *Start = nullptr, *End = nullptr; 1922 DEBUG(dbgs() << "LAA: Adding RT check for range:\n"); 1923 Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc); 1924 End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc); 1925 DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n"); 1926 return {Start, End}; 1927 } 1928 } 1929 1930 /// \brief Turns a collection of checks into a collection of expanded upper and 1931 /// lower bounds for both pointers in the check. 1932 static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds( 1933 const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks, 1934 Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp, 1935 const RuntimePointerChecking &PtrRtChecking) { 1936 SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds; 1937 1938 // Here we're relying on the SCEV Expander's cache to only emit code for the 1939 // same bounds once. 1940 transform( 1941 PointerChecks, std::back_inserter(ChecksWithBounds), 1942 [&](const RuntimePointerChecking::PointerCheck &Check) { 1943 PointerBounds 1944 First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking), 1945 Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking); 1946 return std::make_pair(First, Second); 1947 }); 1948 1949 return ChecksWithBounds; 1950 } 1951 1952 std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks( 1953 Instruction *Loc, 1954 const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks) 1955 const { 1956 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 1957 auto *SE = PSE->getSE(); 1958 SCEVExpander Exp(*SE, DL, "induction"); 1959 auto ExpandedChecks = 1960 expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking); 1961 1962 LLVMContext &Ctx = Loc->getContext(); 1963 Instruction *FirstInst = nullptr; 1964 IRBuilder<> ChkBuilder(Loc); 1965 // Our instructions might fold to a constant. 1966 Value *MemoryRuntimeCheck = nullptr; 1967 1968 for (const auto &Check : ExpandedChecks) { 1969 const PointerBounds &A = Check.first, &B = Check.second; 1970 // Check if two pointers (A and B) conflict where conflict is computed as: 1971 // start(A) <= end(B) && start(B) <= end(A) 1972 unsigned AS0 = A.Start->getType()->getPointerAddressSpace(); 1973 unsigned AS1 = B.Start->getType()->getPointerAddressSpace(); 1974 1975 assert((AS0 == B.End->getType()->getPointerAddressSpace()) && 1976 (AS1 == A.End->getType()->getPointerAddressSpace()) && 1977 "Trying to bounds check pointers with different address spaces"); 1978 1979 Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0); 1980 Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1); 1981 1982 Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc"); 1983 Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc"); 1984 Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc"); 1985 Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc"); 1986 1987 // [A|B].Start points to the first accessed byte under base [A|B]. 1988 // [A|B].End points to the last accessed byte, plus one. 1989 // There is no conflict when the intervals are disjoint: 1990 // NoConflict = (B.Start >= A.End) || (A.Start >= B.End) 1991 // 1992 // bound0 = (B.Start < A.End) 1993 // bound1 = (A.Start < B.End) 1994 // IsConflict = bound0 & bound1 1995 Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0"); 1996 FirstInst = getFirstInst(FirstInst, Cmp0, Loc); 1997 Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1"); 1998 FirstInst = getFirstInst(FirstInst, Cmp1, Loc); 1999 Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict"); 2000 FirstInst = getFirstInst(FirstInst, IsConflict, Loc); 2001 if (MemoryRuntimeCheck) { 2002 IsConflict = 2003 ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx"); 2004 FirstInst = getFirstInst(FirstInst, IsConflict, Loc); 2005 } 2006 MemoryRuntimeCheck = IsConflict; 2007 } 2008 2009 if (!MemoryRuntimeCheck) 2010 return std::make_pair(nullptr, nullptr); 2011 2012 // We have to do this trickery because the IRBuilder might fold the check to a 2013 // constant expression in which case there is no Instruction anchored in a 2014 // the block. 2015 Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck, 2016 ConstantInt::getTrue(Ctx)); 2017 ChkBuilder.Insert(Check, "memcheck.conflict"); 2018 FirstInst = getFirstInst(FirstInst, Check, Loc); 2019 return std::make_pair(FirstInst, Check); 2020 } 2021 2022 std::pair<Instruction *, Instruction *> 2023 LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const { 2024 if (!PtrRtChecking->Need) 2025 return std::make_pair(nullptr, nullptr); 2026 2027 return addRuntimeChecks(Loc, PtrRtChecking->getChecks()); 2028 } 2029 2030 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) { 2031 Value *Ptr = nullptr; 2032 if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess)) 2033 Ptr = LI->getPointerOperand(); 2034 else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess)) 2035 Ptr = SI->getPointerOperand(); 2036 else 2037 return; 2038 2039 Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop); 2040 if (!Stride) 2041 return; 2042 2043 DEBUG(dbgs() << "LAA: Found a strided access that we can version"); 2044 DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 2045 SymbolicStrides[Ptr] = Stride; 2046 StrideSet.insert(Stride); 2047 } 2048 2049 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE, 2050 const TargetLibraryInfo *TLI, AliasAnalysis *AA, 2051 DominatorTree *DT, LoopInfo *LI) 2052 : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)), 2053 PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)), 2054 DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L), 2055 NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false), 2056 StoreToLoopInvariantAddress(false) { 2057 if (canAnalyzeLoop()) 2058 analyzeLoop(AA, LI, TLI, DT); 2059 } 2060 2061 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const { 2062 if (CanVecMem) { 2063 OS.indent(Depth) << "Memory dependences are safe"; 2064 if (MaxSafeDepDistBytes != -1ULL) 2065 OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes 2066 << " bytes"; 2067 if (PtrRtChecking->Need) 2068 OS << " with run-time checks"; 2069 OS << "\n"; 2070 } 2071 2072 if (Report) 2073 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n"; 2074 2075 if (auto *Dependences = DepChecker->getDependences()) { 2076 OS.indent(Depth) << "Dependences:\n"; 2077 for (auto &Dep : *Dependences) { 2078 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions()); 2079 OS << "\n"; 2080 } 2081 } else 2082 OS.indent(Depth) << "Too many dependences, not recorded\n"; 2083 2084 // List the pair of accesses need run-time checks to prove independence. 2085 PtrRtChecking->print(OS, Depth); 2086 OS << "\n"; 2087 2088 OS.indent(Depth) << "Store to invariant address was " 2089 << (StoreToLoopInvariantAddress ? "" : "not ") 2090 << "found in loop.\n"; 2091 2092 OS.indent(Depth) << "SCEV assumptions:\n"; 2093 PSE->getUnionPredicate().print(OS, Depth); 2094 2095 OS << "\n"; 2096 2097 OS.indent(Depth) << "Expressions re-written:\n"; 2098 PSE->print(OS, Depth); 2099 } 2100 2101 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) { 2102 auto &LAI = LoopAccessInfoMap[L]; 2103 2104 if (!LAI) 2105 LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI); 2106 2107 return *LAI.get(); 2108 } 2109 2110 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const { 2111 LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this); 2112 2113 for (Loop *TopLevelLoop : *LI) 2114 for (Loop *L : depth_first(TopLevelLoop)) { 2115 OS.indent(2) << L->getHeader()->getName() << ":\n"; 2116 auto &LAI = LAA.getInfo(L); 2117 LAI.print(OS, 4); 2118 } 2119 } 2120 2121 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) { 2122 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2123 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2124 TLI = TLIP ? &TLIP->getTLI() : nullptr; 2125 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2126 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2127 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2128 2129 return false; 2130 } 2131 2132 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 2133 AU.addRequired<ScalarEvolutionWrapperPass>(); 2134 AU.addRequired<AAResultsWrapperPass>(); 2135 AU.addRequired<DominatorTreeWrapperPass>(); 2136 AU.addRequired<LoopInfoWrapperPass>(); 2137 2138 AU.setPreservesAll(); 2139 } 2140 2141 char LoopAccessLegacyAnalysis::ID = 0; 2142 static const char laa_name[] = "Loop Access Analysis"; 2143 #define LAA_NAME "loop-accesses" 2144 2145 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2146 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2147 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 2148 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2149 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 2150 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true) 2151 2152 AnalysisKey LoopAccessAnalysis::Key; 2153 2154 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM, 2155 LoopStandardAnalysisResults &AR) { 2156 return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI); 2157 } 2158 2159 namespace llvm { 2160 2161 Pass *createLAAPass() { 2162 return new LoopAccessLegacyAnalysis(); 2163 } 2164 2165 } // end namespace llvm 2166