//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/VectorUtils.h"

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// \brief The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// \brief We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///        A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));
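
// Illustrative usage only (the exact 'opt' invocation may differ by LLVM
// version; the flag names are the ones registered above):
//   opt -loop-accesses -analyze -force-vector-width=4 input.ll
// runs this analysis on each innermost loop and prints the runtime checks
// and dependences it found.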

/// \brief Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Function *TheFunction,
                                    const Loop *TheLoop,
                                    const char *PassName) {
  DebugLoc DL = TheLoop->getStartLoc();
  if (const Instruction *I = Message.getInstr())
    DL = I->getDebugLoc();
  emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
                                 *TheFunction, DL, Message.str());
}

Value *llvm::stripIntegerCast(Value *V) {
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    ScalarEvolution *SE = PSE.getSE();
    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    PSE.addPredicate(*SE->getEqualPredicate(U, CT));
    auto *Expr = PSE.getSCEV(Ptr);

    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
                 << "\n");
    return Expr;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}

void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
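      //
      // For illustration: for a pointer with SCEV {A,+,m} and backedge-taken
      // count n, Start is A and End is A + m * n. When the sign of m is
      // unknown, either expression may be the smaller address, so
      // [umin(Start, End), umax(Start, End)] is a conservatively correct
      // interval for the runtime check in both cases.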
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidate equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.
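  //
  // A small worked example (illustrative only): with pointers %a, %a+4 and
  // %a+8 in one equivalence class and an unrelated pointer %b in another,
  // the three %a-based pointers merge into a single group whose bounds span
  // [%a, %a+8+size), so one group-against-group comparison with %b replaces
  // three separate pointer checks.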

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(),
              std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {
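// A sketch of how the class below is driven from analyzeLoop() later in this
// file (simplified; see that function for the real sequence):
//   AccessAnalysis Accesses(DL, AA, LI, DepCands, PSE);
//   Accesses.addStore(StoreLoc);              // for every store
//   Accesses.addLoad(LoadLoc, IsReadOnlyPtr); // for every load
//   Accesses.buildDependenceSets();
//   bool OK = Accesses.canCheckPtrAtRT(RtCheck, SE, TheLoop, Strides);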
/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
        PSE(PSE) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.
  /// When we retry memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}

/// \brief Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int Stride = getPtrStride(PSE, Ptr, L, Strides);
  return Stride == 1;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckWrap || isNoWrap(PSE, StridesMap, Ptr, TheLoop))) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need to check if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a
    // pointer for which we couldn't find the bounds but we don't actually need
    // to emit any checks so it does not matter.
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 ||
                      (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << " AST: "; AST.dump());
  DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;
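
    // For illustration: in a loop body like "b[i] = a[i] + a[i + 1];" the
    // loads of 'a' are read-only, so they are deferred to the second
    // SetIteration pass below and only checked against the writes recorded
    // during the first pass (here, the store to 'b').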

    // Map of pointers to last access encountered.
    typedef DenseMap<Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}
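
// For illustration, the function below can prove non-wrapping for a pattern
// such as the following (IR names hypothetical):
//   %add = add nsw i32 %iv, 4                            ; NSW, constant RHS
//   %gep = getelementptr inbounds i32, i32* %a, i32 %add
// where the SCEV of %iv is an NSW AddRec on the queried loop.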

/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    if (!isa<ConstantInt>(*Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = *Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
int llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                       const Loop *Lp, const ValueToValueMap &StridesMap,
                       bool Assume) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr
                 << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                 << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec =
      PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
      isNoWrapAddRec(Ptr, AR, PSE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                   << "LAA: Pointer: " << *Ptr << "\n"
                   << "LAA: SCEV: " << *AR << "\n"
                   << "LAA: Added an overflow assumption\n");
    } else {
      DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                   << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                 << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                   << "inbounds or in address space 0 may wrap:\n"
                   << "LAA: Pointer: " << *Ptr << "\n"
                   << "LAA: SCEV: " << *AR << "\n"
                   << "LAA: Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}

/// Take the pointer operand from the Load/Store instruction.
/// Returns NULL if this is not a valid Load/Store instruction.
static Value *getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}
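
// For illustration (IR names hypothetical): the two loads below are
// consecutive because the constant offset delta between the pointers
// (4 bytes) equals the store size of i32:
//   %p0 = getelementptr inbounds i32, i32* %a, i64 0
//   %p1 = getelementptr inbounds i32, i32* %a, i64 1
//   %x = load i32, i32* %p0    ; A
//   %y = load i32, i32* %p1    ; B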

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  //  OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
  const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}

bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
                                                    unsigned TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const unsigned NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  unsigned MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (unsigned VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
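
// A worked example of the computation above (illustrative only): with
// Distance = 12 and TypeByteSize = 4, the first candidate VF is 8. Since
// 12 % 8 != 0 and 12 / 8 = 1 is below the 32-iteration forwarding window,
// the maximum VF is halved to 4, which is smaller than 2 * TypeByteSize = 8,
// so a 12-byte distance is reported as a store-to-load forwarding conflict.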

/// \brief Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride,
                                          unsigned TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  unsigned ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  int StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
  int StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
  // the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  unsigned TypeByteSize = DL.getTypeAllocSize(ATy);

  const APInt &Val = C->getAPInt();
  int64_t Distance = Val.getSExtValue();
  unsigned Stride = std::abs(StrideAPtr);

  // Attempt to prove strided accesses independent.
  if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
      areStridedAccessesIndependent(std::abs(Distance), Stride,
                                    TypeByteSize)) {
    DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(),
                                      TypeByteSize) ||
         ATy != BTy)) {
      DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs()
          << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // Distance needed for vectorizing iterations except the last iteration:
  // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than distance.
  //
  // If MinNumIter is 4 (say, if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than distance.
  // It is not safe to do vectorization.
  unsigned MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > Distance) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which could
  // not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, where the minimum distance
  // needed is 8, which is greater than 2, so vectorization is forbidden. But
  // actually both A and B could be vectorized by 2 iterations.
  MaxSafeDepDistBytes =
      Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes;

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1U;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
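        // For illustration: the index vectors below hold the program-order
        // positions of the instructions accessing each pointer. Every cross
        // pair is visited exactly once, and the pair is flipped when needed
        // so that isDependent always sees its arguments in program order
        // (AIdx < BIdx).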
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return SafeForVectorization;
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  std::transform(IndexVector.begin(), IndexVector.end(),
                 std::back_inserter(Insts),
                 [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop in "
               << TheLoop->getHeader()->getParent()->getName() << ": "
               << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport()
        << "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport()
        << "loop control flow is not understood by analyzer");
    return false;
  }
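
  // For illustration: a bottom-tested (do-while style) loop such as
  //   do { body(); } while (++i != n);
  // tests its condition in the latch block, so the latch is also the single
  // exiting block. A loop that can leave from the middle of its body does not
  // satisfy the check below.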

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport()
        << "loop control flow is not understood by analyzer");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop() {
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking.Pointers.clear();
  PtrRtChecking.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
                            be = TheLoop->block_end();
       bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (it->mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        CallInst *Call = dyn_cast<CallInst>(it);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        LoadInst *Ld = dyn_cast<LoadInst>(it);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker.addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(LoopAccessReport()
                 << "loop control flow is not understood by analyzer");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop() {
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking.Pointers.clear();
  PtrRtChecking.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
                            be = TheLoop->block_end();
       bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (it->mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        CallInst *Call = dyn_cast<CallInst>(it);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        LoadInst *Ld = dyn_cast<LoadInst>(it);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker.addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (it->mayWriteToMemory()) {
        StoreInst *St = dyn_cast<StoreInst>(it);
        if (!St) {
          emitAnalysis(LoopAccessReport(&*it)
                       << "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker.addAccess(St);
        if (EnableMemAccessVersioning)
          collectStridedAccess(St);
      }
    } // Next instr.
  }   // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
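  // For example (illustrative only):
  //   for (i = 0; i < n; ++i)
  //     sum += A[i] * B[i];
  // performs no stores, so it is safe regardless of whether A and B alias.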
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses, PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
                 << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x; because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the index is not known (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(PtrRtChecking, PSE.getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker.areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(DepChecker);

      PtrRtChecking.reset();
      PtrRtChecking.Need = true;

      auto *SE = PSE.getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        emitAnalysis(LoopAccessReport()
                     << "cannot check memory dependencies at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (PtrRtChecking.Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    emitAnalysis(
        LoopAccessReport()
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop");
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
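  // For example (an illustrative loop):
  //   for (i = 0; i < n; ++i)
  //     if (C[i])
  //       A[i] = 0;
  // The block with the store does not dominate the latch, so the store is
  // only conditionally executed and needs predication when flattened.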
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return (PSE.getSE()->isLoopInvariant(PSE.getSE()->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

namespace {
/// \brief IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};
} // end anonymous namespace

/// \brief Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
                 << "\n");
    return {Ptr, Ptr};
  } else {
    unsigned AS = Ptr->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = Loc->getContext();

    // Use this type for pointer arithmetic.
    Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
    Value *Start = nullptr, *End = nullptr;

    DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
    return {Start, End};
  }
}
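
// For illustration (a sketch; CG->Low and CG->High are computed when the
// checking groups are built, not here): for a pointer whose SCEV is
// {%base,+,4}<%loop>, Low is roughly %base and High roughly the end of the
// accessed range; both are expanded as i8* values so that the bound
// comparisons below are plain unsigned pointer comparisons.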

/// \brief Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  std::transform(
      PointerChecks.begin(), PointerChecks.end(),
      std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
            First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
            Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {
  auto *SE = PSE.getSE();
  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) <= end(B) && start(B) <= end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }
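
  // For illustration, the sequence built above for each pair is roughly (the
  // names match those passed to the builder; %prev is the running OR):
  //   %bound0 = icmp ule i8* %Start0, %End1
  //   %bound1 = icmp ule i8* %Start1, %End0
  //   %found.conflict = and i1 %bound0, %bound1
  //   %conflict.rdx = or i1 %prev, %found.conflict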
  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the block.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking.Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking.getChecks());
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop);
  if (!Stride)
    return;

  DEBUG(dbgs() << "LAA: Found a strided access that we can version");
  DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(*SE, *L), PtrRtChecking(SE), DepChecker(PSE, L), TheLoop(L), DL(DL),
      TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
      MaxSafeDepDistBytes(-1U), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop();
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1U)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking.Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  if (auto *Dependences = DepChecker.getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking.print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE.getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE.print(OS, Depth);
}
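
// A sketch of the output for a loop that is safe given run-time checks (the
// run-time check lines are emitted by RuntimePointerChecking::print):
//   Memory dependences are safe with run-time checks
//   Dependences:
//   Run-time memory checks:
//   ...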
"" : "not ") 1960 << "found in loop.\n"; 1961 1962 OS.indent(Depth) << "SCEV assumptions:\n"; 1963 PSE.getUnionPredicate().print(OS, Depth); 1964 1965 OS << "\n"; 1966 1967 OS.indent(Depth) << "Expressions re-written:\n"; 1968 PSE.print(OS, Depth); 1969 } 1970 1971 const LoopAccessInfo &LoopAccessAnalysis::getInfo(Loop *L) { 1972 auto &LAI = LoopAccessInfoMap[L]; 1973 1974 if (!LAI) { 1975 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 1976 LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI); 1977 } 1978 return *LAI.get(); 1979 } 1980 1981 void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const { 1982 LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this); 1983 1984 for (Loop *TopLevelLoop : *LI) 1985 for (Loop *L : depth_first(TopLevelLoop)) { 1986 OS.indent(2) << L->getHeader()->getName() << ":\n"; 1987 auto &LAI = LAA.getInfo(L); 1988 LAI.print(OS, 4); 1989 } 1990 } 1991 1992 bool LoopAccessAnalysis::runOnFunction(Function &F) { 1993 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1994 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1995 TLI = TLIP ? &TLIP->getTLI() : nullptr; 1996 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1997 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1998 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1999 2000 return false; 2001 } 2002 2003 void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 2004 AU.addRequired<ScalarEvolutionWrapperPass>(); 2005 AU.addRequired<AAResultsWrapperPass>(); 2006 AU.addRequired<DominatorTreeWrapperPass>(); 2007 AU.addRequired<LoopInfoWrapperPass>(); 2008 2009 AU.setPreservesAll(); 2010 } 2011 2012 char LoopAccessAnalysis::ID = 0; 2013 static const char laa_name[] = "Loop Access Analysis"; 2014 #define LAA_NAME "loop-accesses" 2015 2016 INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) 2017 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2018 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 2019 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2020 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 2021 INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) 2022 2023 namespace llvm { 2024 Pass *createLAAPass() { 2025 return new LoopAccessAnalysis(); 2026 } 2027 } 2028