1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // The implementation for the loop memory dependence that was originally 11 // developed for the loop vectorizer. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Analysis/LoopAccessAnalysis.h" 16 #include "llvm/Analysis/LoopInfo.h" 17 #include "llvm/Analysis/ScalarEvolutionExpander.h" 18 #include "llvm/Analysis/TargetLibraryInfo.h" 19 #include "llvm/Analysis/ValueTracking.h" 20 #include "llvm/IR/DiagnosticInfo.h" 21 #include "llvm/IR/Dominators.h" 22 #include "llvm/IR/IRBuilder.h" 23 #include "llvm/Support/Debug.h" 24 #include "llvm/Support/raw_ostream.h" 25 #include "llvm/Analysis/VectorUtils.h" 26 using namespace llvm; 27 28 #define DEBUG_TYPE "loop-accesses" 29 30 static cl::opt<unsigned, true> 31 VectorizationFactor("force-vector-width", cl::Hidden, 32 cl::desc("Sets the SIMD width. Zero is autoselect."), 33 cl::location(VectorizerParams::VectorizationFactor)); 34 unsigned VectorizerParams::VectorizationFactor; 35 36 static cl::opt<unsigned, true> 37 VectorizationInterleave("force-vector-interleave", cl::Hidden, 38 cl::desc("Sets the vectorization interleave count. " 39 "Zero is autoselect."), 40 cl::location( 41 VectorizerParams::VectorizationInterleave)); 42 unsigned VectorizerParams::VectorizationInterleave; 43 44 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( 45 "runtime-memory-check-threshold", cl::Hidden, 46 cl::desc("When performing memory disambiguation checks at runtime do not " 47 "generate more than this number of comparisons (default = 8)."), 48 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); 49 unsigned VectorizerParams::RuntimeMemoryCheckThreshold; 50 51 /// \brief The maximum iterations used to merge memory checks 52 static cl::opt<unsigned> MemoryCheckMergeThreshold( 53 "memory-check-merge-threshold", cl::Hidden, 54 cl::desc("Maximum number of comparisons done when trying to merge " 55 "runtime memory checks. (default = 100)"), 56 cl::init(100)); 57 58 /// Maximum SIMD width. 59 const unsigned VectorizerParams::MaxVectorWidth = 64; 60 61 /// \brief We collect dependences up to this threshold. 62 static cl::opt<unsigned> 63 MaxDependences("max-dependences", cl::Hidden, 64 cl::desc("Maximum number of dependences collected by " 65 "loop-access analysis (default = 100)"), 66 cl::init(100)); 67 68 /// \brief Enable store-to-load forwarding conflict detection. This option can 69 /// be disabled for correctness testing. 70 static cl::opt<bool> EnableForwardingConflictDetection( 71 "store-to-load-forwarding-conflict-detection", cl::Hidden, 72 cl::desc("Enable conflict detection in loop-access analysis"), 73 cl::init(true)); 74 75 bool VectorizerParams::isInterleaveForced() { 76 return ::VectorizationInterleave.getNumOccurrences() > 0; 77 } 78 79 void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message, 80 const Function *TheFunction, 81 const Loop *TheLoop, 82 const char *PassName) { 83 DebugLoc DL = TheLoop->getStartLoc(); 84 if (const Instruction *I = Message.getInstr()) 85 DL = I->getDebugLoc(); 86 emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName, 87 *TheFunction, DL, Message.str()); 88 } 89 90 Value *llvm::stripIntegerCast(Value *V) { 91 if (CastInst *CI = dyn_cast<CastInst>(V)) 92 if (CI->getOperand(0)->getType()->isIntegerTy()) 93 return CI->getOperand(0); 94 return V; 95 } 96 97 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, 98 const ValueToValueMap &PtrToStride, 99 Value *Ptr, Value *OrigPtr) { 100 const SCEV *OrigSCEV = PSE.getSCEV(Ptr); 101 102 // If there is an entry in the map return the SCEV of the pointer with the 103 // symbolic stride replaced by one. 104 ValueToValueMap::const_iterator SI = 105 PtrToStride.find(OrigPtr ? OrigPtr : Ptr); 106 if (SI != PtrToStride.end()) { 107 Value *StrideVal = SI->second; 108 109 // Strip casts. 110 StrideVal = stripIntegerCast(StrideVal); 111 112 // Replace symbolic stride by one. 113 Value *One = ConstantInt::get(StrideVal->getType(), 1); 114 ValueToValueMap RewriteMap; 115 RewriteMap[StrideVal] = One; 116 117 ScalarEvolution *SE = PSE.getSE(); 118 const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); 119 const auto *CT = 120 static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); 121 122 PSE.addPredicate(*SE->getEqualPredicate(U, CT)); 123 auto *Expr = PSE.getSCEV(Ptr); 124 125 DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr 126 << "\n"); 127 return Expr; 128 } 129 130 // Otherwise, just return the SCEV of the original pointer. 131 return OrigSCEV; 132 } 133 134 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr, 135 unsigned DepSetId, unsigned ASId, 136 const ValueToValueMap &Strides, 137 PredicatedScalarEvolution &PSE) { 138 // Get the stride replaced scev. 139 const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 140 ScalarEvolution *SE = PSE.getSE(); 141 142 const SCEV *ScStart; 143 const SCEV *ScEnd; 144 145 if (SE->isLoopInvariant(Sc, Lp)) 146 ScStart = ScEnd = Sc; 147 else { 148 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); 149 assert(AR && "Invalid addrec expression"); 150 const SCEV *Ex = PSE.getBackedgeTakenCount(); 151 152 ScStart = AR->getStart(); 153 ScEnd = AR->evaluateAtIteration(Ex, *SE); 154 const SCEV *Step = AR->getStepRecurrence(*SE); 155 156 // For expressions with negative step, the upper bound is ScStart and the 157 // lower bound is ScEnd. 158 if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) { 159 if (CStep->getValue()->isNegative()) 160 std::swap(ScStart, ScEnd); 161 } else { 162 // Fallback case: the step is not constant, but the we can still 163 // get the upper and lower bounds of the interval by using min/max 164 // expressions. 165 ScStart = SE->getUMinExpr(ScStart, ScEnd); 166 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd); 167 } 168 } 169 170 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc); 171 } 172 173 SmallVector<RuntimePointerChecking::PointerCheck, 4> 174 RuntimePointerChecking::generateChecks() const { 175 SmallVector<PointerCheck, 4> Checks; 176 177 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 178 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) { 179 const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I]; 180 const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J]; 181 182 if (needsChecking(CGI, CGJ)) 183 Checks.push_back(std::make_pair(&CGI, &CGJ)); 184 } 185 } 186 return Checks; 187 } 188 189 void RuntimePointerChecking::generateChecks( 190 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 191 assert(Checks.empty() && "Checks is not empty"); 192 groupChecks(DepCands, UseDependencies); 193 Checks = generateChecks(); 194 } 195 196 bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M, 197 const CheckingPtrGroup &N) const { 198 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I) 199 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J) 200 if (needsChecking(M.Members[I], N.Members[J])) 201 return true; 202 return false; 203 } 204 205 /// Compare \p I and \p J and return the minimum. 206 /// Return nullptr in case we couldn't find an answer. 207 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J, 208 ScalarEvolution *SE) { 209 const SCEV *Diff = SE->getMinusSCEV(J, I); 210 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff); 211 212 if (!C) 213 return nullptr; 214 if (C->getValue()->isNegative()) 215 return J; 216 return I; 217 } 218 219 bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) { 220 const SCEV *Start = RtCheck.Pointers[Index].Start; 221 const SCEV *End = RtCheck.Pointers[Index].End; 222 223 // Compare the starts and ends with the known minimum and maximum 224 // of this set. We need to know how we compare against the min/max 225 // of the set in order to be able to emit memchecks. 226 const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE); 227 if (!Min0) 228 return false; 229 230 const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE); 231 if (!Min1) 232 return false; 233 234 // Update the low bound expression if we've found a new min value. 235 if (Min0 == Start) 236 Low = Start; 237 238 // Update the high bound expression if we've found a new max value. 239 if (Min1 != End) 240 High = End; 241 242 Members.push_back(Index); 243 return true; 244 } 245 246 void RuntimePointerChecking::groupChecks( 247 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { 248 // We build the groups from dependency candidates equivalence classes 249 // because: 250 // - We know that pointers in the same equivalence class share 251 // the same underlying object and therefore there is a chance 252 // that we can compare pointers 253 // - We wouldn't be able to merge two pointers for which we need 254 // to emit a memcheck. The classes in DepCands are already 255 // conveniently built such that no two pointers in the same 256 // class need checking against each other. 257 258 // We use the following (greedy) algorithm to construct the groups 259 // For every pointer in the equivalence class: 260 // For each existing group: 261 // - if the difference between this pointer and the min/max bounds 262 // of the group is a constant, then make the pointer part of the 263 // group and update the min/max bounds of that group as required. 264 265 CheckingGroups.clear(); 266 267 // If we need to check two pointers to the same underlying object 268 // with a non-constant difference, we shouldn't perform any pointer 269 // grouping with those pointers. This is because we can easily get 270 // into cases where the resulting check would return false, even when 271 // the accesses are safe. 272 // 273 // The following example shows this: 274 // for (i = 0; i < 1000; ++i) 275 // a[5000 + i * m] = a[i] + a[i + 9000] 276 // 277 // Here grouping gives a check of (5000, 5000 + 1000 * m) against 278 // (0, 10000) which is always false. However, if m is 1, there is no 279 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows 280 // us to perform an accurate check in this case. 281 // 282 // The above case requires that we have an UnknownDependence between 283 // accesses to the same underlying object. This cannot happen unless 284 // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies 285 // is also false. In this case we will use the fallback path and create 286 // separate checking groups for all pointers. 287 288 // If we don't have the dependency partitions, construct a new 289 // checking pointer group for each pointer. This is also required 290 // for correctness, because in this case we can have checking between 291 // pointers to the same underlying object. 292 if (!UseDependencies) { 293 for (unsigned I = 0; I < Pointers.size(); ++I) 294 CheckingGroups.push_back(CheckingPtrGroup(I, *this)); 295 return; 296 } 297 298 unsigned TotalComparisons = 0; 299 300 DenseMap<Value *, unsigned> PositionMap; 301 for (unsigned Index = 0; Index < Pointers.size(); ++Index) 302 PositionMap[Pointers[Index].PointerValue] = Index; 303 304 // We need to keep track of what pointers we've already seen so we 305 // don't process them twice. 306 SmallSet<unsigned, 2> Seen; 307 308 // Go through all equivalence classes, get the "pointer check groups" 309 // and add them to the overall solution. We use the order in which accesses 310 // appear in 'Pointers' to enforce determinism. 311 for (unsigned I = 0; I < Pointers.size(); ++I) { 312 // We've seen this pointer before, and therefore already processed 313 // its equivalence class. 314 if (Seen.count(I)) 315 continue; 316 317 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue, 318 Pointers[I].IsWritePtr); 319 320 SmallVector<CheckingPtrGroup, 2> Groups; 321 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access)); 322 323 // Because DepCands is constructed by visiting accesses in the order in 324 // which they appear in alias sets (which is deterministic) and the 325 // iteration order within an equivalence class member is only dependent on 326 // the order in which unions and insertions are performed on the 327 // equivalence class, the iteration order is deterministic. 328 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end(); 329 MI != ME; ++MI) { 330 unsigned Pointer = PositionMap[MI->getPointer()]; 331 bool Merged = false; 332 // Mark this pointer as seen. 333 Seen.insert(Pointer); 334 335 // Go through all the existing sets and see if we can find one 336 // which can include this pointer. 337 for (CheckingPtrGroup &Group : Groups) { 338 // Don't perform more than a certain amount of comparisons. 339 // This should limit the cost of grouping the pointers to something 340 // reasonable. If we do end up hitting this threshold, the algorithm 341 // will create separate groups for all remaining pointers. 342 if (TotalComparisons > MemoryCheckMergeThreshold) 343 break; 344 345 TotalComparisons++; 346 347 if (Group.addPointer(Pointer)) { 348 Merged = true; 349 break; 350 } 351 } 352 353 if (!Merged) 354 // We couldn't add this pointer to any existing set or the threshold 355 // for the number of comparisons has been reached. Create a new group 356 // to hold the current pointer. 357 Groups.push_back(CheckingPtrGroup(Pointer, *this)); 358 } 359 360 // We've computed the grouped checks for this partition. 361 // Save the results and continue with the next one. 362 std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups)); 363 } 364 } 365 366 bool RuntimePointerChecking::arePointersInSamePartition( 367 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1, 368 unsigned PtrIdx2) { 369 return (PtrToPartition[PtrIdx1] != -1 && 370 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]); 371 } 372 373 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const { 374 const PointerInfo &PointerI = Pointers[I]; 375 const PointerInfo &PointerJ = Pointers[J]; 376 377 // No need to check if two readonly pointers intersect. 378 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr) 379 return false; 380 381 // Only need to check pointers between two different dependency sets. 382 if (PointerI.DependencySetId == PointerJ.DependencySetId) 383 return false; 384 385 // Only need to check pointers in the same alias set. 386 if (PointerI.AliasSetId != PointerJ.AliasSetId) 387 return false; 388 389 return true; 390 } 391 392 void RuntimePointerChecking::printChecks( 393 raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks, 394 unsigned Depth) const { 395 unsigned N = 0; 396 for (const auto &Check : Checks) { 397 const auto &First = Check.first->Members, &Second = Check.second->Members; 398 399 OS.indent(Depth) << "Check " << N++ << ":\n"; 400 401 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n"; 402 for (unsigned K = 0; K < First.size(); ++K) 403 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n"; 404 405 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n"; 406 for (unsigned K = 0; K < Second.size(); ++K) 407 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n"; 408 } 409 } 410 411 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const { 412 413 OS.indent(Depth) << "Run-time memory checks:\n"; 414 printChecks(OS, Checks, Depth); 415 416 OS.indent(Depth) << "Grouped accesses:\n"; 417 for (unsigned I = 0; I < CheckingGroups.size(); ++I) { 418 const auto &CG = CheckingGroups[I]; 419 420 OS.indent(Depth + 2) << "Group " << &CG << ":\n"; 421 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High 422 << ")\n"; 423 for (unsigned J = 0; J < CG.Members.size(); ++J) { 424 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr 425 << "\n"; 426 } 427 } 428 } 429 430 namespace { 431 /// \brief Analyses memory accesses in a loop. 432 /// 433 /// Checks whether run time pointer checks are needed and builds sets for data 434 /// dependence checking. 435 class AccessAnalysis { 436 public: 437 /// \brief Read or write access location. 438 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo; 439 typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet; 440 441 AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI, 442 MemoryDepChecker::DepCandidates &DA, 443 PredicatedScalarEvolution &PSE) 444 : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false), 445 PSE(PSE) {} 446 447 /// \brief Register a load and whether it is only read from. 448 void addLoad(MemoryLocation &Loc, bool IsReadOnly) { 449 Value *Ptr = const_cast<Value*>(Loc.Ptr); 450 AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); 451 Accesses.insert(MemAccessInfo(Ptr, false)); 452 if (IsReadOnly) 453 ReadOnlyPtr.insert(Ptr); 454 } 455 456 /// \brief Register a store. 457 void addStore(MemoryLocation &Loc) { 458 Value *Ptr = const_cast<Value*>(Loc.Ptr); 459 AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); 460 Accesses.insert(MemAccessInfo(Ptr, true)); 461 } 462 463 /// \brief Check whether we can check the pointers at runtime for 464 /// non-intersection. 465 /// 466 /// Returns true if we need no check or if we do and we can generate them 467 /// (i.e. the pointers have computable bounds). 468 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE, 469 Loop *TheLoop, const ValueToValueMap &Strides, 470 bool ShouldCheckWrap = false); 471 472 /// \brief Goes over all memory accesses, checks whether a RT check is needed 473 /// and builds sets of dependent accesses. 474 void buildDependenceSets() { 475 processMemAccesses(); 476 } 477 478 /// \brief Initial processing of memory accesses determined that we need to 479 /// perform dependency checking. 480 /// 481 /// Note that this can later be cleared if we retry memcheck analysis without 482 /// dependency checking (i.e. ShouldRetryWithRuntimeCheck). 483 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); } 484 485 /// We decided that no dependence analysis would be used. Reset the state. 486 void resetDepChecks(MemoryDepChecker &DepChecker) { 487 CheckDeps.clear(); 488 DepChecker.clearDependences(); 489 } 490 491 MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; } 492 493 private: 494 typedef SetVector<MemAccessInfo> PtrAccessSet; 495 496 /// \brief Go over all memory access and check whether runtime pointer checks 497 /// are needed and build sets of dependency check candidates. 498 void processMemAccesses(); 499 500 /// Set of all accesses. 501 PtrAccessSet Accesses; 502 503 const DataLayout &DL; 504 505 /// Set of accesses that need a further dependence check. 506 MemAccessInfoSet CheckDeps; 507 508 /// Set of pointers that are read only. 509 SmallPtrSet<Value*, 16> ReadOnlyPtr; 510 511 /// An alias set tracker to partition the access set by underlying object and 512 //intrinsic property (such as TBAA metadata). 513 AliasSetTracker AST; 514 515 LoopInfo *LI; 516 517 /// Sets of potentially dependent accesses - members of one set share an 518 /// underlying pointer. The set "CheckDeps" identfies which sets really need a 519 /// dependence check. 520 MemoryDepChecker::DepCandidates &DepCands; 521 522 /// \brief Initial processing of memory accesses determined that we may need 523 /// to add memchecks. Perform the analysis to determine the necessary checks. 524 /// 525 /// Note that, this is different from isDependencyCheckNeeded. When we retry 526 /// memcheck analysis without dependency checking 527 /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared 528 /// while this remains set if we have potentially dependent accesses. 529 bool IsRTCheckAnalysisNeeded; 530 531 /// The SCEV predicate containing all the SCEV-related assumptions. 532 PredicatedScalarEvolution &PSE; 533 }; 534 535 } // end anonymous namespace 536 537 /// \brief Check whether a pointer can participate in a runtime bounds check. 538 static bool hasComputableBounds(PredicatedScalarEvolution &PSE, 539 const ValueToValueMap &Strides, Value *Ptr, 540 Loop *L) { 541 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 542 543 // The bounds for loop-invariant pointer is trivial. 544 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 545 return true; 546 547 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 548 if (!AR) 549 return false; 550 551 return AR->isAffine(); 552 } 553 554 /// \brief Check whether a pointer address cannot wrap. 555 static bool isNoWrap(PredicatedScalarEvolution &PSE, 556 const ValueToValueMap &Strides, Value *Ptr, Loop *L) { 557 const SCEV *PtrScev = PSE.getSCEV(Ptr); 558 if (PSE.getSE()->isLoopInvariant(PtrScev, L)) 559 return true; 560 561 int Stride = getPtrStride(PSE, Ptr, L, Strides); 562 return Stride == 1; 563 } 564 565 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck, 566 ScalarEvolution *SE, Loop *TheLoop, 567 const ValueToValueMap &StridesMap, 568 bool ShouldCheckWrap) { 569 // Find pointers with computable bounds. We are going to use this information 570 // to place a runtime bound check. 571 bool CanDoRT = true; 572 573 bool NeedRTCheck = false; 574 if (!IsRTCheckAnalysisNeeded) return true; 575 576 bool IsDepCheckNeeded = isDependencyCheckNeeded(); 577 578 // We assign a consecutive id to access from different alias sets. 579 // Accesses between different groups doesn't need to be checked. 580 unsigned ASId = 1; 581 for (auto &AS : AST) { 582 int NumReadPtrChecks = 0; 583 int NumWritePtrChecks = 0; 584 585 // We assign consecutive id to access from different dependence sets. 586 // Accesses within the same set don't need a runtime check. 587 unsigned RunningDepId = 1; 588 DenseMap<Value *, unsigned> DepSetId; 589 590 for (auto A : AS) { 591 Value *Ptr = A.getValue(); 592 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true)); 593 MemAccessInfo Access(Ptr, IsWrite); 594 595 if (IsWrite) 596 ++NumWritePtrChecks; 597 else 598 ++NumReadPtrChecks; 599 600 if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) && 601 // When we run after a failing dependency check we have to make sure 602 // we don't have wrapping pointers. 603 (!ShouldCheckWrap || isNoWrap(PSE, StridesMap, Ptr, TheLoop))) { 604 // The id of the dependence set. 605 unsigned DepId; 606 607 if (IsDepCheckNeeded) { 608 Value *Leader = DepCands.getLeaderValue(Access).getPointer(); 609 unsigned &LeaderId = DepSetId[Leader]; 610 if (!LeaderId) 611 LeaderId = RunningDepId++; 612 DepId = LeaderId; 613 } else 614 // Each access has its own dependence set. 615 DepId = RunningDepId++; 616 617 RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE); 618 619 DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n'); 620 } else { 621 DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n'); 622 CanDoRT = false; 623 } 624 } 625 626 // If we have at least two writes or one write and a read then we need to 627 // check them. But there is no need to checks if there is only one 628 // dependence set for this alias set. 629 // 630 // Note that this function computes CanDoRT and NeedRTCheck independently. 631 // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer 632 // for which we couldn't find the bounds but we don't actually need to emit 633 // any checks so it does not matter. 634 if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2)) 635 NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 && 636 NumWritePtrChecks >= 1)); 637 638 ++ASId; 639 } 640 641 // If the pointers that we would use for the bounds comparison have different 642 // address spaces, assume the values aren't directly comparable, so we can't 643 // use them for the runtime check. We also have to assume they could 644 // overlap. In the future there should be metadata for whether address spaces 645 // are disjoint. 646 unsigned NumPointers = RtCheck.Pointers.size(); 647 for (unsigned i = 0; i < NumPointers; ++i) { 648 for (unsigned j = i + 1; j < NumPointers; ++j) { 649 // Only need to check pointers between two different dependency sets. 650 if (RtCheck.Pointers[i].DependencySetId == 651 RtCheck.Pointers[j].DependencySetId) 652 continue; 653 // Only need to check pointers in the same alias set. 654 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId) 655 continue; 656 657 Value *PtrI = RtCheck.Pointers[i].PointerValue; 658 Value *PtrJ = RtCheck.Pointers[j].PointerValue; 659 660 unsigned ASi = PtrI->getType()->getPointerAddressSpace(); 661 unsigned ASj = PtrJ->getType()->getPointerAddressSpace(); 662 if (ASi != ASj) { 663 DEBUG(dbgs() << "LAA: Runtime check would require comparison between" 664 " different address spaces\n"); 665 return false; 666 } 667 } 668 } 669 670 if (NeedRTCheck && CanDoRT) 671 RtCheck.generateChecks(DepCands, IsDepCheckNeeded); 672 673 DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks() 674 << " pointer comparisons.\n"); 675 676 RtCheck.Need = NeedRTCheck; 677 678 bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT; 679 if (!CanDoRTIfNeeded) 680 RtCheck.reset(); 681 return CanDoRTIfNeeded; 682 } 683 684 void AccessAnalysis::processMemAccesses() { 685 // We process the set twice: first we process read-write pointers, last we 686 // process read-only pointers. This allows us to skip dependence tests for 687 // read-only pointers. 688 689 DEBUG(dbgs() << "LAA: Processing memory accesses...\n"); 690 DEBUG(dbgs() << " AST: "; AST.dump()); 691 DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n"); 692 DEBUG({ 693 for (auto A : Accesses) 694 dbgs() << "\t" << *A.getPointer() << " (" << 695 (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ? 696 "read-only" : "read")) << ")\n"; 697 }); 698 699 // The AliasSetTracker has nicely partitioned our pointers by metadata 700 // compatibility and potential for underlying-object overlap. As a result, we 701 // only need to check for potential pointer dependencies within each alias 702 // set. 703 for (auto &AS : AST) { 704 // Note that both the alias-set tracker and the alias sets themselves used 705 // linked lists internally and so the iteration order here is deterministic 706 // (matching the original instruction order within each set). 707 708 bool SetHasWrite = false; 709 710 // Map of pointers to last access encountered. 711 typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap; 712 UnderlyingObjToAccessMap ObjToLastAccess; 713 714 // Set of access to check after all writes have been processed. 715 PtrAccessSet DeferredAccesses; 716 717 // Iterate over each alias set twice, once to process read/write pointers, 718 // and then to process read-only pointers. 719 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) { 720 bool UseDeferred = SetIteration > 0; 721 PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses; 722 723 for (auto AV : AS) { 724 Value *Ptr = AV.getValue(); 725 726 // For a single memory access in AliasSetTracker, Accesses may contain 727 // both read and write, and they both need to be handled for CheckDeps. 728 for (auto AC : S) { 729 if (AC.getPointer() != Ptr) 730 continue; 731 732 bool IsWrite = AC.getInt(); 733 734 // If we're using the deferred access set, then it contains only 735 // reads. 736 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite; 737 if (UseDeferred && !IsReadOnlyPtr) 738 continue; 739 // Otherwise, the pointer must be in the PtrAccessSet, either as a 740 // read or a write. 741 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite || 742 S.count(MemAccessInfo(Ptr, false))) && 743 "Alias-set pointer not in the access set?"); 744 745 MemAccessInfo Access(Ptr, IsWrite); 746 DepCands.insert(Access); 747 748 // Memorize read-only pointers for later processing and skip them in 749 // the first round (they need to be checked after we have seen all 750 // write pointers). Note: we also mark pointer that are not 751 // consecutive as "read-only" pointers (so that we check 752 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite". 753 if (!UseDeferred && IsReadOnlyPtr) { 754 DeferredAccesses.insert(Access); 755 continue; 756 } 757 758 // If this is a write - check other reads and writes for conflicts. If 759 // this is a read only check other writes for conflicts (but only if 760 // there is no other write to the ptr - this is an optimization to 761 // catch "a[i] = a[i] + " without having to do a dependence check). 762 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) { 763 CheckDeps.insert(Access); 764 IsRTCheckAnalysisNeeded = true; 765 } 766 767 if (IsWrite) 768 SetHasWrite = true; 769 770 // Create sets of pointers connected by a shared alias set and 771 // underlying object. 772 typedef SmallVector<Value *, 16> ValueVector; 773 ValueVector TempObjects; 774 775 GetUnderlyingObjects(Ptr, TempObjects, DL, LI); 776 DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n"); 777 for (Value *UnderlyingObj : TempObjects) { 778 // nullptr never alias, don't join sets for pointer that have "null" 779 // in their UnderlyingObjects list. 780 if (isa<ConstantPointerNull>(UnderlyingObj)) 781 continue; 782 783 UnderlyingObjToAccessMap::iterator Prev = 784 ObjToLastAccess.find(UnderlyingObj); 785 if (Prev != ObjToLastAccess.end()) 786 DepCands.unionSets(Access, Prev->second); 787 788 ObjToLastAccess[UnderlyingObj] = Access; 789 DEBUG(dbgs() << " " << *UnderlyingObj << "\n"); 790 } 791 } 792 } 793 } 794 } 795 } 796 797 static bool isInBoundsGep(Value *Ptr) { 798 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) 799 return GEP->isInBounds(); 800 return false; 801 } 802 803 /// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping, 804 /// i.e. monotonically increasing/decreasing. 805 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, 806 PredicatedScalarEvolution &PSE, const Loop *L) { 807 // FIXME: This should probably only return true for NUW. 808 if (AR->getNoWrapFlags(SCEV::NoWrapMask)) 809 return true; 810 811 // Scalar evolution does not propagate the non-wrapping flags to values that 812 // are derived from a non-wrapping induction variable because non-wrapping 813 // could be flow-sensitive. 814 // 815 // Look through the potentially overflowing instruction to try to prove 816 // non-wrapping for the *specific* value of Ptr. 817 818 // The arithmetic implied by an inbounds GEP can't overflow. 819 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr); 820 if (!GEP || !GEP->isInBounds()) 821 return false; 822 823 // Make sure there is only one non-const index and analyze that. 824 Value *NonConstIndex = nullptr; 825 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 826 if (!isa<ConstantInt>(*Index)) { 827 if (NonConstIndex) 828 return false; 829 NonConstIndex = *Index; 830 } 831 if (!NonConstIndex) 832 // The recurrence is on the pointer, ignore for now. 833 return false; 834 835 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW 836 // AddRec using a NSW operation. 837 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex)) 838 if (OBO->hasNoSignedWrap() && 839 // Assume constant for other the operand so that the AddRec can be 840 // easily found. 841 isa<ConstantInt>(OBO->getOperand(1))) { 842 auto *OpScev = PSE.getSCEV(OBO->getOperand(0)); 843 844 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev)) 845 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW); 846 } 847 848 return false; 849 } 850 851 /// \brief Check whether the access through \p Ptr has a constant stride. 852 int llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, 853 const Loop *Lp, const ValueToValueMap &StridesMap, 854 bool Assume) { 855 Type *Ty = Ptr->getType(); 856 assert(Ty->isPointerTy() && "Unexpected non-ptr"); 857 858 // Make sure that the pointer does not point to aggregate types. 859 auto *PtrTy = cast<PointerType>(Ty); 860 if (PtrTy->getElementType()->isAggregateType()) { 861 DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr 862 << "\n"); 863 return 0; 864 } 865 866 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr); 867 868 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); 869 if (Assume && !AR) 870 AR = PSE.getAsAddRec(Ptr); 871 872 if (!AR) { 873 DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr 874 << " SCEV: " << *PtrScev << "\n"); 875 return 0; 876 } 877 878 // The accesss function must stride over the innermost loop. 879 if (Lp != AR->getLoop()) { 880 DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " << 881 *Ptr << " SCEV: " << *AR << "\n"); 882 return 0; 883 } 884 885 // The address calculation must not wrap. Otherwise, a dependence could be 886 // inverted. 887 // An inbounds getelementptr that is a AddRec with a unit stride 888 // cannot wrap per definition. The unit stride requirement is checked later. 889 // An getelementptr without an inbounds attribute and unit stride would have 890 // to access the pointer value "0" which is undefined behavior in address 891 // space 0, therefore we can also vectorize this case. 892 bool IsInBoundsGEP = isInBoundsGep(Ptr); 893 bool IsNoWrapAddRec = 894 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) || 895 isNoWrapAddRec(Ptr, AR, PSE, Lp); 896 bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0; 897 if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) { 898 if (Assume) { 899 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 900 IsNoWrapAddRec = true; 901 DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n" 902 << "LAA: Pointer: " << *Ptr << "\n" 903 << "LAA: SCEV: " << *AR << "\n" 904 << "LAA: Added an overflow assumption\n"); 905 } else { 906 DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space " 907 << *Ptr << " SCEV: " << *AR << "\n"); 908 return 0; 909 } 910 } 911 912 // Check the step is constant. 913 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 914 915 // Calculate the pointer stride and check if it is constant. 916 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 917 if (!C) { 918 DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr << 919 " SCEV: " << *AR << "\n"); 920 return 0; 921 } 922 923 auto &DL = Lp->getHeader()->getModule()->getDataLayout(); 924 int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 925 const APInt &APStepVal = C->getAPInt(); 926 927 // Huge step value - give up. 928 if (APStepVal.getBitWidth() > 64) 929 return 0; 930 931 int64_t StepVal = APStepVal.getSExtValue(); 932 933 // Strided access. 934 int64_t Stride = StepVal / Size; 935 int64_t Rem = StepVal % Size; 936 if (Rem) 937 return 0; 938 939 // If the SCEV could wrap but we have an inbounds gep with a unit stride we 940 // know we can't "wrap around the address space". In case of address space 941 // zero we know that this won't happen without triggering undefined behavior. 942 if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) && 943 Stride != 1 && Stride != -1) { 944 if (Assume) { 945 // We can avoid this case by adding a run-time check. 946 DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either " 947 << "inbouds or in address space 0 may wrap:\n" 948 << "LAA: Pointer: " << *Ptr << "\n" 949 << "LAA: SCEV: " << *AR << "\n" 950 << "LAA: Added an overflow assumption\n"); 951 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW); 952 } else 953 return 0; 954 } 955 956 return Stride; 957 } 958 959 /// Take the pointer operand from the Load/Store instruction. 960 /// Returns NULL if this is not a valid Load/Store instruction. 961 static Value *getPointerOperand(Value *I) { 962 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 963 return LI->getPointerOperand(); 964 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 965 return SI->getPointerOperand(); 966 return nullptr; 967 } 968 969 /// Take the address space operand from the Load/Store instruction. 970 /// Returns -1 if this is not a valid Load/Store instruction. 971 static unsigned getAddressSpaceOperand(Value *I) { 972 if (LoadInst *L = dyn_cast<LoadInst>(I)) 973 return L->getPointerAddressSpace(); 974 if (StoreInst *S = dyn_cast<StoreInst>(I)) 975 return S->getPointerAddressSpace(); 976 return -1; 977 } 978 979 /// Returns true if the memory operations \p A and \p B are consecutive. 980 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, 981 ScalarEvolution &SE, bool CheckType) { 982 Value *PtrA = getPointerOperand(A); 983 Value *PtrB = getPointerOperand(B); 984 unsigned ASA = getAddressSpaceOperand(A); 985 unsigned ASB = getAddressSpaceOperand(B); 986 987 // Check that the address spaces match and that the pointers are valid. 988 if (!PtrA || !PtrB || (ASA != ASB)) 989 return false; 990 991 // Make sure that A and B are different pointers. 992 if (PtrA == PtrB) 993 return false; 994 995 // Make sure that A and B have the same type if required. 996 if(CheckType && PtrA->getType() != PtrB->getType()) 997 return false; 998 999 unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA); 1000 Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); 1001 APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty)); 1002 1003 APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0); 1004 PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); 1005 PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); 1006 1007 // OffsetDelta = OffsetB - OffsetA; 1008 const SCEV *OffsetSCEVA = SE.getConstant(OffsetA); 1009 const SCEV *OffsetSCEVB = SE.getConstant(OffsetB); 1010 const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA); 1011 const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV); 1012 const APInt &OffsetDelta = OffsetDeltaC->getAPInt(); 1013 // Check if they are based on the same pointer. That makes the offsets 1014 // sufficient. 1015 if (PtrA == PtrB) 1016 return OffsetDelta == Size; 1017 1018 // Compute the necessary base pointer delta to have the necessary final delta 1019 // equal to the size. 1020 // BaseDelta = Size - OffsetDelta; 1021 const SCEV *SizeSCEV = SE.getConstant(Size); 1022 const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV); 1023 1024 // Otherwise compute the distance with SCEV between the base pointers. 1025 const SCEV *PtrSCEVA = SE.getSCEV(PtrA); 1026 const SCEV *PtrSCEVB = SE.getSCEV(PtrB); 1027 const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta); 1028 return X == PtrSCEVB; 1029 } 1030 1031 bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) { 1032 switch (Type) { 1033 case NoDep: 1034 case Forward: 1035 case BackwardVectorizable: 1036 return true; 1037 1038 case Unknown: 1039 case ForwardButPreventsForwarding: 1040 case Backward: 1041 case BackwardVectorizableButPreventsForwarding: 1042 return false; 1043 } 1044 llvm_unreachable("unexpected DepType!"); 1045 } 1046 1047 bool MemoryDepChecker::Dependence::isBackward() const { 1048 switch (Type) { 1049 case NoDep: 1050 case Forward: 1051 case ForwardButPreventsForwarding: 1052 case Unknown: 1053 return false; 1054 1055 case BackwardVectorizable: 1056 case Backward: 1057 case BackwardVectorizableButPreventsForwarding: 1058 return true; 1059 } 1060 llvm_unreachable("unexpected DepType!"); 1061 } 1062 1063 bool MemoryDepChecker::Dependence::isPossiblyBackward() const { 1064 return isBackward() || Type == Unknown; 1065 } 1066 1067 bool MemoryDepChecker::Dependence::isForward() const { 1068 switch (Type) { 1069 case Forward: 1070 case ForwardButPreventsForwarding: 1071 return true; 1072 1073 case NoDep: 1074 case Unknown: 1075 case BackwardVectorizable: 1076 case Backward: 1077 case BackwardVectorizableButPreventsForwarding: 1078 return false; 1079 } 1080 llvm_unreachable("unexpected DepType!"); 1081 } 1082 1083 bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance, 1084 unsigned TypeByteSize) { 1085 // If loads occur at a distance that is not a multiple of a feasible vector 1086 // factor store-load forwarding does not take place. 1087 // Positive dependences might cause troubles because vectorizing them might 1088 // prevent store-load forwarding making vectorized code run a lot slower. 1089 // a[i] = a[i-3] ^ a[i-8]; 1090 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and 1091 // hence on your typical architecture store-load forwarding does not take 1092 // place. Vectorizing in such cases does not make sense. 1093 // Store-load forwarding distance. 1094 1095 // After this many iterations store-to-load forwarding conflicts should not 1096 // cause any slowdowns. 1097 const unsigned NumItersForStoreLoadThroughMemory = 8 * TypeByteSize; 1098 // Maximum vector factor. 1099 unsigned MaxVFWithoutSLForwardIssues = std::min( 1100 VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes); 1101 1102 // Compute the smallest VF at which the store and load would be misaligned. 1103 for (unsigned VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues; 1104 VF *= 2) { 1105 // If the number of vector iteration between the store and the load are 1106 // small we could incur conflicts. 1107 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) { 1108 MaxVFWithoutSLForwardIssues = (VF >>= 1); 1109 break; 1110 } 1111 } 1112 1113 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) { 1114 DEBUG(dbgs() << "LAA: Distance " << Distance 1115 << " that could cause a store-load forwarding conflict\n"); 1116 return true; 1117 } 1118 1119 if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes && 1120 MaxVFWithoutSLForwardIssues != 1121 VectorizerParams::MaxVectorWidth * TypeByteSize) 1122 MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues; 1123 return false; 1124 } 1125 1126 /// \brief Check the dependence for two accesses with the same stride \p Stride. 1127 /// \p Distance is the positive distance and \p TypeByteSize is type size in 1128 /// bytes. 1129 /// 1130 /// \returns true if they are independent. 1131 static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride, 1132 unsigned TypeByteSize) { 1133 assert(Stride > 1 && "The stride must be greater than 1"); 1134 assert(TypeByteSize > 0 && "The type size in byte must be non-zero"); 1135 assert(Distance > 0 && "The distance must be non-zero"); 1136 1137 // Skip if the distance is not multiple of type byte size. 1138 if (Distance % TypeByteSize) 1139 return false; 1140 1141 unsigned ScaledDist = Distance / TypeByteSize; 1142 1143 // No dependence if the scaled distance is not multiple of the stride. 1144 // E.g. 1145 // for (i = 0; i < 1024 ; i += 4) 1146 // A[i+2] = A[i] + 1; 1147 // 1148 // Two accesses in memory (scaled distance is 2, stride is 4): 1149 // | A[0] | | | | A[4] | | | | 1150 // | | | A[2] | | | | A[6] | | 1151 // 1152 // E.g. 1153 // for (i = 0; i < 1024 ; i += 3) 1154 // A[i+4] = A[i] + 1; 1155 // 1156 // Two accesses in memory (scaled distance is 4, stride is 3): 1157 // | A[0] | | | A[3] | | | A[6] | | | 1158 // | | | | | A[4] | | | A[7] | | 1159 return ScaledDist % Stride; 1160 } 1161 1162 MemoryDepChecker::Dependence::DepType 1163 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, 1164 const MemAccessInfo &B, unsigned BIdx, 1165 const ValueToValueMap &Strides) { 1166 assert (AIdx < BIdx && "Must pass arguments in program order"); 1167 1168 Value *APtr = A.getPointer(); 1169 Value *BPtr = B.getPointer(); 1170 bool AIsWrite = A.getInt(); 1171 bool BIsWrite = B.getInt(); 1172 1173 // Two reads are independent. 1174 if (!AIsWrite && !BIsWrite) 1175 return Dependence::NoDep; 1176 1177 // We cannot check pointers in different address spaces. 1178 if (APtr->getType()->getPointerAddressSpace() != 1179 BPtr->getType()->getPointerAddressSpace()) 1180 return Dependence::Unknown; 1181 1182 int StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true); 1183 int StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true); 1184 1185 const SCEV *Src = PSE.getSCEV(APtr); 1186 const SCEV *Sink = PSE.getSCEV(BPtr); 1187 1188 // If the induction step is negative we have to invert source and sink of the 1189 // dependence. 1190 if (StrideAPtr < 0) { 1191 std::swap(APtr, BPtr); 1192 std::swap(Src, Sink); 1193 std::swap(AIsWrite, BIsWrite); 1194 std::swap(AIdx, BIdx); 1195 std::swap(StrideAPtr, StrideBPtr); 1196 } 1197 1198 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src); 1199 1200 DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink 1201 << "(Induction step: " << StrideAPtr << ")\n"); 1202 DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to " 1203 << *InstMap[BIdx] << ": " << *Dist << "\n"); 1204 1205 // Need accesses with constant stride. We don't want to vectorize 1206 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in 1207 // the address space. 1208 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){ 1209 DEBUG(dbgs() << "Pointer access with non-constant stride\n"); 1210 return Dependence::Unknown; 1211 } 1212 1213 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist); 1214 if (!C) { 1215 DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); 1216 ShouldRetryWithRuntimeCheck = true; 1217 return Dependence::Unknown; 1218 } 1219 1220 Type *ATy = APtr->getType()->getPointerElementType(); 1221 Type *BTy = BPtr->getType()->getPointerElementType(); 1222 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); 1223 unsigned TypeByteSize = DL.getTypeAllocSize(ATy); 1224 1225 const APInt &Val = C->getAPInt(); 1226 int64_t Distance = Val.getSExtValue(); 1227 unsigned Stride = std::abs(StrideAPtr); 1228 1229 // Attempt to prove strided accesses independent. 1230 if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy && 1231 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) { 1232 DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); 1233 return Dependence::NoDep; 1234 } 1235 1236 // Negative distances are not plausible dependencies. 1237 if (Val.isNegative()) { 1238 bool IsTrueDataDependence = (AIsWrite && !BIsWrite); 1239 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1240 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) || 1241 ATy != BTy)) { 1242 DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); 1243 return Dependence::ForwardButPreventsForwarding; 1244 } 1245 1246 DEBUG(dbgs() << "LAA: Dependence is negative\n"); 1247 return Dependence::Forward; 1248 } 1249 1250 // Write to the same location with the same size. 1251 // Could be improved to assert type sizes are the same (i32 == float, etc). 1252 if (Val == 0) { 1253 if (ATy == BTy) 1254 return Dependence::Forward; 1255 DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n"); 1256 return Dependence::Unknown; 1257 } 1258 1259 assert(Val.isStrictlyPositive() && "Expect a positive value"); 1260 1261 if (ATy != BTy) { 1262 DEBUG(dbgs() << 1263 "LAA: ReadWrite-Write positive dependency with different types\n"); 1264 return Dependence::Unknown; 1265 } 1266 1267 // Bail out early if passed-in parameters make vectorization not feasible. 1268 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ? 1269 VectorizerParams::VectorizationFactor : 1); 1270 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ? 1271 VectorizerParams::VectorizationInterleave : 1); 1272 // The minimum number of iterations for a vectorized/unrolled version. 1273 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U); 1274 1275 // It's not vectorizable if the distance is smaller than the minimum distance 1276 // needed for a vectroized/unrolled version. Vectorizing one iteration in 1277 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs 1278 // TypeByteSize (No need to plus the last gap distance). 1279 // 1280 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1281 // foo(int *A) { 1282 // int *B = (int *)((char *)A + 14); 1283 // for (i = 0 ; i < 1024 ; i += 2) 1284 // B[i] = A[i] + 1; 1285 // } 1286 // 1287 // Two accesses in memory (stride is 2): 1288 // | A[0] | | A[2] | | A[4] | | A[6] | | 1289 // | B[0] | | B[2] | | B[4] | 1290 // 1291 // Distance needs for vectorizing iterations except the last iteration: 1292 // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4. 1293 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4. 1294 // 1295 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is 1296 // 12, which is less than distance. 1297 // 1298 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4), 1299 // the minimum distance needed is 28, which is greater than distance. It is 1300 // not safe to do vectorization. 1301 unsigned MinDistanceNeeded = 1302 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize; 1303 if (MinDistanceNeeded > Distance) { 1304 DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance 1305 << '\n'); 1306 return Dependence::Backward; 1307 } 1308 1309 // Unsafe if the minimum distance needed is greater than max safe distance. 1310 if (MinDistanceNeeded > MaxSafeDepDistBytes) { 1311 DEBUG(dbgs() << "LAA: Failure because it needs at least " 1312 << MinDistanceNeeded << " size in bytes"); 1313 return Dependence::Backward; 1314 } 1315 1316 // Positive distance bigger than max vectorization factor. 1317 // FIXME: Should use max factor instead of max distance in bytes, which could 1318 // not handle different types. 1319 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 1320 // void foo (int *A, char *B) { 1321 // for (unsigned i = 0; i < 1024; i++) { 1322 // A[i+2] = A[i] + 1; 1323 // B[i+2] = B[i] + 1; 1324 // } 1325 // } 1326 // 1327 // This case is currently unsafe according to the max safe distance. If we 1328 // analyze the two accesses on array B, the max safe dependence distance 1329 // is 2. Then we analyze the accesses on array A, the minimum distance needed 1330 // is 8, which is less than 2 and forbidden vectorization, But actually 1331 // both A and B could be vectorized by 2 iterations. 1332 MaxSafeDepDistBytes = 1333 Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes; 1334 1335 bool IsTrueDataDependence = (!AIsWrite && BIsWrite); 1336 if (IsTrueDataDependence && EnableForwardingConflictDetection && 1337 couldPreventStoreLoadForward(Distance, TypeByteSize)) 1338 return Dependence::BackwardVectorizableButPreventsForwarding; 1339 1340 DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue() 1341 << " with max VF = " 1342 << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n'); 1343 1344 return Dependence::BackwardVectorizable; 1345 } 1346 1347 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets, 1348 MemAccessInfoSet &CheckDeps, 1349 const ValueToValueMap &Strides) { 1350 1351 MaxSafeDepDistBytes = -1U; 1352 while (!CheckDeps.empty()) { 1353 MemAccessInfo CurAccess = *CheckDeps.begin(); 1354 1355 // Get the relevant memory access set. 1356 EquivalenceClasses<MemAccessInfo>::iterator I = 1357 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess)); 1358 1359 // Check accesses within this set. 1360 EquivalenceClasses<MemAccessInfo>::member_iterator AI = 1361 AccessSets.member_begin(I); 1362 EquivalenceClasses<MemAccessInfo>::member_iterator AE = 1363 AccessSets.member_end(); 1364 1365 // Check every access pair. 1366 while (AI != AE) { 1367 CheckDeps.erase(*AI); 1368 EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI); 1369 while (OI != AE) { 1370 // Check every accessing instruction pair in program order. 1371 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(), 1372 I1E = Accesses[*AI].end(); I1 != I1E; ++I1) 1373 for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(), 1374 I2E = Accesses[*OI].end(); I2 != I2E; ++I2) { 1375 auto A = std::make_pair(&*AI, *I1); 1376 auto B = std::make_pair(&*OI, *I2); 1377 1378 assert(*I1 != *I2); 1379 if (*I1 > *I2) 1380 std::swap(A, B); 1381 1382 Dependence::DepType Type = 1383 isDependent(*A.first, A.second, *B.first, B.second, Strides); 1384 SafeForVectorization &= Dependence::isSafeForVectorization(Type); 1385 1386 // Gather dependences unless we accumulated MaxDependences 1387 // dependences. In that case return as soon as we find the first 1388 // unsafe dependence. This puts a limit on this quadratic 1389 // algorithm. 1390 if (RecordDependences) { 1391 if (Type != Dependence::NoDep) 1392 Dependences.push_back(Dependence(A.second, B.second, Type)); 1393 1394 if (Dependences.size() >= MaxDependences) { 1395 RecordDependences = false; 1396 Dependences.clear(); 1397 DEBUG(dbgs() << "Too many dependences, stopped recording\n"); 1398 } 1399 } 1400 if (!RecordDependences && !SafeForVectorization) 1401 return false; 1402 } 1403 ++OI; 1404 } 1405 AI++; 1406 } 1407 } 1408 1409 DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n"); 1410 return SafeForVectorization; 1411 } 1412 1413 SmallVector<Instruction *, 4> 1414 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const { 1415 MemAccessInfo Access(Ptr, isWrite); 1416 auto &IndexVector = Accesses.find(Access)->second; 1417 1418 SmallVector<Instruction *, 4> Insts; 1419 std::transform(IndexVector.begin(), IndexVector.end(), 1420 std::back_inserter(Insts), 1421 [&](unsigned Idx) { return this->InstMap[Idx]; }); 1422 return Insts; 1423 } 1424 1425 const char *MemoryDepChecker::Dependence::DepName[] = { 1426 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward", 1427 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"}; 1428 1429 void MemoryDepChecker::Dependence::print( 1430 raw_ostream &OS, unsigned Depth, 1431 const SmallVectorImpl<Instruction *> &Instrs) const { 1432 OS.indent(Depth) << DepName[Type] << ":\n"; 1433 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n"; 1434 OS.indent(Depth + 2) << *Instrs[Destination] << "\n"; 1435 } 1436 1437 bool LoopAccessInfo::canAnalyzeLoop() { 1438 // We need to have a loop header. 1439 DEBUG(dbgs() << "LAA: Found a loop in " 1440 << TheLoop->getHeader()->getParent()->getName() << ": " 1441 << TheLoop->getHeader()->getName() << '\n'); 1442 1443 // We can only analyze innermost loops. 1444 if (!TheLoop->empty()) { 1445 DEBUG(dbgs() << "LAA: loop is not the innermost loop\n"); 1446 emitAnalysis(LoopAccessReport() << "loop is not the innermost loop"); 1447 return false; 1448 } 1449 1450 // We must have a single backedge. 1451 if (TheLoop->getNumBackEdges() != 1) { 1452 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1453 emitAnalysis( 1454 LoopAccessReport() << 1455 "loop control flow is not understood by analyzer"); 1456 return false; 1457 } 1458 1459 // We must have a single exiting block. 1460 if (!TheLoop->getExitingBlock()) { 1461 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1462 emitAnalysis( 1463 LoopAccessReport() << 1464 "loop control flow is not understood by analyzer"); 1465 return false; 1466 } 1467 1468 // We only handle bottom-tested loops, i.e. loop in which the condition is 1469 // checked at the end of each iteration. With that we can assume that all 1470 // instructions in the loop are executed the same number of times. 1471 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 1472 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); 1473 emitAnalysis( 1474 LoopAccessReport() << 1475 "loop control flow is not understood by analyzer"); 1476 return false; 1477 } 1478 1479 // ScalarEvolution needs to be able to find the exit count. 1480 const SCEV *ExitCount = PSE.getBackedgeTakenCount(); 1481 if (ExitCount == PSE.getSE()->getCouldNotCompute()) { 1482 emitAnalysis(LoopAccessReport() 1483 << "could not determine number of loop iterations"); 1484 DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n"); 1485 return false; 1486 } 1487 1488 return true; 1489 } 1490 1491 void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) { 1492 1493 typedef SmallPtrSet<Value*, 16> ValueSet; 1494 1495 // Holds the Load and Store instructions. 1496 SmallVector<LoadInst *, 16> Loads; 1497 SmallVector<StoreInst *, 16> Stores; 1498 1499 // Holds all the different accesses in the loop. 1500 unsigned NumReads = 0; 1501 unsigned NumReadWrites = 0; 1502 1503 PtrRtChecking.Pointers.clear(); 1504 PtrRtChecking.Need = false; 1505 1506 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 1507 1508 // For each block. 1509 for (Loop::block_iterator bb = TheLoop->block_begin(), 1510 be = TheLoop->block_end(); bb != be; ++bb) { 1511 1512 // Scan the BB and collect legal loads and stores. 1513 for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e; 1514 ++it) { 1515 1516 // If this is a load, save it. If this instruction can read from memory 1517 // but is not a load, then we quit. Notice that we don't handle function 1518 // calls that read or write. 1519 if (it->mayReadFromMemory()) { 1520 // Many math library functions read the rounding mode. We will only 1521 // vectorize a loop if it contains known function calls that don't set 1522 // the flag. Therefore, it is safe to ignore this read from memory. 1523 CallInst *Call = dyn_cast<CallInst>(it); 1524 if (Call && getVectorIntrinsicIDForCall(Call, TLI)) 1525 continue; 1526 1527 // If the function has an explicit vectorized counterpart, we can safely 1528 // assume that it can be vectorized. 1529 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() && 1530 TLI->isFunctionVectorizable(Call->getCalledFunction()->getName())) 1531 continue; 1532 1533 LoadInst *Ld = dyn_cast<LoadInst>(it); 1534 if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) { 1535 emitAnalysis(LoopAccessReport(Ld) 1536 << "read with atomic ordering or volatile read"); 1537 DEBUG(dbgs() << "LAA: Found a non-simple load.\n"); 1538 CanVecMem = false; 1539 return; 1540 } 1541 NumLoads++; 1542 Loads.push_back(Ld); 1543 DepChecker.addAccess(Ld); 1544 continue; 1545 } 1546 1547 // Save 'store' instructions. Abort if other instructions write to memory. 1548 if (it->mayWriteToMemory()) { 1549 StoreInst *St = dyn_cast<StoreInst>(it); 1550 if (!St) { 1551 emitAnalysis(LoopAccessReport(&*it) << 1552 "instruction cannot be vectorized"); 1553 CanVecMem = false; 1554 return; 1555 } 1556 if (!St->isSimple() && !IsAnnotatedParallel) { 1557 emitAnalysis(LoopAccessReport(St) 1558 << "write with atomic ordering or volatile write"); 1559 DEBUG(dbgs() << "LAA: Found a non-simple store.\n"); 1560 CanVecMem = false; 1561 return; 1562 } 1563 NumStores++; 1564 Stores.push_back(St); 1565 DepChecker.addAccess(St); 1566 } 1567 } // Next instr. 1568 } // Next block. 1569 1570 // Now we have two lists that hold the loads and the stores. 1571 // Next, we find the pointers that they use. 1572 1573 // Check if we see any stores. If there are no stores, then we don't 1574 // care if the pointers are *restrict*. 1575 if (!Stores.size()) { 1576 DEBUG(dbgs() << "LAA: Found a read-only loop!\n"); 1577 CanVecMem = true; 1578 return; 1579 } 1580 1581 MemoryDepChecker::DepCandidates DependentAccesses; 1582 AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(), 1583 AA, LI, DependentAccesses, PSE); 1584 1585 // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects 1586 // multiple times on the same object. If the ptr is accessed twice, once 1587 // for read and once for write, it will only appear once (on the write 1588 // list). This is okay, since we are going to check for conflicts between 1589 // writes and between reads and writes, but not between reads and reads. 1590 ValueSet Seen; 1591 1592 for (StoreInst *ST : Stores) { 1593 Value *Ptr = ST->getPointerOperand(); 1594 // Check for store to loop invariant address. 1595 StoreToLoopInvariantAddress |= isUniform(Ptr); 1596 // If we did *not* see this pointer before, insert it to the read-write 1597 // list. At this phase it is only a 'write' list. 1598 if (Seen.insert(Ptr).second) { 1599 ++NumReadWrites; 1600 1601 MemoryLocation Loc = MemoryLocation::get(ST); 1602 // The TBAA metadata could have a control dependency on the predication 1603 // condition, so we cannot rely on it when determining whether or not we 1604 // need runtime pointer checks. 1605 if (blockNeedsPredication(ST->getParent(), TheLoop, DT)) 1606 Loc.AATags.TBAA = nullptr; 1607 1608 Accesses.addStore(Loc); 1609 } 1610 } 1611 1612 if (IsAnnotatedParallel) { 1613 DEBUG(dbgs() 1614 << "LAA: A loop annotated parallel, ignore memory dependency " 1615 << "checks.\n"); 1616 CanVecMem = true; 1617 return; 1618 } 1619 1620 for (LoadInst *LD : Loads) { 1621 Value *Ptr = LD->getPointerOperand(); 1622 // If we did *not* see this pointer before, insert it to the 1623 // read list. If we *did* see it before, then it is already in 1624 // the read-write list. This allows us to vectorize expressions 1625 // such as A[i] += x; Because the address of A[i] is a read-write 1626 // pointer. This only works if the index of A[i] is consecutive. 1627 // If the address of i is unknown (for example A[B[i]]) then we may 1628 // read a few words, modify, and write a few words, and some of the 1629 // words may be written to the same address. 1630 bool IsReadOnlyPtr = false; 1631 if (Seen.insert(Ptr).second || !getPtrStride(PSE, Ptr, TheLoop, Strides)) { 1632 ++NumReads; 1633 IsReadOnlyPtr = true; 1634 } 1635 1636 MemoryLocation Loc = MemoryLocation::get(LD); 1637 // The TBAA metadata could have a control dependency on the predication 1638 // condition, so we cannot rely on it when determining whether or not we 1639 // need runtime pointer checks. 1640 if (blockNeedsPredication(LD->getParent(), TheLoop, DT)) 1641 Loc.AATags.TBAA = nullptr; 1642 1643 Accesses.addLoad(Loc, IsReadOnlyPtr); 1644 } 1645 1646 // If we write (or read-write) to a single destination and there are no 1647 // other reads in this loop then is it safe to vectorize. 1648 if (NumReadWrites == 1 && NumReads == 0) { 1649 DEBUG(dbgs() << "LAA: Found a write-only loop!\n"); 1650 CanVecMem = true; 1651 return; 1652 } 1653 1654 // Build dependence sets and check whether we need a runtime pointer bounds 1655 // check. 1656 Accesses.buildDependenceSets(); 1657 1658 // Find pointers with computable bounds. We are going to use this information 1659 // to place a runtime bound check. 1660 bool CanDoRTIfNeeded = 1661 Accesses.canCheckPtrAtRT(PtrRtChecking, PSE.getSE(), TheLoop, Strides); 1662 if (!CanDoRTIfNeeded) { 1663 emitAnalysis(LoopAccessReport() << "cannot identify array bounds"); 1664 DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " 1665 << "the array bounds.\n"); 1666 CanVecMem = false; 1667 return; 1668 } 1669 1670 DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n"); 1671 1672 CanVecMem = true; 1673 if (Accesses.isDependencyCheckNeeded()) { 1674 DEBUG(dbgs() << "LAA: Checking memory dependencies\n"); 1675 CanVecMem = DepChecker.areDepsSafe( 1676 DependentAccesses, Accesses.getDependenciesToCheck(), Strides); 1677 MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes(); 1678 1679 if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) { 1680 DEBUG(dbgs() << "LAA: Retrying with memory checks\n"); 1681 1682 // Clear the dependency checks. We assume they are not needed. 1683 Accesses.resetDepChecks(DepChecker); 1684 1685 PtrRtChecking.reset(); 1686 PtrRtChecking.Need = true; 1687 1688 auto *SE = PSE.getSE(); 1689 CanDoRTIfNeeded = 1690 Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides, true); 1691 1692 // Check that we found the bounds for the pointer. 1693 if (!CanDoRTIfNeeded) { 1694 emitAnalysis(LoopAccessReport() 1695 << "cannot check memory dependencies at runtime"); 1696 DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n"); 1697 CanVecMem = false; 1698 return; 1699 } 1700 1701 CanVecMem = true; 1702 } 1703 } 1704 1705 if (CanVecMem) 1706 DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We" 1707 << (PtrRtChecking.Need ? "" : " don't") 1708 << " need runtime memory checks.\n"); 1709 else { 1710 emitAnalysis( 1711 LoopAccessReport() 1712 << "unsafe dependent memory operations in loop. Use " 1713 "#pragma loop distribute(enable) to allow loop distribution " 1714 "to attempt to isolate the offending operations into a separate " 1715 "loop"); 1716 DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n"); 1717 } 1718 } 1719 1720 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, 1721 DominatorTree *DT) { 1722 assert(TheLoop->contains(BB) && "Unknown block used"); 1723 1724 // Blocks that do not dominate the latch need predication. 1725 BasicBlock* Latch = TheLoop->getLoopLatch(); 1726 return !DT->dominates(BB, Latch); 1727 } 1728 1729 void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) { 1730 assert(!Report && "Multiple reports generated"); 1731 Report = Message; 1732 } 1733 1734 bool LoopAccessInfo::isUniform(Value *V) const { 1735 return (PSE.getSE()->isLoopInvariant(PSE.getSE()->getSCEV(V), TheLoop)); 1736 } 1737 1738 // FIXME: this function is currently a duplicate of the one in 1739 // LoopVectorize.cpp. 1740 static Instruction *getFirstInst(Instruction *FirstInst, Value *V, 1741 Instruction *Loc) { 1742 if (FirstInst) 1743 return FirstInst; 1744 if (Instruction *I = dyn_cast<Instruction>(V)) 1745 return I->getParent() == Loc->getParent() ? I : nullptr; 1746 return nullptr; 1747 } 1748 1749 namespace { 1750 /// \brief IR Values for the lower and upper bounds of a pointer evolution. We 1751 /// need to use value-handles because SCEV expansion can invalidate previously 1752 /// expanded values. Thus expansion of a pointer can invalidate the bounds for 1753 /// a previous one. 1754 struct PointerBounds { 1755 TrackingVH<Value> Start; 1756 TrackingVH<Value> End; 1757 }; 1758 } // end anonymous namespace 1759 1760 /// \brief Expand code for the lower and upper bound of the pointer group \p CG 1761 /// in \p TheLoop. \return the values for the bounds. 1762 static PointerBounds 1763 expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop, 1764 Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE, 1765 const RuntimePointerChecking &PtrRtChecking) { 1766 Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue; 1767 const SCEV *Sc = SE->getSCEV(Ptr); 1768 1769 if (SE->isLoopInvariant(Sc, TheLoop)) { 1770 DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr 1771 << "\n"); 1772 return {Ptr, Ptr}; 1773 } else { 1774 unsigned AS = Ptr->getType()->getPointerAddressSpace(); 1775 LLVMContext &Ctx = Loc->getContext(); 1776 1777 // Use this type for pointer arithmetic. 1778 Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS); 1779 Value *Start = nullptr, *End = nullptr; 1780 1781 DEBUG(dbgs() << "LAA: Adding RT check for range:\n"); 1782 Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc); 1783 End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc); 1784 DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n"); 1785 return {Start, End}; 1786 } 1787 } 1788 1789 /// \brief Turns a collection of checks into a collection of expanded upper and 1790 /// lower bounds for both pointers in the check. 1791 static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds( 1792 const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks, 1793 Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp, 1794 const RuntimePointerChecking &PtrRtChecking) { 1795 SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds; 1796 1797 // Here we're relying on the SCEV Expander's cache to only emit code for the 1798 // same bounds once. 1799 std::transform( 1800 PointerChecks.begin(), PointerChecks.end(), 1801 std::back_inserter(ChecksWithBounds), 1802 [&](const RuntimePointerChecking::PointerCheck &Check) { 1803 PointerBounds 1804 First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking), 1805 Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking); 1806 return std::make_pair(First, Second); 1807 }); 1808 1809 return ChecksWithBounds; 1810 } 1811 1812 std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks( 1813 Instruction *Loc, 1814 const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks) 1815 const { 1816 auto *SE = PSE.getSE(); 1817 SCEVExpander Exp(*SE, DL, "induction"); 1818 auto ExpandedChecks = 1819 expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, PtrRtChecking); 1820 1821 LLVMContext &Ctx = Loc->getContext(); 1822 Instruction *FirstInst = nullptr; 1823 IRBuilder<> ChkBuilder(Loc); 1824 // Our instructions might fold to a constant. 1825 Value *MemoryRuntimeCheck = nullptr; 1826 1827 for (const auto &Check : ExpandedChecks) { 1828 const PointerBounds &A = Check.first, &B = Check.second; 1829 // Check if two pointers (A and B) conflict where conflict is computed as: 1830 // start(A) <= end(B) && start(B) <= end(A) 1831 unsigned AS0 = A.Start->getType()->getPointerAddressSpace(); 1832 unsigned AS1 = B.Start->getType()->getPointerAddressSpace(); 1833 1834 assert((AS0 == B.End->getType()->getPointerAddressSpace()) && 1835 (AS1 == A.End->getType()->getPointerAddressSpace()) && 1836 "Trying to bounds check pointers with different address spaces"); 1837 1838 Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0); 1839 Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1); 1840 1841 Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc"); 1842 Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc"); 1843 Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc"); 1844 Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc"); 1845 1846 Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0"); 1847 FirstInst = getFirstInst(FirstInst, Cmp0, Loc); 1848 Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1"); 1849 FirstInst = getFirstInst(FirstInst, Cmp1, Loc); 1850 Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict"); 1851 FirstInst = getFirstInst(FirstInst, IsConflict, Loc); 1852 if (MemoryRuntimeCheck) { 1853 IsConflict = 1854 ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx"); 1855 FirstInst = getFirstInst(FirstInst, IsConflict, Loc); 1856 } 1857 MemoryRuntimeCheck = IsConflict; 1858 } 1859 1860 if (!MemoryRuntimeCheck) 1861 return std::make_pair(nullptr, nullptr); 1862 1863 // We have to do this trickery because the IRBuilder might fold the check to a 1864 // constant expression in which case there is no Instruction anchored in a 1865 // the block. 1866 Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck, 1867 ConstantInt::getTrue(Ctx)); 1868 ChkBuilder.Insert(Check, "memcheck.conflict"); 1869 FirstInst = getFirstInst(FirstInst, Check, Loc); 1870 return std::make_pair(FirstInst, Check); 1871 } 1872 1873 std::pair<Instruction *, Instruction *> 1874 LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const { 1875 if (!PtrRtChecking.Need) 1876 return std::make_pair(nullptr, nullptr); 1877 1878 return addRuntimeChecks(Loc, PtrRtChecking.getChecks()); 1879 } 1880 1881 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE, 1882 const DataLayout &DL, 1883 const TargetLibraryInfo *TLI, AliasAnalysis *AA, 1884 DominatorTree *DT, LoopInfo *LI, 1885 const ValueToValueMap &Strides) 1886 : PSE(*SE, *L), PtrRtChecking(SE), DepChecker(PSE, L), TheLoop(L), DL(DL), 1887 TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0), 1888 MaxSafeDepDistBytes(-1U), CanVecMem(false), 1889 StoreToLoopInvariantAddress(false) { 1890 if (canAnalyzeLoop()) 1891 analyzeLoop(Strides); 1892 } 1893 1894 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const { 1895 if (CanVecMem) { 1896 OS.indent(Depth) << "Memory dependences are safe"; 1897 if (MaxSafeDepDistBytes != -1U) 1898 OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes 1899 << " bytes"; 1900 if (PtrRtChecking.Need) 1901 OS << " with run-time checks"; 1902 OS << "\n"; 1903 } 1904 1905 if (Report) 1906 OS.indent(Depth) << "Report: " << Report->str() << "\n"; 1907 1908 if (auto *Dependences = DepChecker.getDependences()) { 1909 OS.indent(Depth) << "Dependences:\n"; 1910 for (auto &Dep : *Dependences) { 1911 Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions()); 1912 OS << "\n"; 1913 } 1914 } else 1915 OS.indent(Depth) << "Too many dependences, not recorded\n"; 1916 1917 // List the pair of accesses need run-time checks to prove independence. 1918 PtrRtChecking.print(OS, Depth); 1919 OS << "\n"; 1920 1921 OS.indent(Depth) << "Store to invariant address was " 1922 << (StoreToLoopInvariantAddress ? "" : "not ") 1923 << "found in loop.\n"; 1924 1925 OS.indent(Depth) << "SCEV assumptions:\n"; 1926 PSE.getUnionPredicate().print(OS, Depth); 1927 1928 OS << "\n"; 1929 1930 OS.indent(Depth) << "Expressions re-written:\n"; 1931 PSE.print(OS, Depth); 1932 } 1933 1934 const LoopAccessInfo & 1935 LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) { 1936 auto &LAI = LoopAccessInfoMap[L]; 1937 1938 #ifndef NDEBUG 1939 assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) && 1940 "Symbolic strides changed for loop"); 1941 #endif 1942 1943 if (!LAI) { 1944 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 1945 LAI = 1946 llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI, Strides); 1947 #ifndef NDEBUG 1948 LAI->NumSymbolicStrides = Strides.size(); 1949 #endif 1950 } 1951 return *LAI.get(); 1952 } 1953 1954 void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const { 1955 LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this); 1956 1957 ValueToValueMap NoSymbolicStrides; 1958 1959 for (Loop *TopLevelLoop : *LI) 1960 for (Loop *L : depth_first(TopLevelLoop)) { 1961 OS.indent(2) << L->getHeader()->getName() << ":\n"; 1962 auto &LAI = LAA.getInfo(L, NoSymbolicStrides); 1963 LAI.print(OS, 4); 1964 } 1965 } 1966 1967 bool LoopAccessAnalysis::runOnFunction(Function &F) { 1968 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1969 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1970 TLI = TLIP ? &TLIP->getTLI() : nullptr; 1971 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1972 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1973 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1974 1975 return false; 1976 } 1977 1978 void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 1979 AU.addRequired<ScalarEvolutionWrapperPass>(); 1980 AU.addRequired<AAResultsWrapperPass>(); 1981 AU.addRequired<DominatorTreeWrapperPass>(); 1982 AU.addRequired<LoopInfoWrapperPass>(); 1983 1984 AU.setPreservesAll(); 1985 } 1986 1987 char LoopAccessAnalysis::ID = 0; 1988 static const char laa_name[] = "Loop Access Analysis"; 1989 #define LAA_NAME "loop-accesses" 1990 1991 INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) 1992 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 1993 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 1994 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1995 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 1996 INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) 1997 1998 namespace llvm { 1999 Pass *createLAAPass() { 2000 return new LoopAccessAnalysis(); 2001 } 2002 } 2003