//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// \brief The maximum number of iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// \brief We collect dependences up to this threshold.
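/// Once the limit is reached, the checker stops recording individual
/// dependences (see MemoryDepChecker::areDepsSafe) and only tracks whether
/// vectorization is still safe.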
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Function *TheFunction,
                                    const Loop *TheLoop,
                                    const char *PassName) {
  DebugLoc DL = TheLoop->getStartLoc();
  if (const Instruction *I = Message.getInstr())
    DL = I->getDebugLoc();
  emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
                                 *TheFunction, DL, Message.str());
}

Value *llvm::stripIntegerCast(Value *V) {
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map, return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace the symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    ScalarEvolution *SE = PSE.getSE();
    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    PSE.addPredicate(*SE->getEqualPredicate(U, CT));
    auto *Expr = PSE.getSCEV(Ptr);

    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
                 << "\n");
    return Expr;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}

void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride-replaced SCEV.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
  assert(AR && "Invalid addrec expression");
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *Ex = SE->getBackedgeTakenCount(Lp);

  const SCEV *ScStart = AR->getStart();
  const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // For expressions with a negative step, the upper bound is ScStart and the
  // lower bound is ScEnd.
  if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) {
    if (CStep->getValue()->isNegative())
      std::swap(ScStart, ScEnd);
  } else {
    // Fallback case: the step is not constant, but we can still
    // get the upper and lower bounds of the interval by using min/max
    // expressions.
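    // Note that the second expression below uses AR->getStart() rather than
    // ScStart because ScStart has just been overwritten by the umin.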
    ScStart = SE->getUMinExpr(ScStart, ScEnd);
    ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidate equivalence classes
  // because:
  //  - We know that pointers in the same equivalence class share
  //    the same underlying object and therefore there is a chance
  //    that we can compare pointers
  //  - We wouldn't be able to merge two pointers for which we need
  //    to emit a memcheck. The classes in DepCands are already
  //    conveniently built such that no two pointers in the same
  //    class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups.
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
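      // (The outer loop over 'Pointers' will then skip every other member
      // of this equivalence class.)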
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {
/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run-time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
        PSE(PSE) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckStride = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is
  /// needed and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis
  /// without dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object
  /// and intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really
  /// need a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may
  /// need to add memchecks. Perform the analysis to determine the necessary
  /// checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.
  /// When we retry memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckStride) {
  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) &&
          // When we run after a failing dependency check we have to make
          // sure we don't have wrapping pointers.
          (!ShouldCheckStride ||
           isStridedPtr(PSE, Ptr, TheLoop, StridesMap) == 1)) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need to check if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck
    // independently. For example CanDoRT=false, NeedRTCheck=false means that
    // we have a pointer for which we couldn't find the bounds but we don't
    // actually need to emit any checks so it does not matter.
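    //
    // RunningDepId == 2 below means that exactly one dependence set was
    // formed for this alias set (ids start at 1), in which case the
    // dependence analysis already covers every pair of pointers in it.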
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 &&
                                                 NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have
  // different address spaces, assume the values aren't directly comparable,
  // so we can't use them for the runtime check. We also have to assume they
  // could overlap. In the future there should be metadata for whether
  // address spaces are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << " AST: "; AST.dump());
  DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result,
  // we only need to check for potential pointer dependencies within each
  // alias set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is
    // deterministic (matching the original instruction order within each
    // set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write
    // pointers, and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may
        // contain both read and write, and they both need to be handled for
        // CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them
          // in the first round (they need to be checked after we have seen
          // all write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write, check other reads and writes for conflicts.
          // If this is a read, only check other writes for conflicts (but
          // only if there is no other write to the ptr - this is an
          // optimization to catch "a[i] = a[i] + " without having to do a
          // dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           ScalarEvolution *SE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values
  // that are derived from a non-wrapping induction variable because
  // non-wrapping could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
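  // For example, given "getelementptr inbounds i32, i32* %a, i64 %idx",
  // %idx is the only non-constant index; if it can be shown below to derive
  // from an NSW AddRec for this loop, the pointer value itself cannot wrap.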
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    if (!isa<ConstantInt>(*Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = *Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a
  // NSW AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = SE->getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
int llvm::isStridedPtr(PredicatedScalarEvolution &PSE, Value *Ptr,
                       const Loop *Lp, const ValueToValueMap &StridesMap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                 << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked
  // later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, PSE.getSE(), Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // Check that the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined
  // behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1)
    return 0;

  return Stride;
}

/// Take the pointer operand from the Load/Store instruction.
/// Returns NULL if this is not a valid Load/Store instruction.
static Value *getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  // OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  // Both offsets are constants, so the delta is guaranteed to fold to a
  // SCEVConstant.
  const SCEVConstant *OffsetDeltaC = cast<SCEVConstant>(OffsetDeltaSCEV);
  const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
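  // For example, two i32 loads from %base and %base + 4 strip down to the
  // same pointer with OffsetDelta == 4, which equals the store size of i32.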
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final
  // delta equal to the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}

bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
                                                    unsigned TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding, making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  // hence on your typical architecture store-load forwarding does not take
  // place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
  const unsigned NumCyclesForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
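  // Expressed in bytes: MaxVectorWidth lanes of TypeByteSize each. The
  // search below can only shrink this value.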
  unsigned MaxVFWithoutSLForwardIssues =
      VectorizerParams::MaxVectorWidth * TypeByteSize;
  if (MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
    MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;

  for (unsigned vf = 2 * TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
       vf *= 2) {
    if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (vf >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

/// \brief Check the dependence for two accesses with the same stride
/// \p Stride.  \p Distance is the positive distance and \p TypeByteSize is
/// the type size in bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride,
                                          unsigned TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  unsigned ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  const SCEV *AScev = replaceSymbolicStrideSCEV(PSE, Strides, APtr);
  const SCEV *BScev = replaceSymbolicStrideSCEV(PSE, Strides, BPtr);

  int StrideAPtr = isStridedPtr(PSE, APtr, InnermostLoop, Strides);
  int StrideBPtr = isStridedPtr(PSE, BPtr, InnermostLoop, Strides);

  const SCEV *Src = AScev;
  const SCEV *Sink = BScev;

  // If the induction step is negative we have to invert source and sink of
  // the dependence.
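  // (Swapping normalizes the pair so that the distance computed below has
  // the same sign convention as in the positive-step case.)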
  if (StrideAPtr < 0) {
    //Src = BScev;
    //Sink = AScev;
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap
  // in the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  unsigned TypeByteSize = DL.getTypeAllocSize(ATy);

  // Negative distances are not plausible dependencies.
  const APInt &Val = C->getAPInt();
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(),
                                      TypeByteSize) ||
         ATy != BTy))
      return Dependence::ForwardButPreventsForwarding;

    DEBUG(dbgs() << "LAA: Dependence is negative: NoDep\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs()
          << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  unsigned Distance = (unsigned)Val.getZExtValue();

  unsigned Stride = std::abs(StrideAPtr);
  if (Stride > 1 &&
      areStridedAccessesIndependent(Distance, Stride, TypeByteSize)) {
    DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor
                               ? VectorizerParams::VectorizationFactor
                               : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave
                               ? VectorizerParams::VectorizationInterleave
                               : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum
  // distance needed for a vectorized/unrolled version. Vectorizing one
  // iteration in front needs TypeByteSize * Stride. Vectorizing the last
  // iteration needs TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // The distance needed for vectorizing all iterations except the last is
  // 4 * 2 * (MinNumIter - 1); the distance needed for the last iteration is
  // 4. So the minimum distance needed is 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than the distance.
  //
  // If MinNumIter is 4 (say if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than the
  // distance. It is not safe to do vectorization.
  unsigned MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > Distance) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than the max safe
  // distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which
  // could not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the accesses on array B first, the max safe dependence distance
  // is 2. Then when we analyze the accesses on array A, the minimum distance
  // needed is 8, which exceeds the max safe distance of 2 and forbids
  // vectorization. But actually both A and B could be vectorized with a
  // factor of 2 iterations.
  MaxSafeDepDistBytes =
      Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes;

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1U;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE;
    AI = AccessSets.member_begin(I), AE = AccessSets.member_end();

    // Check every access pair.
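    // The two nested iterations below visit each unordered pair of accesses
    // in the equivalence class exactly once (OI always starts just past AI).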
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
                                             I1E = Accesses[*AI].end();
             I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
                                               I2E = Accesses[*OI].end();
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return SafeForVectorization;
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  std::transform(IndexVector.begin(), IndexVector.end(),
                 std::back_inserter(Insts),
                 [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop in "
               << TheLoop->getHeader()->getParent()->getName() << ": "
               << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(LoopAccessReport()
                 << "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(LoopAccessReport()
                 << "loop control flow is not understood by analyzer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(LoopAccessReport()
                 << "loop control flow is not understood by analyzer");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getSE()->getBackedgeTakenCount(TheLoop);
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {

  typedef SmallVector<Value *, 16> ValueVector;
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store *instructions*.
  ValueVector Loads;
  ValueVector Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking.Pointers.clear();
  PtrRtChecking.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
                            be = TheLoop->block_end();
       bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (it->mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        CallInst *Call = dyn_cast<CallInst>(it);
        if (Call && getIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        LoadInst *Ld = dyn_cast<LoadInst>(it);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker.addAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (it->mayWriteToMemory()) {
        StoreInst *St = dyn_cast<StoreInst>(it);
        if (!St) {
          emitAnalysis(LoopAccessReport(&*it)
                       << "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker.addAccess(St);
      }
    } // Next instr.
  }   // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses, PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  ValueVector::iterator I, IE;
  for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
    StoreInst *ST = cast<StoreInst>(*I);
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: A loop annotated parallel, ignore memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
    LoadInst *LD = cast<LoadInst>(*I);
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x; Because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second || !isStridedPtr(PSE, Ptr, TheLoop, Strides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this
  // information to place a runtime bound check.
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(PtrRtChecking, PSE.getSE(), TheLoop, Strides);
  if (!CanDoRTIfNeeded) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker.areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
    MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(DepChecker);

      PtrRtChecking.reset();
      PtrRtChecking.Need = true;

      auto *SE = PSE.getSE();
      CanDoRTIfNeeded =
          Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        emitAnalysis(LoopAccessReport()
                     << "cannot check memory dependencies at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (PtrRtChecking.Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    emitAnalysis(LoopAccessReport() <<
                 "unsafe dependent memory operations in loop");
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
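  // Illustrative sketch (an assumption about the intended use, not original
  // text): in a loop body such as
  //
  //   for.body:   br i1 %c, label %if.then, label %for.latch
  //   if.then:    ...          ; executes only when %c holds
  //   for.latch:  br i1 %e, label %for.body, label %exit
  //
  // if.then does not dominate the latch, so memory accesses in it must be
  // predicated when the body is later flattened for vectorization.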
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return (PSE.getSE()->isLoopInvariant(PSE.getSE()->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

namespace {
/// \brief IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};
} // end anonymous namespace

/// \brief Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
                 << "\n");
    return {Ptr, Ptr};
  } else {
    unsigned AS = Ptr->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = Loc->getContext();

    // Use this type for pointer arithmetic.
    Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
    Value *Start = nullptr, *End = nullptr;

    DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
    return {Start, End};
  }
}

/// \brief Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
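  // Note (explanatory addition): each PointerCheck pairs two
  // CheckingPtrGroup pointers, so the lambda below expands one (Start, End)
  // interval for each side of a check; repeated groups hit the expander
  // cache mentioned above instead of emitting fresh code.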
  std::transform(
      PointerChecks.begin(), PointerChecks.end(),
      std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
          First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
          Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {
  auto *SE = PSE.getSE();
  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) <= end(B) && start(B) <= end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the block.
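  // Explanatory note (added): 'and i1 %x, true' is semantically %x, but
  // building it with BinaryOperator::CreateAnd rather than
  // ChkBuilder.CreateAnd bypasses the IRBuilder's constant folding, so a
  // real Instruction is always materialized and inserted. That instruction
  // then serves as a stable anchor even when MemoryRuntimeCheck itself
  // folded to a constant.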
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking.Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking.getChecks());
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI,
                               const ValueToValueMap &Strides)
    : PSE(*SE), PtrRtChecking(SE), DepChecker(PSE, L), TheLoop(L), DL(DL),
      TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
      MaxSafeDepDistBytes(-1U), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(Strides);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    if (PtrRtChecking.Need)
      OS.indent(Depth) << "Memory dependences are safe with run-time checks\n";
    else
      OS.indent(Depth) << "Memory dependences are safe\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  if (auto *Dependences = DepChecker.getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking.print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE.getUnionPredicate().print(OS, Depth);
}

const LoopAccessInfo &
LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
  auto &LAI = LoopAccessInfoMap[L];

#ifndef NDEBUG
  assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
         "Symbolic strides changed for loop");
#endif

  if (!LAI) {
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    LAI =
        llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI, Strides);
#ifndef NDEBUG
    LAI->NumSymbolicStrides = Strides.size();
#endif
  }
  return *LAI.get();
}

void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);

  ValueToValueMap NoSymbolicStrides;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
      LAI.print(OS, 4);
    }
}

bool LoopAccessAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
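  // Note (added): TargetLibraryInfo is only requested with
  // getAnalysisIfAvailable above, so TLI may legitimately be null when no
  // TargetLibraryInfoWrapperPass is registered.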
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)

namespace llvm {
Pass *createLAAPass() {
  return new LoopAccessAnalysis();
}
}
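// Usage sketch (illustrative; assumes the usual 'opt' conventions for a
// function analysis pass registered under LAA_NAME):
//
//   opt -loop-accesses -analyze input.ll
//
// runs the pass and dumps the per-loop report produced by
// LoopAccessAnalysis::print above.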