//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// \brief The maximum number of comparisons performed when trying to merge
/// memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// \brief We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Function *TheFunction,
                                    const Loop *TheLoop,
                                    const char *PassName) {
  DebugLoc DL = TheLoop->getStartLoc();
  if (const Instruction *I = Message.getInstr())
    DL = I->getDebugLoc();
  emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
                                 *TheFunction, DL, Message.str());
}

Value *llvm::stripIntegerCast(Value *V) {
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
                                            const ValueToValueMap &PtrToStride,
                                            SCEVUnionPredicate &Preds,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = SE->getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    Preds.add(SE->getEqualPredicate(U, CT));

    const SCEV *ByOne = SE->rewriteUsingPredicate(OrigSCEV, Preds);
    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
                 << "\n");
    return ByOne;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}
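// For example (an illustrative sketch, not a specific test case): given a
// pointer whose SCEV is {%a,+,%stride}<%loop> and a PtrToStride entry mapping
// the pointer to the symbolic stride %stride, the function returns
// {%a,+,1}<%loop> and records the predicate "%stride == 1" in Preds. Any
// transformation that relies on the rewritten SCEV must therefore also emit a
// runtime check of that predicate.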
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    SCEVUnionPredicate &Preds) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
  assert(AR && "Invalid addrec expression");
  const SCEV *Ex = SE->getBackedgeTakenCount(Lp);

  const SCEV *ScStart = AR->getStart();
  const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // For expressions with negative step, the upper bound is ScStart and the
  // lower bound is ScEnd.
  if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) {
    if (CStep->getValue()->isNegative())
      std::swap(ScStart, ScEnd);
  } else {
    // Fallback case: the step is not constant, but we can still
    // get the upper and lower bounds of the interval by using min/max
    // expressions.
    ScStart = SE->getUMinExpr(ScStart, ScEnd);
    ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}
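// An illustrative example with made-up values: if the group's current bounds
// are [%a, %a + 32) and the candidate pointer covers [%a + 16, %a + 48), both
// the start and end differences are SCEVConstants, so addPointer succeeds and
// the group grows to [%a, %a + 48). If instead the candidate started at
// %a + 16 * %n, the difference is not constant, getMinFromExprs returns
// nullptr, and the pointer has to go into a different group.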
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups.
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(),
              std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}
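// For reference, the output of the two routines above looks roughly like this
// (group addresses and value names here are illustrative only):
//
//   Run-time memory checks:
//   Check 0:
//     Comparing group (0x...):
//     %arrayidx = getelementptr inbounds i32, i32* %a, i64 %ind
//     Against group (0x...):
//     %arrayidx4 = getelementptr inbounds i32, i32* %b, i64 %ind
//   Grouped accesses:
//     Group 0x...:
//       (Low: %a High: (400 + %a))
//         Member: {%a,+,4}<%for.body>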
namespace {
/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 SCEVUnionPredicate &Preds)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
        Preds(Preds) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckStride = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is
  /// needed and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  SCEVUnionPredicate &Preds;
};

} // end anonymous namespace
/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(ScalarEvolution *SE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, SCEVUnionPredicate &Preds) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}
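// In other words (an illustrative sketch): a pointer whose SCEV is the affine
// AddRec {%a,+,4}<%loop> has computable bounds, while a pointer reloaded from
// memory on every iteration (so its SCEV is a plain SCEVUnknown rather than
// an AddRec) does not.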
bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckStride) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different alias sets don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(SE, StridesMap, Ptr, TheLoop, Preds) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckStride ||
           isStridedPtr(SE, Ptr, TheLoop, StridesMap, Preds) == 1)) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, Preds);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need for a check if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a
    // pointer for which we couldn't find the bounds but we don't actually need
    // to emit any checks so it does not matter.
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 &&
                                                 NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization
          // to catch "a[i] = a[i] + " without having to do a dependence
          // check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // A nullptr never aliases anything; don't join sets for pointers
            // that have "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}
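// To illustrate the deferral (a sketch): in "a[i] = a[i] + b[i]" the accesses
// to a are handled in the first round, where the store marks the alias set as
// containing a write. The read-only load of b[i] is deferred to the second
// round, so by the time it is processed we already know whether any write in
// the set could conflict with it; reads are never tested against other reads.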
static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           ScalarEvolution *SE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    if (!isa<ConstantInt>(*Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = *Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = SE->getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}
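// For instance (an illustrative sketch in textual IR):
//
//   %add = add nsw i64 %iv, 1
//   %gep = getelementptr inbounds i32, i32* %a, i64 %add
//
// Even if SCEV did not tag the pointer AddRec with no-wrap flags, %gep is
// inbounds, %add is an NSW operation with a constant second operand, and %iv
// is an NSW AddRec of the enclosing loop, so isNoWrapAddRec returns true.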
/// \brief Check whether the access through \p Ptr has a constant stride.
int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
                       const ValueToValueMap &StridesMap,
                       SCEVUnionPredicate &Preds) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                 << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Preds, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, SE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getValue()->getValue();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1)
    return 0;

  return Stride;
}

bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
                                                    unsigned TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
  const unsigned NumCyclesForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  unsigned MaxVFWithoutSLForwardIssues =
      VectorizerParams::MaxVectorWidth * TypeByteSize;
  if (MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
    MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;

  for (unsigned vf = 2 * TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
       vf *= 2) {
    if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (vf >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
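// A worked example of the loop above (hypothetical numbers): with
// TypeByteSize = 4 and Distance = 12, the first candidate factor is vf = 8;
// 12 % 8 != 0 and 12 / 8 = 1 is well below the threshold of 32, so the
// maximum safe factor drops to 4. Since 4 < 2 * TypeByteSize, the function
// reports a possible store-load forwarding conflict.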
/// \brief Check the dependence for two accesses with the same stride
/// \p Stride. \p Distance is the positive distance and \p TypeByteSize is
/// type size in bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride,
                                          unsigned TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  unsigned ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, APtr);
  const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, BPtr);

  int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides, Preds);
  int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides, Preds);

  const SCEV *Src = AScev;
  const SCEV *Sink = BScev;

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    //Src = BScev;
    //Sink = AScev;
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
  // the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  unsigned TypeByteSize = DL.getTypeAllocSize(ATy);

  // Negative distances are not plausible dependencies.
  const APInt &Val = C->getValue()->getValue();
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(),
                                      TypeByteSize) ||
         ATy != BTy))
      return Dependence::ForwardButPreventsForwarding;

    DEBUG(dbgs() << "LAA: Dependence is negative: NoDep\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs()
          << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  unsigned Distance = (unsigned)Val.getZExtValue();

  unsigned Stride = std::abs(StrideAPtr);
  if (Stride > 1 &&
      areStridedAccessesIndependent(Distance, Stride, TypeByteSize)) {
    DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration ahead
  // needs TypeByteSize * Stride bytes; vectorizing the last iteration needs
  // only TypeByteSize (no need to add the final gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // Distance needed for vectorizing iterations except the last iteration:
  // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than distance.
  //
  // If MinNumIter is 4 (Say if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than distance. It
  // is not safe to do vectorization.
  unsigned MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > Distance) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which
  // cannot handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, the minimum distance
  // needed is 8, which is greater than 2, so vectorization is forbidden. But
  // actually both A and B could be vectorized by 2 iterations.
  MaxSafeDepDistBytes =
      Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes;

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}
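// To illustrate isDependent's classification (a sketch assuming 4-byte i32
// elements and unit stride):
//
//   for (i = 0; i < N; ++i) A[i] = A[i+1];  // load A[i+1], then store A[i]:
//                                           // distance -4 -> Forward.
//   for (i = 0; i < N; ++i) A[i+1] = A[i];  // load A[i], then store A[i+1]:
//                                           // distance +4 is below the
//                                           // minimum needed -> Backward.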
bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1U;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE;
    AI = AccessSets.member_begin(I), AE = AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return SafeForVectorization;
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  std::transform(IndexVector.begin(), IndexVector.end(),
                 std::back_inserter(Insts),
                 [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop: " <<
        TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
  if (ExitCount == SE->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport() <<
                 "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
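// For example (an illustrative sketch): a loop such as
//
//   while (n--) { if (c[n]) break; a[n] = 0; }
//
// has a second exiting block for the break, so getExitingBlock() returns null
// and the loop is rejected above, even though each individual access might be
// analyzable.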
void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {

  typedef SmallVector<Value *, 16> ValueVector;
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store *instructions*.
  ValueVector Loads;
  ValueVector Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking.Pointers.clear();
  PtrRtChecking.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (it->mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        CallInst *Call = dyn_cast<CallInst>(it);
        if (Call && getIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        LoadInst *Ld = dyn_cast<LoadInst>(it);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker.addAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (it->mayWriteToMemory()) {
        StoreInst *St = dyn_cast<StoreInst>(it);
        if (!St) {
          emitAnalysis(LoopAccessReport(&*it) <<
                       "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker.addAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses, Preds);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  ValueVector::iterator I, IE;
  for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
    StoreInst *ST = cast<StoreInst>(*I);
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: A loop annotated parallel, ignore memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
    LoadInst *LD = cast<LoadInst>(*I);
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !isStridedPtr(SE, Ptr, TheLoop, Strides, Preds)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides);
  if (!CanDoRTIfNeeded) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker.areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
    MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(DepChecker);

      PtrRtChecking.reset();
      PtrRtChecking.Need = true;

      CanDoRTIfNeeded =
          Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        emitAnalysis(LoopAccessReport()
                     << "cannot check memory dependencies at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (PtrRtChecking.Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    emitAnalysis(LoopAccessReport() <<
                 "unsafe dependent memory operations in loop");
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}
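// For example (a sketch): in
//
//   for (i = 0; i < n; ++i)
//     if (c[i])
//       a[i] = 0;
//
// the block containing the store does not dominate the loop latch, so
// blockNeedsPredication returns true for it and its TBAA metadata is ignored
// at the call sites above when building the runtime checks.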
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

namespace {
/// \brief IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};
} // end anonymous namespace

/// \brief Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
                 << "\n");
    return {Ptr, Ptr};
  } else {
    unsigned AS = Ptr->getType()->getPointerAddressSpace();
    LLVMContext &Ctx = Loc->getContext();

    // Use this type for pointer arithmetic.
    Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
    Value *Start = nullptr, *End = nullptr;

    DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
    return {Start, End};
  }
}

/// \brief Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
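  //
  // For example (hypothetical groups): if group %G is checked against both
  // %H1 and %H2, the expandCodeFor calls for %G's Low/High SCEVs in the
  // second check return the values already emitted for the first one instead
  // of expanding them again.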
  std::transform(
      PointerChecks.begin(), PointerChecks.end(),
      std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
          First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
          Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {

  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) <= end(B) && start(B) <= end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the block.
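  //
  // For a single pair of pointers, the emitted IR has roughly this shape
  // (a sketch; the value names follow the ones used above):
  //
  //   %bound0 = icmp ule i8* %a.start, %b.end
  //   %bound1 = icmp ule i8* %b.start, %a.end
  //   %found.conflict = and i1 %bound0, %bound1
  //   %memcheck.conflict = and i1 %found.conflict, true
  //
  // The final 'and ... true' is semantically a no-op, but it guarantees that
  // at least one Instruction exists in the block to return as the check.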
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking.Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking.getChecks());
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI,
                               const ValueToValueMap &Strides)
    : PtrRtChecking(SE), DepChecker(SE, L, Preds), TheLoop(L), SE(SE), DL(DL),
      TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
      MaxSafeDepDistBytes(-1U), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(Strides);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    if (PtrRtChecking.Need)
      OS.indent(Depth) << "Memory dependences are safe with run-time checks\n";
    else
      OS.indent(Depth) << "Memory dependences are safe\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  if (auto *Dependences = DepChecker.getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking.print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  Preds.print(OS, Depth);
}

const LoopAccessInfo &
LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
  auto &LAI = LoopAccessInfoMap[L];

#ifndef NDEBUG
  assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
         "Symbolic strides changed for loop");
#endif

  if (!LAI) {
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    LAI =
        llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI, Strides);
#ifndef NDEBUG
    LAI->NumSymbolicStrides = Strides.size();
#endif
  }
  return *LAI.get();
}

void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);

  ValueToValueMap NoSymbolicStrides;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
      LAI.print(OS, 4);
    }
}

bool LoopAccessAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)

namespace llvm {
Pass *createLAAPass() {
  return new LoopAccessAnalysis();
}
} // namespace llvm
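
// A minimal usage sketch for a client pass (hypothetical; MyLoopPass and its
// members are illustrative and not part of this file):
//
//   void MyLoopPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LoopAccessAnalysis>();
//   }
//
//   bool MyLoopPass::runOnLoop(Loop *L, LPPassManager &) {
//     auto &LAA = getAnalysis<LoopAccessAnalysis>();
//     ValueToValueMap NoSymbolicStrides;
//     const LoopAccessInfo &LAI = LAA.getInfo(L, NoSymbolicStrides);
//     if (LAI.canVectorizeMemory()) {
//       // Memory operations may be reordered, possibly after emitting the
//       // runtime checks via LAI.addRuntimeChecks(InsertionPoint).
//     }
//     return false;
//   }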