//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// \brief The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// \brief We collect interesting dependences up to this threshold.
static cl::opt<unsigned> MaxInterestingDependence(
    "max-interesting-dependences", cl::Hidden,
    cl::desc("Maximum number of interesting dependences collected by "
             "loop-access analysis (default = 100)"),
    cl::init(100));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Function *TheFunction,
                                    const Loop *TheLoop,
                                    const char *PassName) {
  DebugLoc DL = TheLoop->getStartLoc();
  if (const Instruction *I = Message.getInstr())
    DL = I->getDebugLoc();
  emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
                                 *TheFunction, DL, Message.str());
}

Value *llvm::stripIntegerCast(Value *V) {
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = SE->getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    const SCEV *ByOne =
        SCEVParameterRewriter::rewrite(OrigSCEV, *SE, RewriteMap, true);
    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
                 << "\n");
    return ByOne;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}

void LoopAccessInfo::RuntimePointerCheck::insert(
    Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId, unsigned ASId,
    const ValueToValueMap &Strides) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
  assert(AR && "Invalid addrec expression");
  const SCEV *Ex = SE->getBackedgeTakenCount(Lp);
  const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
  Pointers.push_back(Ptr);
  Starts.push_back(AR->getStart());
  Ends.push_back(ScEnd);
  IsWritePtr.push_back(WritePtr);
  DependencySetId.push_back(DepSetId);
  AliasSetId.push_back(ASId);
  Exprs.push_back(Sc);
}

bool LoopAccessInfo::RuntimePointerCheck::needsChecking(
    const CheckingPtrGroup &M, const CheckingPtrGroup &N,
    const SmallVectorImpl<int> *PtrPartition) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J], PtrPartition))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
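///
/// For example (illustrative values): with I = (%n + 4) and J = %n, the
/// difference J - I folds to the constant -4, which is negative, so J is the
/// minimum. If the difference does not fold to a SCEVConstant we cannot tell
/// which expression is smaller and return nullptr.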
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool LoopAccessInfo::RuntimePointerCheck::CheckingPtrGroup::addPointer(
    unsigned Index) {
  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(RtCheck.Starts[Index], Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(RtCheck.Ends[Index], High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == RtCheck.Starts[Index])
    Low = RtCheck.Starts[Index];

  // Update the high bound expression if we've found a new max value.
  if (Min1 != RtCheck.Ends[Index])
    High = RtCheck.Ends[Index];

  Members.push_back(Index);
  return true;
}

void LoopAccessInfo::RuntimePointerCheck::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Pointer = 0; Pointer < Pointers.size(); ++Pointer)
    PositionMap[Pointers[Pointer]] = Pointer;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I], IsWritePtr[I]);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain number of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(),
              std::back_inserter(CheckingGroups));
  }
}

bool LoopAccessInfo::RuntimePointerCheck::needsChecking(
    unsigned I, unsigned J, const SmallVectorImpl<int> *PtrPartition) const {
  // No need to check if two readonly pointers intersect.
  if (!IsWritePtr[I] && !IsWritePtr[J])
    return false;

  // Only need to check pointers between two different dependency sets.
  if (DependencySetId[I] == DependencySetId[J])
    return false;

  // Only need to check pointers in the same alias set.
  if (AliasSetId[I] != AliasSetId[J])
    return false;

  // If PtrPartition is set omit checks between pointers of the same partition.
  // Partition number -1 means that the pointer is used in multiple partitions.
  // In this case we can't omit the check.
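  //
  // For example (illustrative values): with PtrPartition = {0, 0, -1}, the
  // check between pointers 0 and 1 can be omitted since both are only used in
  // partition 0, while pointer 2 is used in multiple partitions and must
  // still be checked against the others.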
  if (PtrPartition && (*PtrPartition)[I] != -1 &&
      (*PtrPartition)[I] == (*PtrPartition)[J])
    return false;

  return true;
}

void LoopAccessInfo::RuntimePointerCheck::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<int> *PtrPartition) const {

  OS.indent(Depth) << "Run-time memory checks:\n";

  unsigned N = 0;
  for (unsigned I = 0; I < CheckingGroups.size(); ++I)
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J)
      if (needsChecking(CheckingGroups[I], CheckingGroups[J], PtrPartition)) {
        OS.indent(Depth) << "Check " << N++ << ":\n";
        OS.indent(Depth + 2) << "Comparing group " << I << ":\n";

        for (unsigned K = 0; K < CheckingGroups[I].Members.size(); ++K) {
          OS.indent(Depth + 2) << *Pointers[CheckingGroups[I].Members[K]]
                               << "\n";
          if (PtrPartition)
            OS << " (Partition: "
               << (*PtrPartition)[CheckingGroups[I].Members[K]] << ")"
               << "\n";
        }

        OS.indent(Depth + 2) << "Against group " << J << ":\n";

        for (unsigned K = 0; K < CheckingGroups[J].Members.size(); ++K) {
          OS.indent(Depth + 2) << *Pointers[CheckingGroups[J].Members[K]]
                               << "\n";
          if (PtrPartition)
            OS << " (Partition: "
               << (*PtrPartition)[CheckingGroups[J].Members[K]] << ")"
               << "\n";
        }
      }

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    OS.indent(Depth + 2) << "Group " << I << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CheckingGroups[I].Low
                         << " High: " << *CheckingGroups[I].High << ")\n";
    for (unsigned J = 0; J < CheckingGroups[I].Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: "
                           << *Exprs[CheckingGroups[I].Members[J]] << "\n";
    }
  }
}

unsigned LoopAccessInfo::RuntimePointerCheck::getNumberOfChecks(
    const SmallVectorImpl<int> *PtrPartition) const {

  unsigned NumPartitions = CheckingGroups.size();
  unsigned CheckCount = 0;

  for (unsigned I = 0; I < NumPartitions; ++I)
    for (unsigned J = I + 1; J < NumPartitions; ++J)
      if (needsChecking(CheckingGroups[I], CheckingGroups[J], PtrPartition))
        CheckCount++;
  return CheckCount;
}

bool LoopAccessInfo::RuntimePointerCheck::needsAnyChecking(
    const SmallVectorImpl<int> *PtrPartition) const {
  unsigned NumPointers = Pointers.size();

  for (unsigned I = 0; I < NumPointers; ++I)
    for (unsigned J = I + 1; J < NumPointers; ++J)
      if (needsChecking(I, J, PtrPartition))
        return true;
  return false;
}

namespace {
/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
                       ScalarEvolution *SE, Loop *TheLoop,
                       const ValueToValueMap &Strides,
                       bool ShouldCheckStride = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearInterestingDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
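///
/// For example (illustrative): a pointer whose stride-replaced SCEV is the
/// affine AddRec {%base,+,4}<%loop> has computable bounds, while a pointer
/// whose SCEV is a non-affine (e.g. quadratic) recurrence, or not an AddRec
/// at all, does not.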
static bool hasComputableBounds(ScalarEvolution *SE,
                                const ValueToValueMap &Strides, Value *Ptr) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}

bool AccessAnalysis::canCheckPtrAtRT(
    LoopAccessInfo::RuntimePointerCheck &RtCheck, ScalarEvolution *SE,
    Loop *TheLoop, const ValueToValueMap &StridesMap, bool ShouldCheckStride) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(SE, StridesMap, Ptr) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckStride ||
           isStridedPtr(SE, Ptr, TheLoop, StridesMap) == 1)) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a
    // pointer for which we couldn't find the bounds but we don't actually need
    // to emit any checks so it does not matter.
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 ||
                      (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
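  //
  // For instance (illustrative), a bound comparison between an addrspace(0)
  // pointer and an addrspace(3) pointer is rejected below because an unsigned
  // comparison across address spaces is not meaningful.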
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.DependencySetId[i] == RtCheck.DependencySetId[j])
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.AliasSetId[i] != RtCheck.AliasSetId[j])
        continue;

      Value *PtrI = RtCheck.Pointers[i];
      Value *PtrJ = RtCheck.Pointers[j];

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.groupChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks(nullptr)
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt()
                     ? "write"
                     : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                          : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization
          // to catch "a[i] = a[i] + " without having to do a dependence
          // check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           ScalarEvolution *SE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    if (!isa<ConstantInt>(*Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = *Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = SE->getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
                       const ValueToValueMap &StridesMap) {
  const Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  const PointerType *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr
                 << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, SE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getValue()->getValue();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
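  //
  // For example (illustrative), an access like A[3 * i] through an inbounds
  // GEP is still rejected here unless SCEV already proved the AddRec
  // non-wrapping, whereas a unit-stride access like A[i] is accepted.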
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1)
    return 0;

  return Stride;
}

bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isInterestingDependence(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
    return false;

  case BackwardVectorizable:
  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
    return false;

  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
                                                    unsigned TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
  const unsigned NumCyclesForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  unsigned MaxVFWithoutSLForwardIssues =
      VectorizerParams::MaxVectorWidth * TypeByteSize;
  if (MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
    MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;

  for (unsigned vf = 2 * TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
       vf *= 2) {
    if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (vf >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

/// \brief Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
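///
/// For example (illustrative values, matching the diagrams in the body):
/// with TypeByteSize = 4, Distance = 8 and Stride = 4 the scaled distance is
/// 2, and 2 % 4 != 0, so the two access streams never touch the same element
/// and are independent.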
static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride,
                                          unsigned TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  unsigned ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
  const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);

  int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides);
  int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides);

  const SCEV *Src = AScev;
  const SCEV *Sink = BScev;

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << " Sink Scev: " << *Sink
               << " (Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
  // the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  unsigned TypeByteSize = DL.getTypeAllocSize(ATy);

  // Negative distances are not plausible dependencies.
  const APInt &Val = C->getValue()->getValue();
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(),
                                      TypeByteSize) ||
         ATy != BTy))
      return Dependence::ForwardButPreventsForwarding;

    DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::NoDep;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs()
          << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  unsigned Distance = (unsigned)Val.getZExtValue();

  unsigned Stride = std::abs(StrideAPtr);
  if (Stride > 1 &&
      areStridedAccessesIndependent(Distance, Stride, TypeByteSize)) {
    DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // Distance needed for vectorizing iterations except the last iteration:
  // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than distance.
  //
  // If MinNumIter is 4 (say if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than distance.
  // It is not safe to do vectorization.
  unsigned MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > Distance) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " bytes of distance\n");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which could
  // not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, the minimum distance needed
  // is 8, which is greater than 2, so vectorization is forbidden. But actually
  // both A and B could be vectorized by 2 iterations.
  MaxSafeDepDistBytes =
      Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes;

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1U;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE;
    AI = AccessSets.member_begin(I), AE = AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated
            // MaxInterestingDependence dependences. In that case return as
            // soon as we find the first unsafe dependence. This puts a limit
            // on this quadratic algorithm.
            if (RecordInterestingDependences) {
              if (Dependence::isInterestingDependence(Type))
                InterestingDependences.push_back(
                    Dependence(A.second, B.second, Type));

              if (InterestingDependences.size() >= MaxInterestingDependence) {
                RecordInterestingDependences = false;
                InterestingDependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordInterestingDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  DEBUG(dbgs() << "Total Interesting Dependences: "
               << InterestingDependences.size() << "\n");
  return SafeForVectorization;
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  std::transform(IndexVector.begin(), IndexVector.end(),
                 std::back_inserter(Insts),
                 [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop: "
               << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport()
        << "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport()
        << "loop control flow is not understood by analyzer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport()
        << "loop control flow is not understood by analyzer");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
  if (ExitCount == SE->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {

  typedef SmallVector<Value *, 16> ValueVector;
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store *instructions*.
  ValueVector Loads;
  ValueVector Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtCheck.Pointers.clear();
  PtrRtCheck.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (it->mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        CallInst *Call = dyn_cast<CallInst>(it);
        if (Call && getIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        LoadInst *Ld = dyn_cast<LoadInst>(it);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker.addAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (it->mayWriteToMemory()) {
        StoreInst *St = dyn_cast<StoreInst>(it);
        if (!St) {
          emitAnalysis(LoopAccessReport(it)
                       << "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker.addAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  ValueVector::iterator I, IE;
  for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
    StoreInst *ST = cast<StoreInst>(*I);
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it into the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: A loop annotated parallel, ignore memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
    LoadInst *LD = cast<LoadInst>(*I);
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it into the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x;  Because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second || !isStridedPtr(SE, Ptr, TheLoop, Strides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(PtrRtCheck, SE, TheLoop, Strides);
  if (!CanDoRTIfNeeded) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker.areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
    MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(DepChecker);

      PtrRtCheck.reset();
      PtrRtCheck.Need = true;

      CanDoRTIfNeeded =
          Accesses.canCheckPtrAtRT(PtrRtCheck, SE, TheLoop, Strides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        emitAnalysis(LoopAccessReport()
                     << "cannot check memory dependencies at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (PtrRtCheck.Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    emitAnalysis(LoopAccessReport()
                 << "unsafe dependent memory operations in loop");
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
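  //
  // For example (illustrative), in
  //   for (...) { if (c) A[i] = 0; }
  // the block holding the store does not dominate the latch, since it is only
  // executed when 'c' holds, so its accesses must be predicated.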
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck(
    Instruction *Loc, const SmallVectorImpl<int> *PtrPartition) const {
  if (!PtrRtCheck.Need)
    return std::make_pair(nullptr, nullptr);

  SmallVector<TrackingVH<Value>, 2> Starts;
  SmallVector<TrackingVH<Value>, 2> Ends;

  LLVMContext &Ctx = Loc->getContext();
  SCEVExpander Exp(*SE, DL, "induction");
  Instruction *FirstInst = nullptr;

  for (unsigned i = 0; i < PtrRtCheck.CheckingGroups.size(); ++i) {
    const RuntimePointerCheck::CheckingPtrGroup &CG =
        PtrRtCheck.CheckingGroups[i];
    Value *Ptr = PtrRtCheck.Pointers[CG.Members[0]];
    const SCEV *Sc = SE->getSCEV(Ptr);

    if (SE->isLoopInvariant(Sc, TheLoop)) {
      DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr: " << *Ptr
                   << "\n");
      Starts.push_back(Ptr);
      Ends.push_back(Ptr);
    } else {
      unsigned AS = Ptr->getType()->getPointerAddressSpace();

      // Use this type for pointer arithmetic.
      Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
      Value *Start = nullptr, *End = nullptr;

      DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
      Start = Exp.expandCodeFor(CG.Low, PtrArithTy, Loc);
      End = Exp.expandCodeFor(CG.High, PtrArithTy, Loc);
      DEBUG(dbgs() << "Start: " << *CG.Low << " End: " << *CG.High << "\n");
      Starts.push_back(Start);
      Ends.push_back(End);
    }
  }

  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
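  //
  // Illustrative note (assumption, not from the source): two closed address
  // ranges [Start0, End0] and [Start1, End1] overlap exactly when
  //
  //   Start0 <= End1 && Start1 <= End0
  //
  // The loop below emits this test with unsigned comparisons for each pair
  // of groups that needs checking and ORs the per-pair results into a single
  // conflict flag, so one branch can select between the vectorized and the
  // scalar version of the loop.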
  Value *MemoryRuntimeCheck = nullptr;
  for (unsigned i = 0; i < PtrRtCheck.CheckingGroups.size(); ++i) {
    for (unsigned j = i + 1; j < PtrRtCheck.CheckingGroups.size(); ++j) {
      const RuntimePointerCheck::CheckingPtrGroup &CGI =
          PtrRtCheck.CheckingGroups[i];
      const RuntimePointerCheck::CheckingPtrGroup &CGJ =
          PtrRtCheck.CheckingGroups[j];

      if (!PtrRtCheck.needsChecking(CGI, CGJ, PtrPartition))
        continue;

      unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
      unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace();

      assert((AS0 == Ends[j]->getType()->getPointerAddressSpace()) &&
             (AS1 == Ends[i]->getType()->getPointerAddressSpace()) &&
             "Trying to bounds check pointers with different address spaces");

      Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
      Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

      Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy0, "bc");
      Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy1, "bc");
      Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy1, "bc");
      Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc");

      Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
      FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
      Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
      FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
      Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      if (MemoryRuntimeCheck) {
        IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
                                         "conflict.rdx");
        FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      }
      MemoryRuntimeCheck = IsConflict;
    }
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression, in which case there is no Instruction anchored in
  // the block.
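  //
  // Illustrative note (assumption, not from the source): if every operand of
  // the checks above folds at build time, MemoryRuntimeCheck may end up being
  // a Constant rather than an Instruction, e.g.
  //
  //   Value *V = Builder.CreateAnd(ConstantInt::getTrue(Ctx),
  //                                ConstantInt::getFalse(Ctx));
  //   // V is a ConstantInt; nothing was inserted into the block.
  //
  // Creating the final AND with 'true' via BinaryOperator::CreateAnd and
  // inserting it by hand guarantees callers get a real Instruction to anchor
  // on.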
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI,
                               const ValueToValueMap &Strides)
    : PtrRtCheck(SE), DepChecker(SE, L), TheLoop(L), SE(SE), DL(DL), TLI(TLI),
      AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
      MaxSafeDepDistBytes(-1U), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(Strides);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    if (PtrRtCheck.Need)
      OS.indent(Depth) << "Memory dependences are safe with run-time checks\n";
    else
      OS.indent(Depth) << "Memory dependences are safe\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  if (auto *InterestingDependences = DepChecker.getInterestingDependences()) {
    OS.indent(Depth) << "Interesting Dependences:\n";
    for (auto &Dep : *InterestingDependences) {
      Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many interesting dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtCheck.print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";
}

const LoopAccessInfo &
LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
  auto &LAI = LoopAccessInfoMap[L];

#ifndef NDEBUG
  assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
         "Symbolic strides changed for loop");
#endif

  if (!LAI) {
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI,
                                            Strides);
#ifndef NDEBUG
    LAI->NumSymbolicStrides = Strides.size();
#endif
  }
  return *LAI.get();
}

void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);

  ValueToValueMap NoSymbolicStrides;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
      LAI.print(OS, 4);
    }
}

bool LoopAccessAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolution>();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolution>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)

namespace llvm {
Pass *createLAAPass() {
  return new LoopAccessAnalysis();
}
} // namespace llvm
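
// Illustrative usage note (assumption, not from the source): since this is a
// function pass that preserves all analyses and implements print(), its
// per-loop results can typically be inspected from the command line with
// something like
//
//   opt -loop-accesses -analyze input.ll
//
// which caches the required analyses in runOnFunction() and then invokes
// print() for each loop, emitting the dependence and runtime-check summary.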