//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// \brief The maximum number of iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// \brief We collect interesting dependences up to this threshold.
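/// Once the limit is hit, recording stops and only the overall safe/unsafe
/// verdict is tracked (see MemoryDepChecker::areDepsSafe).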
static cl::opt<unsigned> MaxInterestingDependence(
    "max-interesting-dependences", cl::Hidden,
    cl::desc("Maximum number of interesting dependences collected by "
             "loop-access analysis (default = 100)"),
    cl::init(100));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Function *TheFunction,
                                    const Loop *TheLoop,
                                    const char *PassName) {
  DebugLoc DL = TheLoop->getStartLoc();
  if (const Instruction *I = Message.getInstr())
    DL = I->getDebugLoc();
  emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
                                 *TheFunction, DL, Message.str());
}

Value *llvm::stripIntegerCast(Value *V) {
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {

  const SCEV *OrigSCEV = SE->getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    const SCEV *ByOne =
        SCEVParameterRewriter::rewrite(OrigSCEV, *SE, RewriteMap, true);
    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
                 << "\n");
    return ByOne;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return SE->getSCEV(Ptr);
}

void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
  assert(AR && "Invalid addrec expression");
  const SCEV *Ex = SE->getBackedgeTakenCount(Lp);

  const SCEV *ScStart = AR->getStart();
  const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // For expressions with negative step, the upper bound is ScStart and the
  // lower bound is ScEnd.
  if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) {
    if (CStep->getValue()->isNegative())
      std::swap(ScStart, ScEnd);
  } else {
    // Fallback case: the step is not constant, but we can still
    // get the upper and lower bounds of the interval by using min/max
    // expressions.
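    // For instance (illustrative values): for {%a,+,%s} where the sign of %s
    // is unknown, [umin(Start, End), umax(Start, End)] covers the accessed
    // range whether %s turns out to be positive or negative.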
    ScStart = SE->getUMinExpr(ScStart, ScEnd);
    ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

bool RuntimePointerChecking::needsChecking(
    const CheckingPtrGroup &M, const CheckingPtrGroup &N,
    const SmallVectorImpl<int> *PtrPartition) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J], PtrPartition))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  // - We know that pointers in the same equivalence class share
  //   the same underlying object and therefore there is a chance
  //   that we can compare pointers
  // - We wouldn't be able to merge two pointers for which we need
  //   to emit a memcheck. The classes in DepCands are already
  //   conveniently built such that no two pointers in the same
  //   class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
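  // For illustration (hypothetical pointers): if an equivalence class holds
  // %a and %a + 4, addPointer() merges them into one group whose Low/High
  // are the smaller start and the larger end; a pointer %b whose distance to
  // the group bounds is not a constant SCEV cannot be compared by
  // getMinFromExprs and therefore starts a group of its own.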
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain number of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(
    unsigned I, unsigned J, const SmallVectorImpl<int> *PtrPartition) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  // If PtrPartition is set omit checks between pointers of the same partition.
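  // (Partitions come from loop distribution: accesses that end up in the same
  // new loop never need runtime checks against each other.)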
  if (PtrPartition && arePointersInSamePartition(*PtrPartition, I, J))
    return false;

  return true;
}

void RuntimePointerChecking::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<int> *PtrPartition) const {

  OS.indent(Depth) << "Run-time memory checks:\n";

  unsigned N = 0;
  for (unsigned I = 0; I < CheckingGroups.size(); ++I)
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J)
      if (needsChecking(CheckingGroups[I], CheckingGroups[J], PtrPartition)) {
        OS.indent(Depth) << "Check " << N++ << ":\n";
        OS.indent(Depth + 2) << "Comparing group " << I << ":\n";

        for (unsigned K = 0; K < CheckingGroups[I].Members.size(); ++K) {
          OS.indent(Depth + 2)
              << *Pointers[CheckingGroups[I].Members[K]].PointerValue << "\n";
          if (PtrPartition)
            OS << " (Partition: "
               << (*PtrPartition)[CheckingGroups[I].Members[K]] << ")"
               << "\n";
        }

        OS.indent(Depth + 2) << "Against group " << J << ":\n";

        for (unsigned K = 0; K < CheckingGroups[J].Members.size(); ++K) {
          OS.indent(Depth + 2)
              << *Pointers[CheckingGroups[J].Members[K]].PointerValue << "\n";
          if (PtrPartition)
            OS << " (Partition: "
               << (*PtrPartition)[CheckingGroups[J].Members[K]] << ")"
               << "\n";
        }
      }

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    OS.indent(Depth + 2) << "Group " << I << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CheckingGroups[I].Low
                         << " High: " << *CheckingGroups[I].High << ")\n";
    for (unsigned J = 0; J < CheckingGroups[I].Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: "
                           << *Pointers[CheckingGroups[I].Members[J]].Expr
                           << "\n";
    }
  }
}

unsigned RuntimePointerChecking::getNumberOfChecks(
    const SmallVectorImpl<int> *PtrPartition) const {

  unsigned NumPartitions = CheckingGroups.size();
  unsigned CheckCount = 0;

  for (unsigned I = 0; I < NumPartitions; ++I)
    for (unsigned J = I + 1; J < NumPartitions; ++J)
      if (needsChecking(CheckingGroups[I], CheckingGroups[J], PtrPartition))
        CheckCount++;
  return CheckCount;
}

bool RuntimePointerChecking::needsAnyChecking(
    const SmallVectorImpl<int> *PtrPartition) const {
  unsigned NumPointers = Pointers.size();

  for (unsigned I = 0; I < NumPointers; ++I)
    for (unsigned J = I + 1; J < NumPointers; ++J)
      if (needsChecking(I, J, PtrPartition))
        return true;
  return false;
}

namespace {
/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
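  /// The pointer is added to the alias sets with UnknownSize so that all
  /// accesses to the same underlying object end up in one set.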
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckStride = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearInterestingDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object
  /// and intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(ScalarEvolution *SE,
                                const ValueToValueMap &Strides, Value *Ptr) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckStride) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(SE, StridesMap, Ptr) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckStride ||
           isStridedPtr(SE, Ptr, TheLoop, StridesMap) == 1)) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer
    // for which we couldn't find the bounds but we don't actually need to emit
    // any checks so it does not matter.
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 &&
                                                 NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
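      // (Pairs that stay in the same dependence set or cross alias sets are
      // never compared at runtime, so differing address spaces are harmless
      // there.)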
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.groupChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks(nullptr)
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts. If
          // this is a read only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           ScalarEvolution *SE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    if (!isa<ConstantInt>(*Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = *Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = SE->getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
                       const ValueToValueMap &StridesMap) {
  const Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  const PointerType *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                 << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
          *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, SE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
          " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getValue()->getValue();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
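  // (With a unit stride every address in the range is touched, so wrapping
  // would force an access at the very end of the address space; a larger
  // stride could step past it without faulting, hence the extra check.)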
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1)
    return 0;

  return Stride;
}

bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isInterestingDependence(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
    return false;

  case BackwardVectorizable:
  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
    return false;

  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
                                                    unsigned TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  // hence on your typical architecture store-load forwarding does not take
  // place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
  const unsigned NumCyclesForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  unsigned MaxVFWithoutSLForwardIssues =
      VectorizerParams::MaxVectorWidth * TypeByteSize;
  if (MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
    MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;

  for (unsigned vf = 2 * TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
       vf *= 2) {
    if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (vf >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

/// \brief Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride,
                                          unsigned TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in bytes must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  unsigned ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
  const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);

  int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides);
  int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides);

  const SCEV *Src = AScev;
  const SCEV *Sink = BScev;

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
  // the address space.
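  // (A zero stride means isStridedPtr could not prove a constant stride;
  // differing strides, e.g. A[i] vs. A[2*i], are also rejected here since the
  // distance between them is not constant across iterations.)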
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  unsigned TypeByteSize = DL.getTypeAllocSize(ATy);

  // Negative distances are not plausible dependencies.
  const APInt &Val = C->getValue()->getValue();
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
         ATy != BTy))
      return Dependence::ForwardButPreventsForwarding;

    DEBUG(dbgs() << "LAA: Dependence is negative: NoDep\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::NoDep;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs() <<
          "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  unsigned Distance = (unsigned) Val.getZExtValue();

  unsigned Stride = std::abs(StrideAPtr);
  if (Stride > 1 &&
      areStridedAccessesIndependent(Distance, Stride, TypeByteSize)) {
    DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                          | B[0] |      | B[2] |      | B[4] |
  //
  // Distance needed for vectorizing iterations except the last iteration:
  // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than the distance of 14.
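  // (Concretely: 4 bytes * stride 2 * (2 - 1) iterations + 4 bytes = 12,
  // while B starts 14 bytes after A.)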
  //
  // If MinNumIter is 4 (say, if a user forces the vectorization factor to be
  // 4), the minimum distance needed is 28, which is greater than the distance
  // of 14. It is not safe to do vectorization.
  unsigned MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > Distance) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " size in bytes\n");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which could
  // not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, the minimum distance needed
  // is 8, which is greater than 2, so vectorization is forbidden. But actually
  // both A and B could be vectorized by 2 iterations.
  MaxSafeDepDistBytes =
      Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes;

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1U;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE;
    AI = AccessSets.member_begin(I), AE = AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
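        // (Each index vector lists the positions of the instructions in
        // InstMap, i.e. their program order; the swap below ensures
        // isDependent always receives the earlier access first.)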
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated MaxInterestingDependence
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordInterestingDependences) {
              if (Dependence::isInterestingDependence(Type))
                InterestingDependences.push_back(
                    Dependence(A.second, B.second, Type));

              if (InterestingDependences.size() >= MaxInterestingDependence) {
                RecordInterestingDependences = false;
                InterestingDependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordInterestingDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  DEBUG(dbgs() << "Total Interesting Dependences: "
               << InterestingDependences.size() << "\n");
  return SafeForVectorization;
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  std::transform(IndexVector.begin(), IndexVector.end(),
                 std::back_inserter(Insts),
                 [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop: " <<
        TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
  if (ExitCount == SE->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport() <<
                 "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {

  typedef SmallVector<Value*, 16> ValueVector;
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store *instructions*.
  ValueVector Loads;
  ValueVector Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking.Pointers.clear();
  PtrRtChecking.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (it->mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        CallInst *Call = dyn_cast<CallInst>(it);
        if (Call && getIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        LoadInst *Ld = dyn_cast<LoadInst>(it);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker.addAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
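      // (Writers that are not plain StoreInsts, e.g. a call such as memset,
      // fail the dyn_cast below and abort the analysis.)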
      if (it->mayWriteToMemory()) {
        StoreInst *St = dyn_cast<StoreInst>(it);
        if (!St) {
          emitAnalysis(LoopAccessReport(it) <<
                       "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker.addAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  ValueVector::iterator I, IE;
  for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
    StoreInst *ST = cast<StoreInst>(*I);
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: A loop annotated parallel, ignore memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
    LoadInst *LD = cast<LoadInst>(*I);
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
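    // (This is why non-strided loads below are forced onto the read list even
    // if their pointer was already seen as a write.)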
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second || !isStridedPtr(SE, Ptr, TheLoop, Strides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides);
  if (!CanDoRTIfNeeded) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker.areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
    MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(DepChecker);

      PtrRtChecking.reset();
      PtrRtChecking.Need = true;

      CanDoRTIfNeeded =
          Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        emitAnalysis(LoopAccessReport()
                     << "cannot check memory dependencies at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (PtrRtChecking.Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    emitAnalysis(LoopAccessReport() <<
                 "unsafe dependent memory operations in loop");
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
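  // (E.g. the body of an 'if (c)' inside the loop does not dominate the
  // latch, so its accesses only execute on some iterations and must be
  // predicated.)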
bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return SE->isLoopInvariant(SE->getSCEV(V), TheLoop);
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck(
    Instruction *Loc, const SmallVectorImpl<int> *PtrPartition) const {
  if (!PtrRtChecking.Need)
    return std::make_pair(nullptr, nullptr);

  SmallVector<TrackingVH<Value>, 2> Starts;
  SmallVector<TrackingVH<Value>, 2> Ends;

  LLVMContext &Ctx = Loc->getContext();
  SCEVExpander Exp(*SE, DL, "induction");
  Instruction *FirstInst = nullptr;

  for (unsigned i = 0; i < PtrRtChecking.CheckingGroups.size(); ++i) {
    const RuntimePointerChecking::CheckingPtrGroup &CG =
        PtrRtChecking.CheckingGroups[i];
    Value *Ptr = PtrRtChecking.Pointers[CG.Members[0]].PointerValue;
    const SCEV *Sc = SE->getSCEV(Ptr);

    if (SE->isLoopInvariant(Sc, TheLoop)) {
      DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
                   << "\n");
      Starts.push_back(Ptr);
      Ends.push_back(Ptr);
    } else {
      unsigned AS = Ptr->getType()->getPointerAddressSpace();

      // Use this type for pointer arithmetic.
      Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
      Value *Start = nullptr, *End = nullptr;

      DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
      Start = Exp.expandCodeFor(CG.Low, PtrArithTy, Loc);
      End = Exp.expandCodeFor(CG.High, PtrArithTy, Loc);
      DEBUG(dbgs() << "Start: " << *CG.Low << " End: " << *CG.High << "\n");
      Starts.push_back(Start);
      Ends.push_back(End);
    }
  }

  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
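  // A minimal sketch of what the emitted test computes (the constants are
  // illustrative): two checking groups with ranges [Start0, End0] and
  // [Start1, End1] may conflict iff
  //   Start0 <= End1 && Start1 <= End0   (unsigned comparisons)
  // e.g. [0x100, 0x140] and [0x120, 0x180] overlap, so the conflict flag is
  // set at run time; [0x100, 0x13f] and [0x140, 0x180] do not, so it is not.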
  Value *MemoryRuntimeCheck = nullptr;
  for (unsigned i = 0; i < PtrRtChecking.CheckingGroups.size(); ++i) {
    for (unsigned j = i + 1; j < PtrRtChecking.CheckingGroups.size(); ++j) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI =
          PtrRtChecking.CheckingGroups[i];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ =
          PtrRtChecking.CheckingGroups[j];

      if (!PtrRtChecking.needsChecking(CGI, CGJ, PtrPartition))
        continue;

      unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
      unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace();

      assert((AS0 == Ends[j]->getType()->getPointerAddressSpace()) &&
             (AS1 == Ends[i]->getType()->getPointerAddressSpace()) &&
             "Trying to bounds check pointers with different address spaces");

      Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
      Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

      Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy0, "bc");
      Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy1, "bc");
      Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy1, "bc");
      Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc");

      Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
      FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
      Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
      FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
      Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      if (MemoryRuntimeCheck) {
        IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
                                         "conflict.rdx");
        FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      }
      MemoryRuntimeCheck = IsConflict;
    }
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the block.
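  // For instance, if both ranges are compile-time constants, the two ULE
  // compares fold to i1 constants and IsConflict may be a Constant rather
  // than an Instruction. And-ing it with 'true' through the static
  // BinaryOperator::CreateAnd always materializes a real Instruction that we
  // can hand back to the caller.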
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI,
                               const ValueToValueMap &Strides)
    : PtrRtChecking(SE), DepChecker(SE, L), TheLoop(L), SE(SE), DL(DL),
      TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
      MaxSafeDepDistBytes(-1U), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(Strides);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    if (PtrRtChecking.Need)
      OS.indent(Depth) << "Memory dependences are safe with run-time checks\n";
    else
      OS.indent(Depth) << "Memory dependences are safe\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  if (auto *InterestingDependences = DepChecker.getInterestingDependences()) {
    OS.indent(Depth) << "Interesting Dependences:\n";
    for (auto &Dep : *InterestingDependences) {
      Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many interesting dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking.print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";
}

const LoopAccessInfo &
LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
  auto &LAI = LoopAccessInfoMap[L];

#ifndef NDEBUG
  assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
         "Symbolic strides changed for loop");
#endif

  if (!LAI) {
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI,
                                            Strides);
#ifndef NDEBUG
    LAI->NumSymbolicStrides = Strides.size();
#endif
  }
  return *LAI;
}

void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);

  ValueToValueMap NoSymbolicStrides;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
      LAI.print(OS, 4);
    }
}

bool LoopAccessAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolution>();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolution>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)

namespace llvm {
Pass *createLAAPass() {
  return new LoopAccessAnalysis();
}
}
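
// A minimal usage sketch (illustrative only; assumes a legacy pass that has
// declared AU.addRequired<LoopAccessAnalysis>() in its getAnalysisUsage and
// the accessors declared in LoopAccessAnalysis.h):
//
//   auto &LAA = getAnalysis<LoopAccessAnalysis>();
//   const LoopAccessInfo &LAI = LAA.getInfo(L, /*Strides=*/ValueToValueMap());
//   if (LAI.canVectorizeMemory()) {
//     // Memory accesses permit vectorization, possibly guarded by the
//     // run-time checks emitted via LAI.addRuntimeCheck(InsertionPoint).
//   }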