//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
                      const DataLayout &DL) {
  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

  if (!BaseAlign) {
    Type *Ty = Base->getType()->getPointerElementType();
    if (!Ty->isSized())
      return false;
    BaseAlign = DL.getABITypeAlignment(Ty);
  }

  APInt Alignment(Offset.getBitWidth(), Align);

  assert(Alignment.isPowerOf2() && "must be a power of 2!");
  return BaseAlign.uge(Alignment) && !(Offset & (Alignment - 1));
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, Align, DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align,
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  assert(Align != 0 && "expected explicitly set alignment");
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  if (!Ty->isSized())
    return false;

  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Align, AccessSize,
                                            DL, CtxI, DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target.
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified, we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads.  The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}

Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store.  This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the
      // loaded value, ignore the store.
      if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}