//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

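// Return the best known alignment of the base pointer: the pointer's own
// alignment if one can be inferred from the value, otherwise the ABI
// alignment of its pointee type (when that type is sized), otherwise None.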
static MaybeAlign getBaseAlign(const Value *Base, const DataLayout &DL) {
  if (const MaybeAlign PA = Base->getPointerAlignment(DL))
    return *PA;
  Type *const Ty = Base->getType()->getPointerElementType();
  if (!Ty->isSized())
    return None;
  return Align(DL.getABITypeAlignment(Ty));
}

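// Return true if the address formed by Base plus Offset bytes is known to be
// aligned to at least Alignment: the base itself must be at least that
// aligned and Offset must be a multiple of Alignment.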
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  if (MaybeAlign BA = getBaseAlign(Base, DL)) {
    const APInt APBaseAlign(Offset.getBitWidth(), BA->value());
    const APInt APAlign(Offset.getBitWidth(), Alignment.value());
    assert(APAlign.isPowerOf2() && "must be a power of 2!");
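    // APAlign is a power of two, so Offset is a multiple of it exactly when
    // the bits below the alignment are all zero, i.e. when
    // (Offset & (APAlign - 1)) == 0.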
    return APBaseAlign.uge(APAlign) && !(Offset & (APAlign - 1));
  }
  return false;
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
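      // For example, if each GEP we looked through advanced the pointer by a
      // multiple of 8 bytes and the base is 8-byte aligned, then every
      // address reached along the way is 8-byte aligned as well.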
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, llvm::Align(Align), DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
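    // For instance, with Offset == 8, Size == 4 and Align == 4: a base that
    // is dereferenceable for 12 bytes and 4-byte aligned makes the GEP
    // dereferenceable for 4 bytes and 4-byte aligned, since 8 is a multiple
    // of 4.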

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  assert(Align != 0 && "expected explicitly set alignment");
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  if (!Ty->isSized())
    return false;

  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Align, AccessSize,
                                            DL, CtxI, DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()));
  unsigned Align = LI->getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(LI->getType());

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Align, EltSize, DL,
                                              HeaderFirstNonPHI, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
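  // A typical example is a loop walking an array with a unit stride, e.g.
  // "for (i = 0; i < n; ++i) sum += a[i];", where each iteration loads the
  // next element starting from a loop-invariant base address.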
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  // TODO: generalize to access patterns which have gaps
  if (Step->getAPInt() != EltSize)
    return false;

  // TODO: If the symbolic trip count has a small bound (max count), we might
  // be able to prove safety.
  auto TC = SE.getSmallConstantTripCount(L);
  if (!TC)
    return false;

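  // With a stride equal to the element size, the loop accesses at most the
  // bytes [Base, Base + TC * EltSize); e.g. 100 iterations over i32 elements
  // cover 400 bytes starting at the base address.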
  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found that warrants it
  if (EltSize.urem(Align) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Align, AccessSize,
                                            DL, HeaderFirstNonPHI, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// The number of bytes that must be safe to load is given by \c Size.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a call which may write to memory (and hence might free the
    // pointer), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default maximum number of instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

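// Scan backwards from the load's position in its block for an earlier load
// of (or store to) the same address whose value can be reused; the actual
// walk is performed by FindAvailablePtrLoadStore below.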
Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}

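// Walk backwards from ScanFrom in ScanBB looking for a load of Ptr, or a
// store to Ptr of a value with a compatible type, giving up after
// MaxInstsToScan instructions or at the first instruction that may clobber
// the memory at Ptr.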
Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the loaded
      // value, ignore the store.
      if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // We reached the start of the block without finding the value; we're done
  // for this block.
  return nullptr;
}