//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

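/// Return true if the address formed by \p Base plus the constant \p Offset is
/// known to be aligned to at least \p Align bytes: the base pointer's known
/// (or ABI-derived) alignment must be at least \p Align, and \p Offset must be
/// a multiple of \p Align.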
static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
                      const DataLayout &DL) {
  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

  if (!BaseAlign) {
    Type *Ty = Base->getType()->getPointerElementType();
    if (!Ty->isSized())
      return false;
    BaseAlign = DL.getABITypeAlignment(Ty);
  }

  APInt Alignment(Offset.getBitWidth(), Align);

  assert(Alignment.isPowerOf2() && "must be a power of 2!");
  return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, Align, DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  assert(Align != 0 && "expected explicitly set alignment");
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  if (!Ty->isSized())
    return false;

  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Align, AccessSize,
                                            DL, CtxI, DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

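/// Return true if we can prove that the load would access only dereferenceable
/// and suitably aligned memory on every iteration of \p L.  At the moment this
/// only handles unit-stride affine address recurrences (step equal to the
/// element size) whose trip count is a known constant.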
bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  APInt StepC = Step->getAPInt();
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()));
  // TODO: generalize to access patterns which have gaps
  // TODO: handle uniform addresses (if not already handled by LICM)
  if (StepC != EltSize)
    return false;

  // TODO: If the symbolic trip count has a small bound (max count), we might
  // be able to prove safety.
  auto TC = SE.getSmallConstantTripCount(L);
  if (!TC)
    return false;

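  // With a step equal to the element size and TC iterations, the loop touches
  // the contiguous byte range [Start, Start + TC * EltSize), so that is the
  // footprint we need to prove dereferenceable and aligned below.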
  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

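  // The loop header dominates every block in the loop, so its first non-PHI
  // instruction is a valid context point for the query: facts established
  // there hold wherever the load executes within the loop.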
  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  unsigned Align = LI->getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(LI->getType());
  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it
  if (EltSize.urem(Align) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Align, AccessSize,
                                            DL, HeaderFirstNonPHI, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// \p Size specifies how many bytes need to be safe to load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target.
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

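  // Walk backwards from ScanFrom towards the start of the block.  BBI is
  // decremented before each use, so ScanFrom itself is never examined.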
  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default maximum number of instructions to scan in
/// the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

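/// Try to find an already-available value for \p Load by scanning backwards
/// from \p ScanFrom in \p ScanBB.  Loads that are volatile or stronger than
/// unordered are never forwarded; everything else is handed off to
/// FindAvailablePtrLoadStore using the load's pointer operand and type.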
Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}

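/// Scan backwards from \p ScanFrom in \p ScanBB for a load or store whose
/// address is equivalent to \p Ptr and whose value can be reused by a load of
/// type \p AccessTy.  Returns the available value, or nullptr if the scan
/// reaches the start of the block, exceeds the instruction limit, or finds a
/// potential clobber first.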
Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

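    // Step back onto Inst and keep scanning; the increment above exists so
    // that ScanFrom is left pointing just past Inst if we bail out on the
    // scan limit.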
    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the loaded
      // value, ignore the store.
      if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block; we didn't find it, but we are done for this
  // block.
  return nullptr;
}