//===- HWAddressSanitizer.cpp - detector of addressability bugs --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// This file is a part of HWAddressSanitizer, an address sanity checker
12 /// based on tagged addressing.
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringExtras.h"
17 #include "llvm/ADT/StringRef.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/IRBuilder.h"
27 #include "llvm/IR/InlineAsm.h"
28 #include "llvm/IR/InstVisitor.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/IR/MDBuilder.h"
35 #include "llvm/IR/Module.h"
36 #include "llvm/IR/Type.h"
37 #include "llvm/IR/Value.h"
38 #include "llvm/Pass.h"
39 #include "llvm/Support/Casting.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/Instrumentation.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/ModuleUtils.h"
46 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
47 #include <sstream>
48 
49 using namespace llvm;
50 
51 #define DEBUG_TYPE "hwasan"
52 
// Names of the runtime interface symbols. These must stay in sync with the
// hwasan runtime in compiler-rt.
static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
static const char *const kHwasanInitName = "__hwasan_init";

// Global through which the runtime publishes the shadow base when the
// mapping offset is only known at run time (kDynamicShadowSentinel).
static const char *const kHwasanShadowMemoryDynamicAddress =
    "__hwasan_shadow_memory_dynamic_address";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// One shadow byte covers 2^Scale bytes of application memory.
static const size_t kDefaultShadowScale = 4;
// Sentinel value of Mapping.Offset meaning "resolve the shadow base
// dynamically" rather than via a compile-time constant.
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
// The tag lives in the top byte of a 64-bit pointer.
static const unsigned kPointerTagShift = 56;

// See the ShadowMapping comment below for how the TLS-based shadow base is
// aligned; the code using this constant is outside this chunk.
static const unsigned kShadowBaseAlignment = 32;

// --- Flags controlling which accesses get instrumented and how. ---

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "hwasan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__hwasan_"));

static cl::opt<bool>
    ClInstrumentWithCalls("hwasan-instrument-with-calls",
                cl::desc("instrument reads and writes with callbacks"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "hwasan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

// When given, overrides the Recover constructor argument (see the pass ctor).
static cl::opt<bool> ClRecover(
    "hwasan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClUARRetagToZero(
    "hwasan-uar-retag-to-zero",
    cl::desc("Clear alloca tags before returning from the function to allow "
             "non-instrumented and instrumented function calls mix. When set "
             "to false, allocas are retagged before returning from the "
             "function to detect use after return."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

// -1 disables match-all; the kernel defaults to 0xFF (see
// instrumentMemAccessInline).
static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

// When given, overrides the CompileKernel constructor argument.
static cl::opt<bool> ClEnableKhwasan(
    "hwasan-kernel",
    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

// These flags allow to change the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset

static cl::opt<unsigned long long> ClMappingOffset(
    "hwasan-mapping-offset",
    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClWithIfunc("hwasan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(false));
139 static cl::opt<bool> ClWithTls(
140     "hwasan-with-tls",
141     cl::desc("Access dynamic shadow through an thread-local pointer on "
142              "platforms that support this"),
143     cl::Hidden, cl::init(true));
144 
// Record {FP,PC} of frames with tagged allocas into the per-thread ring
// buffer so the runtime can symbolize stack addresses after the fact.
static cl::opt<bool>
    ClRecordStackHistory("hwasan-record-stack-history",
                         cl::desc("Record stack frames with tagged allocations "
                                  "in a thread-local ring buffer"),
                         cl::Hidden, cl::init(true));
// Emit static {PC,Descr} frame descriptions into the __hwasan_frames section
// (see the frame-description members of HWAddressSanitizer below).
static cl::opt<bool>
    ClCreateFrameDescriptions("hwasan-create-frame-descriptions",
                              cl::desc("create static frame descriptions"),
                              cl::Hidden, cl::init(true));

// Replace memmove/memcpy/memset with checking runtime equivalents.
static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));
159 namespace {
160 
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer : public FunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  // Explicit -hwasan-recover / -hwasan-kernel flags take precedence over the
  // constructor arguments.
  explicit HWAddressSanitizer(bool CompileKernel = false, bool Recover = false)
      : FunctionPass(ID) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
        ClEnableKhwasan : CompileKernel;
  }

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;

  // Declare (or reuse) the __hwasan_* runtime entry points.
  void initializeCallbacks(Module &M);

  // Returns the dynamic shadow base for non-TLS mapping modes, or nullptr
  // when the mapping offset is a compile-time constant.
  Value *getDynamicShadowNonTls(IRBuilder<> &IRB);

  // Rewrite I's address operand to its untagged form (no-op on AArch64).
  void untagPointerOperand(Instruction *I, Value *Addr);
  // Translate an application address into its shadow address.
  Value *memToShadow(Value *Shadow, Type *Ty, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(Instruction *I);
  // Returns the pointer operand of a load/store/atomic access worth
  // instrumenting, filling the out-parameters; nullptr otherwise.
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,
                                   Value **MaybeMask);

  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(SmallVectorImpl<AllocaInst *> &Allocas,
                       SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
  // Tag generation: per-frame base tag, per-alloca tags derived from it, and
  // the tag applied on function exit (use-after-return detection).
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
                     unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
  Value *emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

private:
  LLVMContext *C;
  std::string CurModuleUniqueId;
  Triple TargetTriple;
  // Checking replacements for the memory intrinsics.
  Function *HWAsanMemmove, *HWAsanMemcpy, *HWAsanMemset;

  // Frame description is a way to pass names/sizes of local variables
  // to the run-time w/o adding extra executable code in every function.
  // We do this by creating a separate section with {PC,Descr} pairs and passing
  // the section beg/end to __hwasan_init_frames() at module init time.
  std::string createFrameString(ArrayRef<AllocaInst*> Allocas);
  void createFrameGlobal(Function &F, const std::string &FrameString);
  // Get the section name for frame descriptions. Currently ELF-only.
  const char *getFrameSection() { return "__hwasan_frames"; }
  // Linker-synthesized symbols delimiting the frame-description section.
  const char *getFrameSectionBeg() { return  "__start___hwasan_frames"; }
  const char *getFrameSectionEnd() { return  "__stop___hwasan_frames"; }
  // Declare an external hidden global bound to one end of the frame section.
  GlobalVariable *createFrameSectionBound(Module &M, Type *Ty,
                                          const char *Name) {
    auto GV = new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
                                 nullptr, Name);
    GV->setVisibility(GlobalValue::HiddenVisibility);
    return GV;
  }

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  struct ShadowMapping {
    int Scale;
    uint64_t Offset;
    bool InGlobal;
    bool InTls;

    void init(Triple &TargetTriple);
    // Allocas are padded/aligned to the shadow granularity so each gets
    // whole shadow bytes of its own.
    unsigned getAllocaAlignment() const { return 1U << Scale; }
  };
  ShadowMapping Mapping;

  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int8Ty;

  bool CompileKernel;
  bool Recover;

  Function *HwasanCtorFunction;

  // Fixed-size check callbacks, indexed [IsWrite][log2(size in bytes)].
  Function *HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  // Variable-size ("N") check callbacks, indexed [IsWrite].
  Function *HwasanMemoryAccessCallbackSized[2];

  Function *HwasanTagMemoryFunc;
  Function *HwasanGenerateTagFunc;
  Function *HwasanThreadEnterFunc;

  // extern char __hwasan_shadow[]; used when Mapping.InGlobal.
  Constant *ShadowGlobal;

  // Per-function shadow base, when loaded in the prologue.
  Value *LocalDynamicShadow = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};
274 
275 } // end anonymous namespace
276 
// Pass ID; the address of this variable is the pass's unique identity.
char HWAddressSanitizer::ID = 0;

// Legacy pass-manager registration. No analysis dependencies, hence the
// empty BEGIN/END bracket.
INITIALIZE_PASS_BEGIN(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
INITIALIZE_PASS_END(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
287 
/// Factory entry point for the legacy pass manager.
/// Kernel instrumentation requires recovery mode (the kernel cannot abort on
/// a bad access), hence the assertion.
FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
                                                 bool Recover) {
  assert(!CompileKernel || Recover);
  return new HWAddressSanitizer(CompileKernel, Recover);
}
293 
294 /// Module-level initialization.
295 ///
296 /// inserts a call to __hwasan_init to the module's constructor list.
bool HWAddressSanitizer::doInitialization(Module &M) {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  // Choose shadow scale/offset and access mode for this target.
  Mapping.init(TargetTriple);

  // Cache context and the frequently-used IR types.
  C = &(M.getContext());
  CurModuleUniqueId = getUniqueModuleId(&M);
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8PtrTy = IRB.getInt8PtrTy();
  Int8Ty = IRB.getInt8Ty();

  HwasanCtorFunction = nullptr;
  // The kernel has its own init; only userspace modules get a constructor
  // that calls __hwasan_init.
  if (!CompileKernel) {
    std::tie(HwasanCtorFunction, std::ignore) =
        createSanitizerCtorAndInitFunctions(M, kHwasanModuleCtorName,
                                            kHwasanInitName,
                                            /*InitArgTypes=*/{},
                                            /*InitArgs=*/{});
    // Place the ctor in a comdat so duplicates across TUs are merged at link
    // time.
    Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
    HwasanCtorFunction->setComdat(CtorComdat);
    appendToGlobalCtors(M, HwasanCtorFunction, 0, HwasanCtorFunction);

    // Create a zero-length global in __hwasan_frame so that the linker will
    // always create start and stop symbols.
    //
    // N.B. If we ever start creating associated metadata in this pass this
    // global will need to be associated with the ctor.
    Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
    auto GV =
        new GlobalVariable(M, Int8Arr0Ty, /*isConstantGlobal*/ true,
                           GlobalVariable::PrivateLinkage,
                           Constant::getNullValue(Int8Arr0Ty), "__hwasan");
    GV->setSection(getFrameSection());
    GV->setComdat(CtorComdat);
    appendToCompilerUsed(M, GV);

    // From the ctor, hand the frame-description section bounds to
    // __hwasan_init_frames so the runtime can index {PC,Descr} pairs.
    IRBuilder<> IRBCtor(HwasanCtorFunction->getEntryBlock().getTerminator());
    IRBCtor.CreateCall(
        declareSanitizerInitFunction(M, "__hwasan_init_frames",
                                     {Int8PtrTy, Int8PtrTy}),
        {createFrameSectionBound(M, Int8Ty, getFrameSectionBeg()),
         createFrameSectionBound(M, Int8Ty, getFrameSectionEnd())});
  }

  // On non-Android targets, declare the __hwasan_tls slot and keep it alive
  // via llvm.compiler.used. (Android uses a fixed sanitizer TLS slot instead;
  // see getHwasanThreadSlotPtr.)
  if (!TargetTriple.isAndroid())
    appendToCompilerUsed(
        M, ThreadPtrGlobal = new GlobalVariable(
               M, IntptrTy, false, GlobalVariable::ExternalLinkage, nullptr,
               "__hwasan_tls", nullptr, GlobalVariable::InitialExecTLSModel));

  return true;
}
353 
// Declare (or reuse) every runtime function this pass may call.
void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Check callbacks: {load,store} x {1,2,4,8,16,N}, with "_noabort"
  // variants in recovery mode.
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    // __hwasan_{load,store}N[_noabort](addr, size) for irregular sizes.
    HwasanMemoryAccessCallbackSized[AccessIsWrite] =
        checkSanitizerInterfaceFunction(M.getOrInsertFunction(
            ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
            FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false)));

    // __hwasan_{load,store}{1,2,4,8,16}[_noabort](addr).
    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          checkSanitizerInterfaceFunction(M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + TypeStr +
                  itostr(1ULL << AccessSizeIndex) + EndingStr,
              FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false)));
    }
  }

  // void __hwasan_tag_memory(i8* p, i8 tag, intptr_t size)
  HwasanTagMemoryFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy));
  // i8 __hwasan_generate_tag()
  HwasanGenerateTagFunc = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty));

  if (Mapping.InGlobal)
    ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                       ArrayType::get(IRB.getInt8Ty(), 0));

  // The kernel supplies its own mem* routines; userspace calls the
  // checking __hwasan_mem* versions.
  const std::string MemIntrinCallbackPrefix =
      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
  HWAsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
  HWAsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
  HWAsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy));

  HwasanThreadEnterFunc = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy()));
}
399 
getDynamicShadowNonTls(IRBuilder<> & IRB)400 Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
401   // Generate code only when dynamic addressing is needed.
402   if (Mapping.Offset != kDynamicShadowSentinel)
403     return nullptr;
404 
405   if (Mapping.InGlobal) {
406     // An empty inline asm with input reg == output reg.
407     // An opaque pointer-to-int cast, basically.
408     InlineAsm *Asm = InlineAsm::get(
409         FunctionType::get(IntptrTy, {ShadowGlobal->getType()}, false),
410         StringRef(""), StringRef("=r,0"),
411         /*hasSideEffects=*/false);
412     return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
413   } else {
414     Value *GlobalDynamicAddress =
415         IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
416             kHwasanShadowMemoryDynamicAddress, IntptrTy);
417     return IRB.CreateLoad(GlobalDynamicAddress);
418   }
419 }
420 
// If I is a memory access worth instrumenting, return its pointer operand
// and fill *IsWrite / *TypeSize (bits) / *Alignment; otherwise return
// nullptr. *MaybeMask is declared for masked vector accesses but is never
// set in this version.
Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                     bool *IsWrite,
                                                     uint64_t *TypeSize,
                                                     unsigned *Alignment,
                                                     Value **MaybeMask) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    // Atomics report alignment 0, which instrumentMemAccess treats as
    // "naturally aligned".
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  }

  if (PtrOperand) {
    // Do not instrument accesses from different address spaces; we cannot deal
    // with them.
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;

    // Ignore swifterror addresses.
    // swifterror memory addresses are mem2reg promoted by instruction
    // selection. As such they cannot have regular uses like an instrumentation
    // function and it makes no sense to track them as memory.
    if (PtrOperand->isSwiftError())
      return nullptr;
  }

  return PtrOperand;
}
478 
getPointerOperandIndex(Instruction * I)479 static unsigned getPointerOperandIndex(Instruction *I) {
480   if (LoadInst *LI = dyn_cast<LoadInst>(I))
481     return LI->getPointerOperandIndex();
482   if (StoreInst *SI = dyn_cast<StoreInst>(I))
483     return SI->getPointerOperandIndex();
484   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
485     return RMW->getPointerOperandIndex();
486   if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
487     return XCHG->getPointerOperandIndex();
488   report_fatal_error("Unexpected instruction");
489   return -1;
490 }
491 
TypeSizeToSizeIndex(uint32_t TypeSize)492 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
493   size_t Res = countTrailingZeros(TypeSize / 8);
494   assert(Res < kNumberOfAccessSizes);
495   return Res;
496 }
497 
// Rewrite I to access through the untagged form of Addr. On AArch64 the
// hardware's top-byte-ignore (TBI) lets tagged pointers be dereferenced
// directly, so nothing needs to change there.
void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
  if (TargetTriple.isAArch64())
    return;

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *UntaggedPtr =
      IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}
508 
memToShadow(Value * Mem,Type * Ty,IRBuilder<> & IRB)509 Value *HWAddressSanitizer::memToShadow(Value *Mem, Type *Ty, IRBuilder<> &IRB) {
510   // Mem >> Scale
511   Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
512   if (Mapping.Offset == 0)
513     return Shadow;
514   // (Mem >> Scale) + Offset
515   Value *ShadowBase;
516   if (LocalDynamicShadow)
517     ShadowBase = LocalDynamicShadow;
518   else
519     ShadowBase = ConstantInt::get(Ty, Mapping.Offset);
520   return IRB.CreateAdd(Shadow, ShadowBase);
521 }
522 
// Emit the inline tag check: compare the pointer's top-byte tag against the
// shadow byte and trap (arch-specific) on mismatch.
void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  IRBuilder<> IRB(InsertBefore);
  // The tag is the top byte of the pointer.
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *ShadowLong = memToShadow(AddrLong, PtrLong->getType(), IRB);
  Value *MemTag = IRB.CreateLoad(IRB.CreateIntToPtr(ShadowLong, Int8PtrTy));
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  // Pointers carrying the match-all tag are never reported. The kernel
  // defaults to 0xFF; userspace default (-1) disables the feature.
  int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
      ClMatchAllTag : (CompileKernel ? 0xFF : -1);
  if (matchAllTag != -1) {
    Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
        ConstantInt::get(PtrTag->getType(), matchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  // Branch to the (cold, weight 1:100000) check block; it is terminated by
  // unreachable unless we are in recovery mode.
  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  // Encode recover/is-write/size into one immediate the signal handler can
  // decode: bit 5 = Recover, bit 4 = IsWrite, low bits = size index.
  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
    case Triple::x86_64:
      // The signal handler will find the data address in rdi.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
          "{rdi}",
          /*hasSideEffects=*/true);
      break;
    case Triple::aarch64:
    case Triple::aarch64_be:
      // The signal handler will find the data address in x0.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "brk #" + itostr(0x900 + AccessInfo),
          "{x0}",
          /*hasSideEffects=*/true);
      break;
    default:
      report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, PtrLong);
}
572 
// Replace a memmove/memcpy/memset intrinsic with a call to the checking
// runtime version declared in initializeCallbacks, then delete the
// intrinsic.
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        HWAsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
590 
// Instrument a single instruction if it is an interesting memory access.
// Returns true if any instrumentation was emitted.
bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
  LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *MaybeMask = nullptr;

  if (ClInstrumentMemIntrinsics && isa<MemIntrinsic>(I)) {
    instrumentMemIntrinsic(cast<MemIntrinsic>(I));
    return true;
  }

  Value *Addr =
      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);

  if (!Addr)
    return false;

  // Masked vector accesses are not handled yet.
  if (MaybeMask)
    return false; //FIXME

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  // Use the fast per-size check only for power-of-two sizes up to 16 bytes
  // whose alignment guarantees the access cannot straddle two shadow
  // granules (Alignment == 0 means "naturally aligned").
  if (isPowerOf2_64(TypeSize) &&
      (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
      (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
       Alignment >= TypeSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
    if (ClInstrumentWithCalls) {
      IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                     AddrLong);
    } else {
      instrumentMemAccessInline(AddrLong, IsWrite, AccessSizeIndex, I);
    }
  } else {
    // Irregular size/alignment: fall back to the sized runtime callback.
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
                   {AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8)});
  }
  // On targets without top-byte-ignore, make the access itself use the
  // untagged address.
  untagPointerOperand(I, Addr);

  return true;
}
633 
getAllocaSizeInBytes(const AllocaInst & AI)634 static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
635   uint64_t ArraySize = 1;
636   if (AI.isArrayAllocation()) {
637     const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
638     assert(CI && "non-constant array size");
639     ArraySize = CI->getZExtValue();
640   }
641   Type *Ty = AI.getAllocatedType();
642   uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
643   return SizeInBytes * ArraySize;
644 }
645 
// Write Tag into every shadow byte covering AI. Always returns true.
bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
                                   Value *Tag) {
  // Round the size up to a whole number of shadow granules.
  size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
                ~(Mapping.getAllocaAlignment() - 1);

  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
  if (ClInstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
                    ConstantInt::get(IntptrTy, Size)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = IRB.CreateIntToPtr(
        memToShadow(IRB.CreatePointerCast(AI, IntptrTy), AI->getType(), IRB),
        Int8PtrTy);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
  }
  return true;
}
671 
RetagMask(unsigned AllocaNo)672 static unsigned RetagMask(unsigned AllocaNo) {
673   // A list of 8-bit numbers that have at most one run of non-zero bits.
674   // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
675   // masks.
676   // The list does not include the value 255, which is used for UAR.
677   static unsigned FastMasks[] = {
678       0,   1,   2,   3,   4,   6,   7,   8,   12,  14,  15, 16,  24,
679       28,  30,  31,  32,  48,  56,  60,  62,  63,  64,  96, 112, 120,
680       124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
681   return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
682 }
683 
getNextTagWithCall(IRBuilder<> & IRB)684 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
685   return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
686 }
687 
// Compute the frame's base tag, from which per-alloca tags are derived.
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
  // first).
  // Use the frame address (llvm.frameaddress(0)) as a cheap entropy source.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  auto GetStackPointerFn =
      Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
  Value *StackPointer = IRB.CreateCall(
      GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});

  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
  Value *StackTag =
      IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
                    "hwasan.stack.base.tag");
  return StackTag;
}
708 
getAllocaTag(IRBuilder<> & IRB,Value * StackTag,AllocaInst * AI,unsigned AllocaNo)709 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
710                                         AllocaInst *AI, unsigned AllocaNo) {
711   if (ClGenerateTagsWithCalls)
712     return getNextTagWithCall(IRB);
713   return IRB.CreateXor(StackTag,
714                        ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
715 }
716 
getUARTag(IRBuilder<> & IRB,Value * StackTag)717 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
718   if (ClUARRetagToZero)
719     return ConstantInt::get(IntptrTy, 0);
720   if (ClGenerateTagsWithCalls)
721     return getNextTagWithCall(IRB);
722   return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
723 }
724 
725 // Add a tag to an address.
tagPointer(IRBuilder<> & IRB,Type * Ty,Value * PtrLong,Value * Tag)726 Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
727                                       Value *PtrLong, Value *Tag) {
728   Value *TaggedPtrLong;
729   if (CompileKernel) {
730     // Kernel addresses have 0xFF in the most significant byte.
731     Value *ShiftedTag = IRB.CreateOr(
732         IRB.CreateShl(Tag, kPointerTagShift),
733         ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
734     TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
735   } else {
736     // Userspace can simply do OR (tag << 56);
737     Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
738     TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
739   }
740   return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
741 }
742 
743 // Remove tag from an address.
untagPointer(IRBuilder<> & IRB,Value * PtrLong)744 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
745   Value *UntaggedPtrLong;
746   if (CompileKernel) {
747     // Kernel addresses have 0xFF in the most significant byte.
748     UntaggedPtrLong = IRB.CreateOr(PtrLong,
749         ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
750   } else {
751     // Userspace addresses have 0x00.
752     UntaggedPtrLong = IRB.CreateAnd(PtrLong,
753         ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
754   }
755   return UntaggedPtrLong;
756 }
757 
getHwasanThreadSlotPtr(IRBuilder<> & IRB,Type * Ty)758 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
759   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
760   if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
761     // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
762     // in Bionic's libc/private/bionic_tls.h.
763     Function *ThreadPointerFunc =
764         Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
765     Value *SlotPtr = IRB.CreatePointerCast(
766         IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), 0x30),
767         Ty->getPointerTo(0));
768     return SlotPtr;
769   }
770   if (ThreadPtrGlobal)
771     return ThreadPtrGlobal;
772 
773 
774   return nullptr;
775 }
776 
777 // Creates a string with a description of the stack frame (set of Allocas).
778 // The string is intended to be human readable.
779 // The current form is: Size1 Name1; Size2 Name2; ...
780 std::string
createFrameString(ArrayRef<AllocaInst * > Allocas)781 HWAddressSanitizer::createFrameString(ArrayRef<AllocaInst *> Allocas) {
782   std::ostringstream Descr;
783   for (auto AI : Allocas)
784     Descr << getAllocaSizeInBytes(*AI) << " " <<  AI->getName().str() << "; ";
785   return Descr.str();
786 }
787 
// Creates a global in the frame section which consists of two pointers:
// the function PC and the frame string constant.
void HWAddressSanitizer::createFrameGlobal(Function &F,
                                           const std::string &FrameString) {
  Module &M = *F.getParent();
  // Emit the human-readable frame description as a private string constant.
  auto DescrGV = createPrivateGlobalForString(M, FrameString, true);
  // Pair type: {function address, description string address}.
  auto PtrPairTy = StructType::get(F.getType(), DescrGV->getType());
  auto GV = new GlobalVariable(
      M, PtrPairTy, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      ConstantStruct::get(PtrPairTy, (Constant *)&F, (Constant *)DescrGV),
      "__hwasan");
  GV->setSection(getFrameSection());
  // Nothing in the module references GV; keep it alive through llvm.compiler.used.
  appendToCompilerUsed(M, GV);
  // Put GV into the F's Comdat so that if F is deleted GV can be deleted too.
  if (auto Comdat =
          GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId))
    GV->setComdat(Comdat);
}
806 
// Emits prologue code computing the shadow base address for this function.
// When WithFrameRecord is true, additionally appends a mixed PC/SP record to
// the per-thread stack-history ring buffer and advances the buffer pointer.
// Returns the shadow base value.
Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
                                        bool WithFrameRecord) {
  // Without a TLS slot the shadow base comes from elsewhere (global/ifunc).
  if (!Mapping.InTls)
    return getDynamicShadowNonTls(IRB);

  Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
  assert(SlotPtr);

  Instruction *ThreadLong = IRB.CreateLoad(SlotPtr);

  Function *F = IRB.GetInsertBlock()->getParent();
  if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
    // Under the interceptor ABI the slot may still be zero — presumably the
    // thread has not entered the hwasan runtime yet; call the thread-enter
    // hook and reload. NOTE(review): confirm against __hwasan_thread_enter's
    // contract in the runtime.
    Value *ThreadLongEqZero =
        IRB.CreateICmpEQ(ThreadLong, ConstantInt::get(IntptrTy, 0));
    auto *Br = cast<BranchInst>(SplitBlockAndInsertIfThen(
        ThreadLongEqZero, cast<Instruction>(ThreadLongEqZero)->getNextNode(),
        false, MDBuilder(*C).createBranchWeights(1, 100000)));

    IRB.SetInsertPoint(Br);
    // FIXME: This should call a new runtime function with a custom calling
    // convention to avoid needing to spill all arguments here.
    IRB.CreateCall(HwasanThreadEnterFunc);
    LoadInst *ReloadThreadLong = IRB.CreateLoad(SlotPtr);

    // Merge the original and reloaded slot values in the continuation block.
    IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
    PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
    ThreadLongPhi->addIncoming(ThreadLong, ThreadLong->getParent());
    ThreadLongPhi->addIncoming(ReloadThreadLong, ReloadThreadLong->getParent());
    ThreadLong = ThreadLongPhi;
  }

  // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
  Value *ThreadLongMaybeUntagged =
      TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);

  if (WithFrameRecord) {
    // Prepare ring buffer data.
    auto PC = IRB.CreatePtrToInt(F, IntptrTy);
    auto GetStackPointerFn =
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::frameaddress);
    Value *SP = IRB.CreatePtrToInt(
        IRB.CreateCall(GetStackPointerFn,
                       {Constant::getNullValue(IRB.getInt32Ty())}),
        IntptrTy);
    // Mix SP and PC. TODO: also add the tag to the mix.
    // Assumptions:
    // PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
    // SP is 0xsssssssssssSSSS0  (4 lower bits are zero)
    // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
    //       0xSSSSPPPPPPPPPPPP
    SP = IRB.CreateShl(SP, 44);

    // Store data to ring buffer.
    Value *RecordPtr =
        IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
    IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);

    // Update the ring buffer. Top byte of ThreadLong defines the size of the
    // buffer in pages, it must be a power of two, and the start of the buffer
    // must be aligned by twice that much. Therefore wrap around of the ring
    // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
    // The use of AShr instead of LShr is due to
    //   https://bugs.llvm.org/show_bug.cgi?id=39030
    // Runtime library makes sure not to use the highest bit.
    Value *WrapMask = IRB.CreateXor(
        IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
        ConstantInt::get(IntptrTy, (uint64_t)-1));
    Value *ThreadLongNew = IRB.CreateAnd(
        IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
    IRB.CreateStore(ThreadLongNew, SlotPtr);
  }

  // Get shadow base address by aligning RecordPtr up.
  // Note: this is not correct if the pointer is already aligned.
  // Runtime library will make sure this never happens.
  Value *ShadowBase = IRB.CreateAdd(
      IRB.CreateOr(
          ThreadLongMaybeUntagged,
          ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
      ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
  return ShadowBase;
}
889 
// Tags every interesting alloca, rewrites its uses to the tagged pointer, and
// retags the memory with the UAR tag before every function exit in RetVec.
// StackTag may be null when tags come from runtime calls. Returns true.
bool HWAddressSanitizer::instrumentStack(
    SmallVectorImpl<AllocaInst *> &Allocas,
    SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  for (unsigned N = 0; N < Allocas.size(); ++N) {
    auto *AI = Allocas[N];
    // Insert right after the alloca so the tagged pointer dominates all uses.
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    // Rewrite every use except the cast we just created, which must keep
    // pointing at the untagged alloca.
    for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
      Use &U = *UI++;
      if (U.getUser() != AILong)
        U.set(Replacement);
    }

    // Tag the alloca's shadow with the same tag as the pointer.
    tagAlloca(IRB, AI, Tag);

    // At each function exit, retag the memory so stale pointers to this
    // alloca no longer match (use-after-return detection).
    for (auto RI : RetVec) {
      IRB.SetInsertPoint(RI);

      // Re-tag alloca memory with the special UAR tag.
      Value *Tag = getUARTag(IRB, StackTag);
      tagAlloca(IRB, AI, Tag);
    }
  }

  return true;
}
929 
isInterestingAlloca(const AllocaInst & AI)930 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
931   return (AI.getAllocatedType()->isSized() &&
932           // FIXME: instrument dynamic allocas, too
933           AI.isStaticAlloca() &&
934           // alloca() may be called with 0 size, ignore it.
935           getAllocaSizeInBytes(AI) > 0 &&
936           // We are only interested in allocas not promotable to registers.
937           // Promotable allocas are common under -O0.
938           !isAllocaPromotable(&AI) &&
939           // inalloca allocas are not treated as static, and we don't want
940           // dynamic alloca instrumentation for them as well.
941           !AI.isUsedWithInAlloca() &&
942           // swifterror allocas are register promoted by ISel
943           !AI.isSwiftError());
944 }
945 
// Per-function driver: collects interesting allocas, memory accesses and
// function exits, then instruments them. Returns true if F was changed.
bool HWAddressSanitizer::runOnFunction(Function &F) {
  // Never instrument the module constructor this pass created itself.
  if (&F == HwasanCtorFunction)
    return false;

  // Only functions marked sanitize_hwaddress are instrumented.
  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return false;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<AllocaInst*, 8> AllocasToInstrument;
  SmallVector<Instruction*, 8> RetVec;
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (ClInstrumentStack)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
          // Realign all allocas. We don't want small uninteresting allocas to
          // hide in instrumented alloca's padding.
          if (AI->getAlignment() < Mapping.getAllocaAlignment())
            AI->setAlignment(Mapping.getAllocaAlignment());
          // Instrument some of them.
          if (isInterestingAlloca(*AI))
            AllocasToInstrument.push_back(AI);
          continue;
        }

      // Collect function exits; needed to retag allocas on return.
      if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
          isa<CleanupReturnInst>(Inst))
        RetVec.push_back(&Inst);

      Value *MaybeMask = nullptr;
      bool IsWrite;
      unsigned Alignment;
      uint64_t TypeSize;
      Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
                                              &Alignment, &MaybeMask);
      if (Addr || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  // Nothing to do — leave the function untouched.
  if (AllocasToInstrument.empty() && ToInstrument.empty())
    return false;

  if (ClCreateFrameDescriptions && !AllocasToInstrument.empty())
    createFrameGlobal(F, createFrameString(AllocasToInstrument));

  initializeCallbacks(*F.getParent());

  assert(!LocalDynamicShadow);

  // Compute the shadow base once in the entry block; all checks emitted for
  // this function read it via LocalDynamicShadow.
  Instruction *InsertPt = &*F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(InsertPt);
  LocalDynamicShadow = emitPrologue(EntryIRB,
                                    /*WithFrameRecord*/ ClRecordStackHistory &&
                                        !AllocasToInstrument.empty());

  bool Changed = false;
  if (!AllocasToInstrument.empty()) {
    // With tags-via-calls there is no shared stack base tag: StackTag stays
    // null and each alloca obtains its own tag from the runtime.
    Value *StackTag =
        ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
    Changed |= instrumentStack(AllocasToInstrument, RetVec, StackTag);
  }

  for (auto Inst : ToInstrument)
    Changed |= instrumentMemAccess(Inst);

  // Reset per-function state for the next function.
  LocalDynamicShadow = nullptr;

  return Changed;
}
1017 
init(Triple & TargetTriple)1018 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
1019   Scale = kDefaultShadowScale;
1020   if (ClMappingOffset.getNumOccurrences() > 0) {
1021     InGlobal = false;
1022     InTls = false;
1023     Offset = ClMappingOffset;
1024   } else if (ClEnableKhwasan || ClInstrumentWithCalls) {
1025     InGlobal = false;
1026     InTls = false;
1027     Offset = 0;
1028   } else if (ClWithIfunc) {
1029     InGlobal = true;
1030     InTls = false;
1031     Offset = kDynamicShadowSentinel;
1032   } else if (ClWithTls) {
1033     InGlobal = false;
1034     InTls = true;
1035     Offset = kDynamicShadowSentinel;
1036   } else {
1037     InGlobal = false;
1038     InTls = false;
1039     Offset = kDynamicShadowSentinel;
1040   }
1041 }
1042