1 //===- HWAddressSanitizer.cpp - detector of uninitialized reads -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of HWAddressSanitizer, an address sanity checker
11 /// based on tagged addressing.
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
15 #include "llvm/ADT/MapVector.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/BinaryFormat/ELF.h"
21 #include "llvm/IR/Attributes.h"
22 #include "llvm/IR/BasicBlock.h"
23 #include "llvm/IR/Constant.h"
24 #include "llvm/IR/Constants.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/DebugInfoMetadata.h"
27 #include "llvm/IR/DerivedTypes.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/IR/IRBuilder.h"
30 #include "llvm/IR/InlineAsm.h"
31 #include "llvm/IR/InstVisitor.h"
32 #include "llvm/IR/Instruction.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/LLVMContext.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/Type.h"
40 #include "llvm/IR/Value.h"
41 #include "llvm/InitializePasses.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/Debug.h"
46 #include "llvm/Support/raw_ostream.h"
47 #include "llvm/Transforms/Instrumentation.h"
48 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
49 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
50 #include "llvm/Transforms/Utils/ModuleUtils.h"
51 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
52 #include <sstream>
53 
54 using namespace llvm;
55 
56 #define DEBUG_TYPE "hwasan"
57 
// Names of the generated module constructor, the ELF note global, and the
// runtime entry points this pass references.
const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
const char kHwasanNoteName[] = "hwasan.note";
const char kHwasanInitName[] = "__hwasan_init";
const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";

// Runtime-provided global holding the dynamic shadow base (used when the
// shadow is neither at a fixed offset nor reachable via ifunc/TLS).
const char kHwasanShadowMemoryDynamicAddress[] =
    "__hwasan_shadow_memory_dynamic_address";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// One shadow byte describes 2^kDefaultShadowScale (= 16) bytes of memory.
static const size_t kDefaultShadowScale = 4;
// Sentinel offset meaning "the shadow base must be found at runtime".
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();

// Alignment contract for the shadow base recovered from the TLS slot.
// NOTE(review): appears to be a bit count rather than a byte alignment —
// confirm against emitPrologue (not visible in this chunk).
static const unsigned kShadowBaseAlignment = 32;
74 
// Flags selecting which kinds of memory accesses are instrumented and how
// the checks are emitted (inline sequences vs. runtime callbacks).

static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
                                 cl::desc("Prefix for memory access callbacks"),
                                 cl::Hidden, cl::init("__hwasan_"));

static cl::opt<bool> ClInstrumentWithCalls(
    "hwasan-instrument-with-calls",
    cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentWrites("hwasan-instrument-writes",
                       cl::desc("instrument write instructions"), cl::Hidden,
                       cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
                                       cl::desc("instrument byval arguments"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecover("hwasan-recover",
              cl::desc("Enable recovery mode (continue-after-error)."),
              cl::Hidden, cl::init(false));

// Flags controlling stack-allocation tagging and tag generation.

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClUARRetagToZero(
    "hwasan-uar-retag-to-zero",
    cl::desc("Clear alloca tags before returning from the function to allow "
             "non-instrumented and instrumented function calls mix. When set "
             "to false, allocas are retagged before returning from the "
             "function to detect use after return."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
                               cl::Hidden, cl::init(false), cl::ZeroOrMore);

// The default of -1 disables the match-all behavior entirely.
static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool>
    ClEnableKhwasan("hwasan-kernel",
                    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
                    cl::Hidden, cl::init(false));

// These flags allow to change the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset

static cl::opt<uint64_t>
    ClMappingOffset("hwasan-mapping-offset",
                    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

static cl::opt<bool>
    ClWithIfunc("hwasan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClWithTls(
    "hwasan-with-tls",
    cl::desc("Access dynamic shadow through an thread-local pointer on "
             "platforms that support this"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecordStackHistory("hwasan-record-stack-history",
                         cl::desc("Record stack frames with tagged allocations "
                                  "in a thread-local ring buffer"),
                         cl::Hidden, cl::init(true));
static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentLandingPads("hwasan-instrument-landing-pads",
                            cl::desc("instrument landing pads"), cl::Hidden,
                            cl::init(false), cl::ZeroOrMore);

static cl::opt<bool> ClUseShortGranules(
    "hwasan-use-short-granules",
    cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
    cl::init(false), cl::ZeroOrMore);

static cl::opt<bool> ClInstrumentPersonalityFunctions(
    "hwasan-instrument-personality-functions",
    cl::desc("instrument personality functions"), cl::Hidden, cl::init(false),
    cl::ZeroOrMore);

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));

// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
                                      cl::desc("Use page aliasing in HWASan"),
                                      cl::Hidden, cl::init(false));
192 
193 namespace {
194 
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
public:
  /// \p CompileKernel and \p Recover typically come from the frontend; the
  /// corresponding cl::opt flags, when given explicitly on the command line,
  /// override them. Construction immediately performs the module-level setup.
  explicit HWAddressSanitizer(Module &M, bool CompileKernel = false,
                              bool Recover = false)
      : M(M) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
                              ? ClEnableKhwasan
                              : CompileKernel;

    initializeModule();
  }

  /// Instrument a single function; returns true if the IR was changed.
  bool sanitizeFunction(Function &F);
  /// One-time module setup: resolve flags, pick the shadow mapping, and emit
  /// the ctor/note/global machinery.
  void initializeModule();
  void createHwasanCtorComdat();

  void initializeCallbacks(Module &M);

  Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);

  // Helpers for locating the shadow memory base.
  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getShadowNonTls(IRBuilder<> &IRB);

  // Memory-access instrumentation.
  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(InterestingMemoryOperand &O);
  bool ignoreAccess(Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  // Stack (alloca) tagging.
  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(
      SmallVectorImpl<AllocaInst *> &Allocas,
      DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
      SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
  Value *readRegister(IRBuilder<> &IRB, StringRef Name);
  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
                      unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
  Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
  unsigned retagMask(unsigned AllocaNo);

  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

  // Global-variable instrumentation.
  void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
  void instrumentGlobals();

  void instrumentPersonalityFunctions();

private:
  LLVMContext *C;
  Module &M;
  Triple TargetTriple;
  // Runtime replacements for the memory intrinsics.
  FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
  FunctionCallee HWAsanHandleVfork;

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  ///
  /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
  /// ring buffer for storing stack allocations on targets that support it.
  struct ShadowMapping {
    int Scale;
    uint64_t Offset;
    bool InGlobal;
    bool InTls;
    bool WithFrameRecord;

    void init(Triple &TargetTriple, bool InstrumentWithCalls);
    unsigned getObjectAlignment() const { return 1U << Scale; }
  };
  ShadowMapping Mapping;

  // Cached IR types; the remaining ones are filled in by initializeModule().
  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int8Ty;
  Type *Int32Ty;
  Type *Int64Ty = Type::getInt64Ty(M.getContext());

  bool CompileKernel;
  bool Recover;
  bool OutlinedChecks;
  bool UseShortGranules;
  bool InstrumentLandingPads;
  bool InstrumentWithCalls;
  bool InstrumentStack;
  bool UsePageAliases;

  // When set, accesses through pointers carrying MatchAllTag are not checked.
  bool HasMatchAllTag = false;
  uint8_t MatchAllTag = 0;

  // Bit position of the pointer tag and the mask of valid tag bits; both are
  // target-dependent (see initializeModule()).
  unsigned PointerTagShift;
  uint64_t TagMaskByte;

  Function *HwasanCtorFunction;

  // Indexed by [IsWrite][log2(AccessSize)].
  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  // Indexed by [IsWrite]; used for accesses of non-power-of-two size.
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;

  Constant *ShadowGlobal;

  // Per-function state, set up while instrumenting each function.
  Value *ShadowBase = nullptr;
  Value *StackBaseTag = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};
325 
/// Legacy pass manager wrapper. The actual sanitizer object is created once
/// per module and shared across all runOnFunction invocations.
class HWAddressSanitizerLegacyPass : public FunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false,
                                        bool Recover = false)
      : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover) {
    initializeHWAddressSanitizerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  // Constructing HWAddressSanitizer runs its module-level initialization,
  // which modifies the module — hence the unconditional `return true`.
  bool doInitialization(Module &M) override {
    HWASan = std::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover);
    return true;
  }

  bool runOnFunction(Function &F) override {
    return HWASan->sanitizeFunction(F);
  }

  // Drop the per-module sanitizer state once all functions are processed.
  bool doFinalization(Module &M) override {
    HWASan.reset();
    return false;
  }

private:
  std::unique_ptr<HWAddressSanitizer> HWASan;
  bool CompileKernel;
  bool Recover;
};
359 
360 } // end anonymous namespace
361 
char HWAddressSanitizerLegacyPass::ID = 0;

// Register the legacy pass under the name "hwasan". There are no analysis
// dependencies, so nothing appears between the BEGIN/END pair.
INITIALIZE_PASS_BEGIN(
    HWAddressSanitizerLegacyPass, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
INITIALIZE_PASS_END(
    HWAddressSanitizerLegacyPass, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
372 
/// Factory for the legacy pass. Kernel instrumentation implies recover mode
/// (the kernel must keep running after a report), which the assert enforces.
FunctionPass *llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel,
                                                           bool Recover) {
  assert(!CompileKernel || Recover);
  return new HWAddressSanitizerLegacyPass(CompileKernel, Recover);
}
378 
// New-PM pass: the constructor only records configuration; all work happens
// in run().
HWAddressSanitizerPass::HWAddressSanitizerPass(bool CompileKernel, bool Recover)
    : CompileKernel(CompileKernel), Recover(Recover) {}
381 
382 PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
383                                               ModuleAnalysisManager &MAM) {
384   HWAddressSanitizer HWASan(M, CompileKernel, Recover);
385   bool Modified = false;
386   for (Function &F : M)
387     Modified |= HWASan.sanitizeFunction(F);
388   if (Modified)
389     return PreservedAnalyses::none();
390   return PreservedAnalyses::all();
391 }
392 
/// Create the module constructor (which calls __hwasan_init) and the ELF note
/// the runtime uses to locate the global descriptor list. Everything lives in
/// a comdat keyed on the ctor name so each linked binary gets one copy.
void HWAddressSanitizer::createHwasanCtorComdat() {
  std::tie(HwasanCtorFunction, std::ignore) =
      getOrCreateSanitizerCtorAndInitFunctions(
          M, kHwasanModuleCtorName, kHwasanInitName,
          /*InitArgTypes=*/{},
          /*InitArgs=*/{},
          // This callback is invoked when the functions are created the first
          // time. Hook them into the global ctors list in that case:
          [&](Function *Ctor, FunctionCallee) {
            Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
            Ctor->setComdat(CtorComdat);
            appendToGlobalCtors(M, Ctor, 0, Ctor);
          });

  // Create a note that contains pointers to the list of global
  // descriptors. Adding a note to the output file will cause the linker to
  // create a PT_NOTE program header pointing to the note that we can use to
  // find the descriptor list starting from the program headers. A function
  // provided by the runtime initializes the shadow memory for the globals by
  // accessing the descriptor list via the note. The dynamic loader needs to
  // call this function whenever a library is loaded.
  //
  // The reason why we use a note for this instead of a more conventional
  // approach of having a global constructor pass a descriptor list pointer to
  // the runtime is because of an order of initialization problem. With
  // constructors we can encounter the following problematic scenario:
  //
  // 1) library A depends on library B and also interposes one of B's symbols
  // 2) B's constructors are called before A's (as required for correctness)
  // 3) during construction, B accesses one of its "own" globals (actually
  //    interposed by A) and triggers a HWASAN failure due to the initialization
  //    for A not having happened yet
  //
  // Even without interposition it is possible to run into similar situations in
  // cases where two libraries mutually depend on each other.
  //
  // We only need one note per binary, so put everything for the note in a
  // comdat. This needs to be a comdat with an .init_array section to prevent
  // newer versions of lld from discarding the note.
  //
  // Create the note even if we aren't instrumenting globals. This ensures that
  // binaries linked from object files with both instrumented and
  // non-instrumented globals will end up with a note, even if a comdat from an
  // object file with non-instrumented globals is selected. The note is harmless
  // if the runtime doesn't support it, since it will just be ignored.
  Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);

  // __start_/__stop_ symbols are synthesized by the linker for the
  // "hwasan_globals" section; the note records their addresses.
  Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
  auto Start =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__start_hwasan_globals");
  Start->setVisibility(GlobalValue::HiddenVisibility);
  Start->setDSOLocal(true);
  auto Stop =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__stop_hwasan_globals");
  Stop->setVisibility(GlobalValue::HiddenVisibility);
  Stop->setDSOLocal(true);

  // Null-terminated so actually 8 bytes, which are required in order to align
  // the note properly.
  auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");

  // Note layout: n_namesz, n_descsz, n_type, name, then the two relative
  // pointers forming the descriptor (see the initializer below).
  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
                                 Int32Ty, Int32Ty);
  auto *Note =
      new GlobalVariable(M, NoteTy, /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
  Note->setSection(".note.hwasan.globals");
  Note->setComdat(NoteComdat);
  Note->setAlignment(Align(4));
  Note->setDSOLocal(true);

  // The pointers in the note need to be relative so that the note ends up being
  // placed in rodata, which is the standard location for notes.
  auto CreateRelPtr = [&](Constant *Ptr) {
    return ConstantExpr::getTrunc(
        ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
                             ConstantExpr::getPtrToInt(Note, Int64Ty)),
        Int32Ty);
  };
  Note->setInitializer(ConstantStruct::getAnon(
      {ConstantInt::get(Int32Ty, 8),                           // n_namesz
       ConstantInt::get(Int32Ty, 8),                           // n_descsz
       ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
       Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
  appendToCompilerUsed(M, Note);

  // Create a zero-length global in hwasan_globals so that the linker will
  // always create start and stop symbols.
  auto Dummy = new GlobalVariable(
      M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
  Dummy->setSection("hwasan_globals");
  Dummy->setComdat(NoteComdat);
  // Tie the dummy's liveness to the note so they are kept or dropped together.
  Dummy->setMetadata(LLVMContext::MD_associated,
                     MDNode::get(*C, ValueAsMetadata::get(Note)));
  appendToCompilerUsed(M, Dummy);
}
492 
/// Module-level initialization.
///
/// inserts a call to __hwasan_init to the module's constructor list.
void HWAddressSanitizer::initializeModule() {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  // x86_64 currently has two modes:
  // - Intel LAM (default)
  // - pointer aliasing
  // Pointer aliasing mode is heap only.  LAM mode is heap+stack, with support
  // planned for globals as well.
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  UsePageAliases = ClUsePageAliases && IsX86_64;
  // On x86_64, checks are always emitted as runtime calls; page-aliasing mode
  // additionally disables stack instrumentation (heap only, see above).
  InstrumentWithCalls = IsX86_64 ? true : ClInstrumentWithCalls;
  InstrumentStack = UsePageAliases ? false : ClInstrumentStack;
  // x86_64 keeps a 6-bit tag starting at bit 57; other targets use the whole
  // top byte starting at bit 56.
  PointerTagShift = IsX86_64 ? 57 : 56;
  TagMaskByte = IsX86_64 ? 0x3F : 0xFF;

  Mapping.init(TargetTriple, InstrumentWithCalls);

  // Cache the context and the commonly used IR types.
  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8PtrTy = IRB.getInt8PtrTy();
  Int8Ty = IRB.getInt8Ty();
  Int32Ty = IRB.getInt32Ty();

  HwasanCtorFunction = nullptr;

  // Older versions of Android do not have the required runtime support for
  // short granules, global or personality function instrumentation. On other
  // platforms we currently require using the latest version of the runtime.
  bool NewRuntime =
      !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);

  UseShortGranules =
      ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
  // Outlined checks (via the hwasan_check_memaccess intrinsics) are only
  // available for AArch64 ELF, and are incompatible with recover mode unless
  // explicitly forced off via -hwasan-inline-all-checks.
  OutlinedChecks =
      TargetTriple.isAArch64() && TargetTriple.isOSBinFormatELF() &&
      (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);

  // An explicit flag wins; otherwise kernel builds default to ignoring the
  // all-ones tag (untagged kernel pointers).
  if (ClMatchAllTag.getNumOccurrences()) {
    if (ClMatchAllTag != -1) {
      HasMatchAllTag = true;
      MatchAllTag = ClMatchAllTag & 0xFF;
    }
  } else if (CompileKernel) {
    HasMatchAllTag = true;
    MatchAllTag = 0xFF;
  }

  // If we don't have personality function support, fall back to landing pads.
  InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
                              ? ClInstrumentLandingPads
                              : !NewRuntime;

  // Userspace-only machinery: the ctor/note comdat, global instrumentation,
  // and personality-function thunks. The kernel provides its own runtime.
  if (!CompileKernel) {
    createHwasanCtorComdat();
    bool InstrumentGlobals =
        ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;

    // TODO: Support globals for x86_64 in non-aliasing mode.
    if (InstrumentGlobals && !IsX86_64)
      instrumentGlobals();

    bool InstrumentPersonalityFunctions =
        ClInstrumentPersonalityFunctions.getNumOccurrences()
            ? ClInstrumentPersonalityFunctions
            : NewRuntime;
    if (InstrumentPersonalityFunctions)
      instrumentPersonalityFunctions();
  }

  // Off Android, declare the __hwasan_tls slot ourselves (initial-exec TLS).
  if (!TargetTriple.isAndroid()) {
    Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
      auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
                                    GlobalValue::ExternalLinkage, nullptr,
                                    "__hwasan_tls", nullptr,
                                    GlobalVariable::InitialExecTLSModel);
      appendToCompilerUsed(M, GV);
      return GV;
    });
    ThreadPtrGlobal = cast<GlobalVariable>(C);
  }
}
581 
/// Declare (or look up) all runtime callbacks used by the instrumentation.
/// Access callbacks follow the naming scheme
///   <prefix>{load,store}{1,2,4,8,16,N}[_noabort]
/// where "N" takes an explicit size argument and "_noabort" is the recover
/// variant.
void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    // Variable-size variant: (address, size).
    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
        FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));

    // Fixed-size variants: one per power-of-two access size, taking only the
    // address.
    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + TypeStr +
                  itostr(1ULL << AccessSizeIndex) + EndingStr,
              FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
    }
  }

  HwasanTagMemoryFunc = M.getOrInsertFunction(
      "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  // The ifunc-style shadow base (see getDynamicShadowIfunc).
  ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                     ArrayType::get(IRB.getInt8Ty(), 0));

  // In kernel mode the plain libc names are used (the kernel interposes
  // them); userspace uses the prefixed runtime wrappers.
  const std::string MemIntrinCallbackPrefix =
      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
  HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt32Ty(), IntptrTy);

  HWAsanHandleVfork =
      M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
}
625 
626 Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
627   // An empty inline asm with input reg == output reg.
628   // An opaque no-op cast, basically.
629   // This prevents code bloat as a result of rematerializing trivial definitions
630   // such as constants or global addresses at every load and store.
631   InlineAsm *Asm =
632       InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
633                      StringRef(""), StringRef("=r,0"),
634                      /*hasSideEffects=*/false);
635   return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
636 }
637 
/// Return the shadow base as the (opaquely wrapped) address of the
/// __hwasan_shadow global, which the runtime resolves via ifunc.
Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
  return getOpaqueNoopCast(IRB, ShadowGlobal);
}
641 
642 Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
643   if (Mapping.Offset != kDynamicShadowSentinel)
644     return getOpaqueNoopCast(
645         IRB, ConstantExpr::getIntToPtr(
646                  ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));
647 
648   if (Mapping.InGlobal) {
649     return getDynamicShadowIfunc(IRB);
650   } else {
651     Value *GlobalDynamicAddress =
652         IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
653             kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
654     return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
655   }
656 }
657 
658 bool HWAddressSanitizer::ignoreAccess(Value *Ptr) {
659   // Do not instrument acesses from different address spaces; we cannot deal
660   // with them.
661   Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
662   if (PtrTy->getPointerAddressSpace() != 0)
663     return true;
664 
665   // Ignore swifterror addresses.
666   // swifterror memory addresses are mem2reg promoted by instruction
667   // selection. As such they cannot have regular uses like an instrumentation
668   // function and it makes no sense to track them as memory.
669   if (Ptr->isSwiftError())
670     return true;
671 
672   return false;
673 }
674 
675 void HWAddressSanitizer::getInterestingMemoryOperands(
676     Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
677   // Skip memory accesses inserted by another instrumentation.
678   if (I->hasMetadata("nosanitize"))
679     return;
680 
681   // Do not instrument the load fetching the dynamic shadow address.
682   if (ShadowBase == I)
683     return;
684 
685   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
686     if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
687       return;
688     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
689                              LI->getType(), LI->getAlign());
690   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
691     if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
692       return;
693     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
694                              SI->getValueOperand()->getType(), SI->getAlign());
695   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
696     if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
697       return;
698     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
699                              RMW->getValOperand()->getType(), None);
700   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
701     if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
702       return;
703     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
704                              XCHG->getCompareOperand()->getType(), None);
705   } else if (auto CI = dyn_cast<CallInst>(I)) {
706     for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
707       if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
708           ignoreAccess(CI->getArgOperand(ArgNo)))
709         continue;
710       Type *Ty = CI->getParamByValType(ArgNo);
711       Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
712     }
713   }
714 }
715 
716 static unsigned getPointerOperandIndex(Instruction *I) {
717   if (LoadInst *LI = dyn_cast<LoadInst>(I))
718     return LI->getPointerOperandIndex();
719   if (StoreInst *SI = dyn_cast<StoreInst>(I))
720     return SI->getPointerOperandIndex();
721   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
722     return RMW->getPointerOperandIndex();
723   if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
724     return XCHG->getPointerOperandIndex();
725   report_fatal_error("Unexpected instruction");
726   return -1;
727 }
728 
// Map an access size in bits (8, 16, 32, 64, 128) to its index in the
// callback tables: log2 of the size in bytes.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
734 
735 void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
736   if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64)
737     return;
738 
739   IRBuilder<> IRB(I);
740   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
741   Value *UntaggedPtr =
742       IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
743   I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
744 }
745 
746 Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
747   // Mem >> Scale
748   Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
749   if (Mapping.Offset == 0)
750     return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
751   // (Mem >> Scale) + Offset
752   return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
753 }
754 
// Emit an inline (no runtime-call) tag check before InsertBefore for a memory
// access of 2^AccessSizeIndex bytes through Ptr. On a tag mismatch the code
// falls into a slow path that handles short granules and ultimately executes
// a trapping instruction the runtime's signal handler decodes.
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  assert(!UsePageAliases);
  // Pack all check parameters into one constant; the runtime recovers them
  // from the trap instruction's immediate (see HWASanAccessInfo shifts).
  const int64_t AccessInfo =
      (CompileKernel << HWASanAccessInfo::CompileKernelShift) +
      (HasMatchAllTag << HWASanAccessInfo::HasMatchAllShift) +
      (MatchAllTag << HWASanAccessInfo::MatchAllShift) +
      (Recover << HWASanAccessInfo::RecoverShift) +
      (IsWrite << HWASanAccessInfo::IsWriteShift) +
      (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
  IRBuilder<> IRB(InsertBefore);

  if (OutlinedChecks) {
    // Outlined mode: the whole check is delegated to the
    // hwasan_check_memaccess[_shortgranules] intrinsic.
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
    IRB.CreateCall(Intrinsic::getDeclaration(
                       M, UseShortGranules
                              ? Intrinsic::hwasan_check_memaccess_shortgranules
                              : Intrinsic::hwasan_check_memaccess),
                   {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
    return;
  }

  // Fast path: compare the pointer's tag byte against the shadow tag.
  Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, PointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *Shadow = memToShadow(AddrLong, IRB);
  Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  if (HasMatchAllTag) {
    // Pointers carrying the designated match-all tag skip the check.
    Value *TagNotIgnored = IRB.CreateICmpNE(
        PtrTag, ConstantInt::get(PtrTag->getType(), MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  // Branch to the slow path only on mismatch; weights mark it very unlikely.
  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  // Shadow values 1..15 denote a short granule (partially-used 16-byte
  // granule, see tagAlloca); anything larger is a genuine mismatch and goes
  // straight to the failure block. With !Recover that branch is terminating.
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm =
      SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  // Short granule bounds check: the access's last byte offset within the
  // granule must be strictly below the stored granule size (MemTag).
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
                            MDBuilder(*C).createBranchWeights(1, 100000),
                            (DomTreeUpdater *)nullptr, nullptr,
                            CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckTerm);
  // For a short granule the real tag is stored in the granule's last byte
  // (written by tagAlloca); compare the pointer tag against it.
  Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
                            MDBuilder(*C).createBranchWeights(1, 100000),
                            (DomTreeUpdater *)nullptr, nullptr,
                            CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckFailTerm);
  // Failure block: a target-specific trap whose immediate encodes AccessInfo
  // and whose operand constraint pins the faulting address in the register
  // the runtime's handler reads.
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    // The signal handler will find the data address in rdi.
    Asm = InlineAsm::get(
        FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
        "int3\nnopl " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
            "(%rax)",
        "{rdi}",
        /*hasSideEffects=*/true);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // The signal handler will find the data address in x0.
    Asm = InlineAsm::get(
        FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x0}",
        /*hasSideEffects=*/true);
    break;
  default:
    report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, PtrLong);
  // In recover mode, continue execution after the report by branching back
  // to the block containing the original access.
  if (Recover)
    cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
}
853 
854 void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
855   IRBuilder<> IRB(MI);
856   if (isa<MemTransferInst>(MI)) {
857     IRB.CreateCall(
858         isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
859         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
860          IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
861          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
862   } else if (isa<MemSetInst>(MI)) {
863     IRB.CreateCall(
864         HWAsanMemset,
865         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
866          IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
867          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
868   }
869   MI->eraseFromParent();
870 }
871 
872 bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
873   Value *Addr = O.getPtr();
874 
875   LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
876 
877   if (O.MaybeMask)
878     return false; // FIXME
879 
880   IRBuilder<> IRB(O.getInsn());
881   if (isPowerOf2_64(O.TypeSize) &&
882       (O.TypeSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
883       (!O.Alignment || *O.Alignment >= (1ULL << Mapping.Scale) ||
884        *O.Alignment >= O.TypeSize / 8)) {
885     size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
886     if (InstrumentWithCalls) {
887       IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
888                      IRB.CreatePointerCast(Addr, IntptrTy));
889     } else {
890       instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
891     }
892   } else {
893     IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
894                    {IRB.CreatePointerCast(Addr, IntptrTy),
895                     ConstantInt::get(IntptrTy, O.TypeSize / 8)});
896   }
897   untagPointerOperand(O.getInsn(), Addr);
898 
899   return true;
900 }
901 
902 static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
903   uint64_t ArraySize = 1;
904   if (AI.isArrayAllocation()) {
905     const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
906     assert(CI && "non-constant array size");
907     ArraySize = CI->getZExtValue();
908   }
909   Type *Ty = AI.getAllocatedType();
910   uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
911   return SizeInBytes * ArraySize;
912 }
913 
// Write tag `Tag` into the shadow for `Size` bytes of alloca AI, handling the
// short-granule encoding for a trailing partially-used granule.
bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
                                   size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  // Without short granules the whole aligned region gets the full tag.
  if (!UseShortGranules)
    Size = AlignedSize;

  // Only the low byte of the tag is stored in shadow.
  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
  if (InstrumentWithCalls) {
    // Call mode: let the runtime tag the (aligned) region.
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, Align(1));
    if (Size != AlignedSize) {
      // Short granule: the shadow byte for the trailing granule holds the
      // number of used bytes (1..15), and the real tag is stored in the last
      // byte of the granule itself (read back by the inline check path).
      IRB.CreateStore(
          ConstantInt::get(Int8Ty, Size % Mapping.getObjectAlignment()),
          IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
                                   Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
                                   AlignedSize - 1));
    }
  }
  return true;
}
947 
948 unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
949   if (TargetTriple.getArch() == Triple::x86_64)
950     return AllocaNo & TagMaskByte;
951 
952   // A list of 8-bit numbers that have at most one run of non-zero bits.
953   // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
954   // masks.
955   // The list does not include the value 255, which is used for UAR.
956   //
957   // Because we are more likely to use earlier elements of this list than later
958   // ones, it is sorted in increasing order of probability of collision with a
959   // mask allocated (temporally) nearby. The program that generated this list
960   // can be found at:
961   // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
962   static unsigned FastMasks[] = {0,  128, 64,  192, 32,  96,  224, 112, 240,
963                                  48, 16,  120, 248, 56,  24,  8,   124, 252,
964                                  60, 28,  12,  4,   126, 254, 62,  30,  14,
965                                  6,  2,   127, 63,  31,  15,  7,   3,   1};
966   return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
967 }
968 
969 Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
970   if (TargetTriple.getArch() == Triple::x86_64) {
971     Constant *TagMask = ConstantInt::get(IntptrTy, TagMaskByte);
972     Value *NewTag = IRB.CreateAnd(OldTag, TagMask);
973     return NewTag;
974   }
975   // aarch64 uses 8-bit tags, so no mask is needed.
976   return OldTag;
977 }
978 
979 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
980   return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
981 }
982 
983 Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
984   if (ClGenerateTagsWithCalls)
985     return getNextTagWithCall(IRB);
986   if (StackBaseTag)
987     return StackBaseTag;
988   // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
989   // first).
990   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
991   auto GetStackPointerFn = Intrinsic::getDeclaration(
992       M, Intrinsic::frameaddress,
993       IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
994   Value *StackPointer = IRB.CreateCall(
995       GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});
996 
997   // Extract some entropy from the stack pointer for the tags.
998   // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
999   // between functions).
1000   Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
1001   Value *StackTag =
1002       applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
1003                                       IRB.CreateLShr(StackPointerLong, 20)));
1004   StackTag->setName("hwasan.stack.base.tag");
1005   return StackTag;
1006 }
1007 
1008 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1009                                         AllocaInst *AI, unsigned AllocaNo) {
1010   if (ClGenerateTagsWithCalls)
1011     return getNextTagWithCall(IRB);
1012   return IRB.CreateXor(StackTag,
1013                        ConstantInt::get(IntptrTy, retagMask(AllocaNo)));
1014 }
1015 
1016 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
1017   if (ClUARRetagToZero)
1018     return ConstantInt::get(IntptrTy, 0);
1019   if (ClGenerateTagsWithCalls)
1020     return getNextTagWithCall(IRB);
1021   return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, TagMaskByte));
1022 }
1023 
1024 // Add a tag to an address.
1025 Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1026                                       Value *PtrLong, Value *Tag) {
1027   assert(!UsePageAliases);
1028   Value *TaggedPtrLong;
1029   if (CompileKernel) {
1030     // Kernel addresses have 0xFF in the most significant byte.
1031     Value *ShiftedTag =
1032         IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1033                      ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1034     TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1035   } else {
1036     // Userspace can simply do OR (tag << PointerTagShift);
1037     Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1038     TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1039   }
1040   return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1041 }
1042 
1043 // Remove tag from an address.
1044 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1045   assert(!UsePageAliases);
1046   Value *UntaggedPtrLong;
1047   if (CompileKernel) {
1048     // Kernel addresses have 0xFF in the most significant byte.
1049     UntaggedPtrLong =
1050         IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1051                                                0xFFULL << PointerTagShift));
1052   } else {
1053     // Userspace addresses have 0x00.
1054     UntaggedPtrLong =
1055         IRB.CreateAnd(PtrLong, ConstantInt::get(PtrLong->getType(),
1056                                                 ~(0xFFULL << PointerTagShift)));
1057   }
1058   return UntaggedPtrLong;
1059 }
1060 
1061 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
1062   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1063   if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
1064     // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1065     // in Bionic's libc/private/bionic_tls.h.
1066     Function *ThreadPointerFunc =
1067         Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
1068     Value *SlotPtr = IRB.CreatePointerCast(
1069         IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
1070                                IRB.CreateCall(ThreadPointerFunc), 0x30),
1071         Ty->getPointerTo(0));
1072     return SlotPtr;
1073   }
1074   if (ThreadPtrGlobal)
1075     return ThreadPtrGlobal;
1076 
1077   return nullptr;
1078 }
1079 
// Emit per-function entry code: establish ShadowBase and, when
// WithFrameRecord is set, push a mixed (PC, SP) record into the per-thread
// stack-history ring buffer and advance its write cursor.
void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
  if (!Mapping.InTls)
    ShadowBase = getShadowNonTls(IRB);
  else if (!WithFrameRecord && TargetTriple.isAndroid())
    ShadowBase = getDynamicShadowIfunc(IRB);

  // If the shadow base is already known and no frame record is needed, the
  // thread slot does not have to be touched at all.
  if (!WithFrameRecord && ShadowBase)
    return;

  Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
  assert(SlotPtr);

  Value *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
  // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
  Value *ThreadLongMaybeUntagged =
      TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);

  if (WithFrameRecord) {
    Function *F = IRB.GetInsertBlock()->getParent();
    // Derive this function's stack base tag from the per-thread value.
    StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

    // Prepare ring buffer data.
    Value *PC;
    if (TargetTriple.getArch() == Triple::aarch64)
      PC = readRegister(IRB, "pc");
    else
      // Non-AArch64: use the function address as a stand-in for the PC.
      PC = IRB.CreatePtrToInt(F, IntptrTy);
    Module *M = F->getParent();
    auto GetStackPointerFn = Intrinsic::getDeclaration(
        M, Intrinsic::frameaddress,
        IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
    Value *SP = IRB.CreatePtrToInt(
        IRB.CreateCall(GetStackPointerFn,
                       {Constant::getNullValue(IRB.getInt32Ty())}),
        IntptrTy);
    // Mix SP and PC.
    // Assumptions:
    // PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
    // SP is 0xsssssssssssSSSS0  (4 lower bits are zero)
    // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
    //       0xSSSSPPPPPPPPPPPP
    SP = IRB.CreateShl(SP, 44);

    // Store data to ring buffer.
    Value *RecordPtr =
        IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
    IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);

    // Update the ring buffer. Top byte of ThreadLong defines the size of the
    // buffer in pages, it must be a power of two, and the start of the buffer
    // must be aligned by twice that much. Therefore wrap around of the ring
    // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
    // The use of AShr instead of LShr is due to
    //   https://bugs.llvm.org/show_bug.cgi?id=39030
    // Runtime library makes sure not to use the highest bit.
    Value *WrapMask = IRB.CreateXor(
        IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
        ConstantInt::get(IntptrTy, (uint64_t)-1));
    Value *ThreadLongNew = IRB.CreateAnd(
        IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
    IRB.CreateStore(ThreadLongNew, SlotPtr);
  }

  if (!ShadowBase) {
    // Get shadow base address by aligning RecordPtr up.
    // Note: this is not correct if the pointer is already aligned.
    // Runtime library will make sure this never happens.
    ShadowBase = IRB.CreateAdd(
        IRB.CreateOr(
            ThreadLongMaybeUntagged,
            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
    ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
  }
}
1155 
1156 Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
1157   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1158   Function *ReadRegister =
1159       Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
1160   MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
1161   Value *Args[] = {MetadataAsValue::get(*C, MD)};
1162   return IRB.CreateCall(ReadRegister, Args);
1163 }
1164 
1165 bool HWAddressSanitizer::instrumentLandingPads(
1166     SmallVectorImpl<Instruction *> &LandingPadVec) {
1167   for (auto *LP : LandingPadVec) {
1168     IRBuilder<> IRB(LP->getNextNode());
1169     IRB.CreateCall(
1170         HWAsanHandleVfork,
1171         {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
1172                                                                       : "sp")});
1173   }
1174   return true;
1175 }
1176 
// For each interesting alloca: compute its tag, rewrite all users to the
// tagged pointer, prepend the tag offset to its debug expressions, tag its
// memory, and re-tag with the UAR tag before every function exit in RetVec.
bool HWAddressSanitizer::instrumentStack(
    SmallVectorImpl<AllocaInst *> &Allocas,
    DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
    SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  for (unsigned N = 0; N < Allocas.size(); ++N) {
    auto *AI = Allocas[N];
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    // Skip the cast that feeds the tagged pointer itself, so it still reads
    // the raw alloca address.
    AI->replaceUsesWithIf(Replacement,
                          [AILong](Use &U) { return U.getUser() != AILong; });

    for (auto *DDI : AllocaDbgMap.lookup(AI)) {
      // Prepend "tag_offset, N" to the dwarf expression.
      // Tag offset logically applies to the alloca pointer, and it makes sense
      // to put it at the beginning of the expression.
      SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
                                         retagMask(N)};
      auto Locations = DDI->location_ops();
      unsigned LocNo = std::distance(Locations.begin(), find(Locations, AI));
      DDI->setExpression(
          DIExpression::appendOpsToArg(DDI->getExpression(), NewOps, LocNo));
    }

    size_t Size = getAllocaSizeInBytes(*AI);
    tagAlloca(IRB, AI, Tag, Size);

    for (auto RI : RetVec) {
      IRB.SetInsertPoint(RI);

      // Re-tag alloca memory with the special UAR tag.
      // (This inner `Tag` shadows the alloca tag declared above.)
      Value *Tag = getUARTag(IRB, StackTag);
      tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getObjectAlignment()));
    }
  }

  return true;
}
1227 
1228 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1229   return (AI.getAllocatedType()->isSized() &&
1230           // FIXME: instrument dynamic allocas, too
1231           AI.isStaticAlloca() &&
1232           // alloca() may be called with 0 size, ignore it.
1233           getAllocaSizeInBytes(AI) > 0 &&
1234           // We are only interested in allocas not promotable to registers.
1235           // Promotable allocas are common under -O0.
1236           !isAllocaPromotable(&AI) &&
1237           // inalloca allocas are not treated as static, and we don't want
1238           // dynamic alloca instrumentation for them as well.
1239           !AI.isUsedWithInAlloca() &&
1240           // swifterror allocas are register promoted by ISel
1241           !AI.isSwiftError());
1242 }
1243 
// Per-function driver: collect everything to instrument in one scan, emit
// the prologue, tag stack allocas (padding them for short granules), then
// instrument each memory access and memory intrinsic. Returns true if the
// function was changed.
bool HWAddressSanitizer::sanitizeFunction(Function &F) {
  if (&F == HwasanCtorFunction)
    return false;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return false;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<AllocaInst *, 8> AllocasToInstrument;
  SmallVector<Instruction *, 8> RetVec;
  SmallVector<Instruction *, 8> LandingPadVec;
  DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> AllocaDbgMap;
  // Single pass over the function collecting work items.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (InstrumentStack)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
          if (isInterestingAlloca(*AI))
            AllocasToInstrument.push_back(AI);
          continue;
        }

      // All instructions through which control leaves the function.
      if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
          isa<CleanupReturnInst>(Inst))
        RetVec.push_back(&Inst);

      // Remember which debug intrinsics refer to which allocas.
      if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst))
        for (Value *V : DVI->location_ops())
          if (auto *Alloca = dyn_cast_or_null<AllocaInst>(V))
            AllocaDbgMap[Alloca].push_back(DVI);

      if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
        LandingPadVec.push_back(&Inst);

      getInterestingMemoryOperands(&Inst, OperandsToInstrument);

      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
        IntrinToInstrument.push_back(MI);
    }
  }

  initializeCallbacks(*F.getParent());

  bool Changed = false;

  if (!LandingPadVec.empty())
    Changed |= instrumentLandingPads(LandingPadVec);

  if (AllocasToInstrument.empty() && F.hasPersonalityFn() &&
      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
    // __hwasan_personality_thunk is a no-op for functions without an
    // instrumented stack, so we can drop it.
    F.setPersonalityFn(nullptr);
    Changed = true;
  }

  if (AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
      IntrinToInstrument.empty())
    return Changed;

  assert(!ShadowBase);

  Instruction *InsertPt = &*F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(InsertPt);
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory &&
                   Mapping.WithFrameRecord && !AllocasToInstrument.empty());

  if (!AllocasToInstrument.empty()) {
    Value *StackTag =
        ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
    instrumentStack(AllocasToInstrument, AllocaDbgMap, RetVec, StackTag);
  }
  // Pad and align each of the allocas that we instrumented to stop small
  // uninteresting allocas from hiding in instrumented alloca's padding and so
  // that we have enough space to store real tags for short granules.
  DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
  for (AllocaInst *AI : AllocasToInstrument) {
    uint64_t Size = getAllocaSizeInBytes(*AI);
    uint64_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
    AI->setAlignment(
        Align(std::max(AI->getAlignment(), Mapping.getObjectAlignment())));
    if (Size != AlignedSize) {
      // Wrap the allocated type in a struct with byte-array padding up to
      // the aligned size, and replace the alloca with the padded version.
      Type *AllocatedType = AI->getAllocatedType();
      if (AI->isArrayAllocation()) {
        uint64_t ArraySize =
            cast<ConstantInt>(AI->getArraySize())->getZExtValue();
        AllocatedType = ArrayType::get(AllocatedType, ArraySize);
      }
      Type *TypeWithPadding = StructType::get(
          AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size));
      auto *NewAI = new AllocaInst(
          TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
      NewAI->takeName(AI);
      NewAI->setAlignment(AI->getAlign());
      NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
      NewAI->setSwiftError(AI->isSwiftError());
      NewAI->copyMetadata(*AI);
      auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI);
      AI->replaceAllUsesWith(Bitcast);
      AllocaToPaddedAllocaMap[AI] = NewAI;
    }
  }

  if (!AllocaToPaddedAllocaMap.empty()) {
    // Repoint debug intrinsics at the padded allocas, then delete the
    // original (now unused) allocas.
    for (auto &BB : F) {
      for (auto &Inst : BB) {
        if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
          for (Value *V : DVI->location_ops()) {
            if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
              if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
                DVI->replaceVariableLocationOp(V, NewAI);
            }
          }
        }
      }
    }
    for (auto &P : AllocaToPaddedAllocaMap)
      P.first->eraseFromParent();
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = &*F.getEntryBlock().begin();
    for (auto II = EntryIRB.GetInsertBlock()->begin(),
              IE = EntryIRB.GetInsertBlock()->end();
         II != IE;) {
      Instruction *I = &*II++;
      if (auto *AI = dyn_cast<AllocaInst>(I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I->moveBefore(InsertPt);
    }
  }

  for (auto &Operand : OperandsToInstrument)
    instrumentMemAccess(Operand);

  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
    for (auto Inst : IntrinToInstrument)
      instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
  }

  // Reset per-function state before the next function is processed.
  ShadowBase = nullptr;
  StackBaseTag = nullptr;

  return true;
}
1395 
// Create a tagged clone of GV (padded for short granules), emit
// "hwasan_globals" descriptor(s) describing it, and replace GV with an alias
// whose address carries Tag in the top byte.
void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
  assert(!UsePageAliases);
  Constant *Initializer = GV->getInitializer();
  uint64_t SizeInBytes =
      M.getDataLayout().getTypeAllocSize(Initializer->getType());
  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
  if (SizeInBytes != NewSize) {
    // Pad the initializer out to the next multiple of 16 bytes and add the
    // required short granule tag.
    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
    Init.back() = Tag;
    Constant *Padding = ConstantDataArray::get(*C, Init);
    Initializer = ConstantStruct::getAnon({Initializer, Padding});
  }

  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
                                   GlobalValue::ExternalLinkage, Initializer,
                                   GV->getName() + ".hwasan");
  NewGV->copyAttributesFrom(GV);
  NewGV->setLinkage(GlobalValue::PrivateLinkage);
  NewGV->copyMetadata(GV, 0);
  NewGV->setAlignment(
      MaybeAlign(std::max(GV->getAlignment(), Mapping.getObjectAlignment())));

  // It is invalid to ICF two globals that have different tags. In the case
  // where the size of the global is a multiple of the tag granularity the
  // contents of the globals may be the same but the tags (i.e. symbol values)
  // may be different, and the symbols are not considered during ICF. In the
  // case where the size is not a multiple of the granularity, the short granule
  // tags would discriminate two globals with different tags, but there would
  // otherwise be nothing stopping such a global from being incorrectly ICF'd
  // with an uninstrumented (i.e. tag 0) global that happened to have the short
  // granule tag in the last byte.
  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Descriptor format (assuming little-endian):
  // bytes 0-3: relative address of global
  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
  // it isn't, we create multiple descriptors)
  // byte 7: tag
  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
  const uint64_t MaxDescriptorSize = 0xfffff0;
  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
       DescriptorPos += MaxDescriptorSize) {
    auto *Descriptor =
        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
                           nullptr, GV->getName() + ".hwasan.descriptor");
    // Store the global's address relative to the descriptor, offset by this
    // chunk's position, truncated to 32 bits.
    auto *GVRelPtr = ConstantExpr::getTrunc(
        ConstantExpr::getAdd(
            ConstantExpr::getSub(
                ConstantExpr::getPtrToInt(NewGV, Int64Ty),
                ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
            ConstantInt::get(Int64Ty, DescriptorPos)),
        Int32Ty);
    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
    Descriptor->setComdat(NewGV->getComdat());
    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
    Descriptor->setSection("hwasan_globals");
    // !associated ties the descriptor's liveness to the tagged global so the
    // two can be discarded together.
    Descriptor->setMetadata(LLVMContext::MD_associated,
                            MDNode::get(*C, ValueAsMetadata::get(NewGV)));
    appendToCompilerUsed(M, Descriptor);
  }

  // The alias evaluates to the tagged address of the new global; all existing
  // references to GV are redirected to it and GV itself is removed.
  Constant *Aliasee = ConstantExpr::getIntToPtr(
      ConstantExpr::getAdd(
          ConstantExpr::getPtrToInt(NewGV, Int64Ty),
          ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
      GV->getType());
  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
                                    GV->getLinkage(), "", Aliasee, &M);
  Alias->setVisibility(GV->getVisibility());
  Alias->takeName(GV);
  GV->replaceAllUsesWith(Alias);
  GV->eraseFromParent();
}
1472 
1473 void HWAddressSanitizer::instrumentGlobals() {
1474   std::vector<GlobalVariable *> Globals;
1475   for (GlobalVariable &GV : M.globals()) {
1476     if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
1477         GV.isThreadLocal())
1478       continue;
1479 
1480     // Common symbols can't have aliases point to them, so they can't be tagged.
1481     if (GV.hasCommonLinkage())
1482       continue;
1483 
1484     // Globals with custom sections may be used in __start_/__stop_ enumeration,
1485     // which would be broken both by adding tags and potentially by the extra
1486     // padding/alignment that we insert.
1487     if (GV.hasSection())
1488       continue;
1489 
1490     Globals.push_back(&GV);
1491   }
1492 
1493   MD5 Hasher;
1494   Hasher.update(M.getSourceFileName());
1495   MD5::MD5Result Hash;
1496   Hasher.final(Hash);
1497   uint8_t Tag = Hash[0];
1498 
1499   for (GlobalVariable *GV : Globals) {
1500     // Skip tag 0 in order to avoid collisions with untagged memory.
1501     if (Tag == 0)
1502       Tag = 1;
1503     instrumentGlobal(GV, Tag++);
1504   }
1505 }
1506 
void HWAddressSanitizer::instrumentPersonalityFunctions() {
  // We need to untag stack frames as we unwind past them. That is the job of
  // the personality function wrapper, which either wraps an existing
  // personality function or acts as a personality function on its own. Each
  // function that has a personality function or that can be unwound past has
  // its personality function changed to a thunk that calls the personality
  // function wrapper in the runtime.
  //
  // Key: the stripped original personality function, or null for sanitized
  // functions that declare none but may still be unwound past. Value: the
  // functions using that personality.
  MapVector<Constant *, std::vector<Function *>> PersonalityFns;
  for (Function &F : M) {
    if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
      continue;

    if (F.hasPersonalityFn()) {
      PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
    } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
      // No personality function, but not nounwind: it can be unwound past,
      // so it still needs the untagging thunk.
      PersonalityFns[nullptr].push_back(&F);
    }
  }

  if (PersonalityFns.empty())
    return;

  FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
      "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
      Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
  // Only the addresses of these libunwind entry points are needed (they are
  // bitcast to i8* and passed to the wrapper below), so a trivial void type
  // suffices for the declarations.
  FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
  FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);

  // Create one thunk per distinct personality function.
  for (auto &P : PersonalityFns) {
    // Encode the wrapped personality's name into the thunk name so identical
    // thunks from different translation units share a name (and comdat).
    std::string ThunkName = kHwasanPersonalityThunkName;
    if (P.first)
      ThunkName += ("." + P.first->getName()).str();
    FunctionType *ThunkFnTy = FunctionType::get(
        Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
    // A thunk referencing a local symbol (or a non-GlobalValue constant) must
    // itself be internal; otherwise emit a hidden linkonce_odr thunk in a
    // comdat so the linker deduplicates copies across translation units.
    bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
                               cast<GlobalValue>(P.first)->hasLocalLinkage());
    auto *ThunkFn = Function::Create(ThunkFnTy,
                                     IsLocal ? GlobalValue::InternalLinkage
                                             : GlobalValue::LinkOnceODRLinkage,
                                     ThunkName, &M);
    if (!IsLocal) {
      ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
      ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
    }

    // Thunk body: tail-call the runtime wrapper, forwarding the five
    // personality-routine arguments plus the real personality function (or
    // null) and the addresses of _Unwind_GetGR/_Unwind_GetCFA.
    auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
    IRBuilder<> IRB(BB);
    CallInst *WrapperCall = IRB.CreateCall(
        HwasanPersonalityWrapper,
        {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
         ThunkFn->getArg(3), ThunkFn->getArg(4),
         P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
                 : Constant::getNullValue(Int8PtrTy),
         IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
         IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
    WrapperCall->setTailCall();
    IRB.CreateRet(WrapperCall);

    // Point every function that used this personality at the thunk instead.
    for (Function *F : P.second)
      F->setPersonalityFn(ThunkFn);
  }
}
1569 
1570 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1571                                              bool InstrumentWithCalls) {
1572   Scale = kDefaultShadowScale;
1573   if (TargetTriple.isOSFuchsia()) {
1574     // Fuchsia is always PIE, which means that the beginning of the address
1575     // space is always available.
1576     InGlobal = false;
1577     InTls = false;
1578     Offset = 0;
1579     WithFrameRecord = true;
1580   } else if (ClMappingOffset.getNumOccurrences() > 0) {
1581     InGlobal = false;
1582     InTls = false;
1583     Offset = ClMappingOffset;
1584     WithFrameRecord = false;
1585   } else if (ClEnableKhwasan || InstrumentWithCalls) {
1586     InGlobal = false;
1587     InTls = false;
1588     Offset = 0;
1589     WithFrameRecord = false;
1590   } else if (ClWithIfunc) {
1591     InGlobal = true;
1592     InTls = false;
1593     Offset = kDynamicShadowSentinel;
1594     WithFrameRecord = false;
1595   } else if (ClWithTls) {
1596     InGlobal = false;
1597     InTls = true;
1598     Offset = kDynamicShadowSentinel;
1599     WithFrameRecord = true;
1600   } else {
1601     InGlobal = false;
1602     InTls = false;
1603     Offset = kDynamicShadowSentinel;
1604     WithFrameRecord = false;
1605   }
1606 }
1607