1 //===- HWAddressSanitizer.cpp - detector of uninitialized reads -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of HWAddressSanitizer, an address basic correctness
11 /// checker based on tagged addressing.
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
15 #include "llvm/ADT/MapVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringRef.h"
20 #include "llvm/ADT/Triple.h"
21 #include "llvm/Analysis/PostDominators.h"
22 #include "llvm/Analysis/StackSafetyAnalysis.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/BinaryFormat/Dwarf.h"
25 #include "llvm/BinaryFormat/ELF.h"
26 #include "llvm/IR/Attributes.h"
27 #include "llvm/IR/BasicBlock.h"
28 #include "llvm/IR/Constant.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DebugInfoMetadata.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Dominators.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/InstIterator.h"
38 #include "llvm/IR/Instruction.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/IntrinsicInst.h"
41 #include "llvm/IR/Intrinsics.h"
42 #include "llvm/IR/LLVMContext.h"
43 #include "llvm/IR/MDBuilder.h"
44 #include "llvm/IR/Module.h"
45 #include "llvm/IR/Type.h"
46 #include "llvm/IR/Value.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
52 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
53 #include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
54 #include "llvm/Transforms/Utils/ModuleUtils.h"
55 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
56 
57 using namespace llvm;
58 
59 #define DEBUG_TYPE "hwasan"
60 
61 const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
62 const char kHwasanNoteName[] = "hwasan.note";
63 const char kHwasanInitName[] = "__hwasan_init";
64 const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
65 
66 const char kHwasanShadowMemoryDynamicAddress[] =
67     "__hwasan_shadow_memory_dynamic_address";
68 
69 // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
70 static const size_t kNumberOfAccessSizes = 5;
71 
72 static const size_t kDefaultShadowScale = 4;
73 static const uint64_t kDynamicShadowSentinel =
74     std::numeric_limits<uint64_t>::max();
75 
76 static const unsigned kShadowBaseAlignment = 32;
77 
78 static cl::opt<std::string>
79     ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
80                                  cl::desc("Prefix for memory access callbacks"),
81                                  cl::Hidden, cl::init("__hwasan_"));
82 
83 static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
84     "hwasan-kernel-mem-intrinsic-prefix",
85     cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
86     cl::init(false));
87 
88 static cl::opt<bool> ClInstrumentWithCalls(
89     "hwasan-instrument-with-calls",
90     cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
91     cl::init(false));
92 
93 static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
94                                        cl::desc("instrument read instructions"),
95                                        cl::Hidden, cl::init(true));
96 
97 static cl::opt<bool>
98     ClInstrumentWrites("hwasan-instrument-writes",
99                        cl::desc("instrument write instructions"), cl::Hidden,
100                        cl::init(true));
101 
102 static cl::opt<bool> ClInstrumentAtomics(
103     "hwasan-instrument-atomics",
104     cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
105     cl::init(true));
106 
107 static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
108                                        cl::desc("instrument byval arguments"),
109                                        cl::Hidden, cl::init(true));
110 
111 static cl::opt<bool>
112     ClRecover("hwasan-recover",
113               cl::desc("Enable recovery mode (continue-after-error)."),
114               cl::Hidden, cl::init(false));
115 
116 static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
117                                        cl::desc("instrument stack (allocas)"),
118                                        cl::Hidden, cl::init(true));
119 
120 static cl::opt<bool>
121     ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
122                      cl::Hidden, cl::desc("Use Stack Safety analysis results"),
123                      cl::Optional);
124 
125 static cl::opt<size_t> ClMaxLifetimes(
126     "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
127     cl::ReallyHidden,
128     cl::desc("How many lifetime ends to handle for a single alloca."),
129     cl::Optional);
130 
131 static cl::opt<bool>
132     ClUseAfterScope("hwasan-use-after-scope",
133                     cl::desc("detect use after scope within function"),
134                     cl::Hidden, cl::init(false));
135 
136 static cl::opt<bool> ClUARRetagToZero(
137     "hwasan-uar-retag-to-zero",
138     cl::desc("Clear alloca tags before returning from the function to allow "
139              "non-instrumented and instrumented function calls mix. When set "
140              "to false, allocas are retagged before returning from the "
141              "function to detect use after return."),
142     cl::Hidden, cl::init(true));
143 
144 static cl::opt<bool> ClGenerateTagsWithCalls(
145     "hwasan-generate-tags-with-calls",
146     cl::desc("generate new tags with runtime library calls"), cl::Hidden,
147     cl::init(false));
148 
149 static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
150                                cl::Hidden, cl::init(false));
151 
152 static cl::opt<int> ClMatchAllTag(
153     "hwasan-match-all-tag",
154     cl::desc("don't report bad accesses via pointers with this tag"),
155     cl::Hidden, cl::init(-1));
156 
157 static cl::opt<bool>
158     ClEnableKhwasan("hwasan-kernel",
159                     cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
160                     cl::Hidden, cl::init(false));
161 
162 // These flags allow to change the shadow mapping and control how shadow memory
163 // is accessed. The shadow mapping looks like:
164 //    Shadow = (Mem >> scale) + offset
165 
166 static cl::opt<uint64_t>
167     ClMappingOffset("hwasan-mapping-offset",
168                     cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
169                     cl::Hidden, cl::init(0));
170 
171 static cl::opt<bool>
172     ClWithIfunc("hwasan-with-ifunc",
173                 cl::desc("Access dynamic shadow through an ifunc global on "
174                          "platforms that support this"),
175                 cl::Hidden, cl::init(false));
176 
177 static cl::opt<bool> ClWithTls(
178     "hwasan-with-tls",
179     cl::desc("Access dynamic shadow through an thread-local pointer on "
180              "platforms that support this"),
181     cl::Hidden, cl::init(true));
182 
183 static cl::opt<bool>
184     ClRecordStackHistory("hwasan-record-stack-history",
185                          cl::desc("Record stack frames with tagged allocations "
186                                   "in a thread-local ring buffer"),
187                          cl::Hidden, cl::init(true));
188 static cl::opt<bool>
189     ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
190                               cl::desc("instrument memory intrinsics"),
191                               cl::Hidden, cl::init(true));
192 
193 static cl::opt<bool>
194     ClInstrumentLandingPads("hwasan-instrument-landing-pads",
195                             cl::desc("instrument landing pads"), cl::Hidden,
196                             cl::init(false));
197 
198 static cl::opt<bool> ClUseShortGranules(
199     "hwasan-use-short-granules",
200     cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
201     cl::init(false));
202 
203 static cl::opt<bool> ClInstrumentPersonalityFunctions(
204     "hwasan-instrument-personality-functions",
205     cl::desc("instrument personality functions"), cl::Hidden);
206 
207 static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
208                                        cl::desc("inline all checks"),
209                                        cl::Hidden, cl::init(false));
210 
211 // Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
212 static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
213                                       cl::desc("Use page aliasing in HWASan"),
214                                       cl::Hidden, cl::init(false));
215 
216 namespace {
217 
218 bool shouldUsePageAliases(const Triple &TargetTriple) {
219   return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
220 }
221 
222 bool shouldInstrumentStack(const Triple &TargetTriple) {
223   return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
224 }
225 
226 bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
227   return ClInstrumentWithCalls || TargetTriple.getArch() == Triple::x86_64;
228 }
229 
230 bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
231   return ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
232                                               : !DisableOptimization;
233 }
234 
235 bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
236                                   bool DisableOptimization) {
237   return shouldInstrumentStack(TargetTriple) &&
238          mightUseStackSafetyAnalysis(DisableOptimization);
239 }
240 
241 bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
242   return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
243 }
244 
245 /// An instrumentation pass implementing detection of addressability bugs
246 /// using tagged pointers.
247 class HWAddressSanitizer {
248 public:
249   HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
250                      const StackSafetyGlobalInfo *SSI)
251       : M(M), SSI(SSI) {
252     this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
253     this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
254                               ? ClEnableKhwasan
255                               : CompileKernel;
256 
257     initializeModule();
258   }
259 
260   void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
261 
262   bool sanitizeFunction(Function &F,
263                         llvm::function_ref<const DominatorTree &()> GetDT,
264                         llvm::function_ref<const PostDominatorTree &()> GetPDT);
265   void initializeModule();
266   void createHwasanCtorComdat();
267 
268   void initializeCallbacks(Module &M);
269 
270   Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
271 
272   Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
273   Value *getShadowNonTls(IRBuilder<> &IRB);
274 
275   void untagPointerOperand(Instruction *I, Value *Addr);
276   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
277 
278   int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
279   void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
280                                   unsigned AccessSizeIndex,
281                                   Instruction *InsertBefore);
282   void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
283                                  unsigned AccessSizeIndex,
284                                  Instruction *InsertBefore);
285   bool ignoreMemIntrinsic(MemIntrinsic *MI);
286   void instrumentMemIntrinsic(MemIntrinsic *MI);
287   bool instrumentMemAccess(InterestingMemoryOperand &O);
288   bool ignoreAccess(Instruction *Inst, Value *Ptr);
289   void getInterestingMemoryOperands(
290       Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
291 
292   bool isInterestingAlloca(const AllocaInst &AI);
293   void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
294   Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
295   Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
296   bool instrumentStack(memtag::StackInfo &Info, Value *StackTag,
297                        llvm::function_ref<const DominatorTree &()> GetDT,
298                        llvm::function_ref<const PostDominatorTree &()> GetPDT);
299   Value *readRegister(IRBuilder<> &IRB, StringRef Name);
300   bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
301   Value *getNextTagWithCall(IRBuilder<> &IRB);
302   Value *getStackBaseTag(IRBuilder<> &IRB);
303   Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
304                       unsigned AllocaNo);
305   Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
306 
307   Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
308   Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
309   unsigned retagMask(unsigned AllocaNo);
310 
311   void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
312 
313   void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
314   void instrumentGlobals();
315 
316   void instrumentPersonalityFunctions();
317 
318 private:
319   LLVMContext *C;
320   Module &M;
321   const StackSafetyGlobalInfo *SSI;
322   Triple TargetTriple;
323   FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
324   FunctionCallee HWAsanHandleVfork;
325 
326   /// This struct defines the shadow mapping using the rule:
327   ///   shadow = (mem >> Scale) + Offset.
328   /// If InGlobal is true, then
329   ///   extern char __hwasan_shadow[];
330   ///   shadow = (mem >> Scale) + &__hwasan_shadow
331   /// If InTls is true, then
332   ///   extern char *__hwasan_tls;
333   ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
334   ///
335   /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
336   /// ring buffer for storing stack allocations on targets that support it.
337   struct ShadowMapping {
338     int Scale;
339     uint64_t Offset;
340     bool InGlobal;
341     bool InTls;
342     bool WithFrameRecord;
343 
344     void init(Triple &TargetTriple, bool InstrumentWithCalls);
345     uint64_t getObjectAlignment() const { return 1ULL << Scale; }
346   };
347 
348   ShadowMapping Mapping;
349 
350   Type *VoidTy = Type::getVoidTy(M.getContext());
351   Type *IntptrTy;
352   Type *Int8PtrTy;
353   Type *Int8Ty;
354   Type *Int32Ty;
355   Type *Int64Ty = Type::getInt64Ty(M.getContext());
356 
357   bool CompileKernel;
358   bool Recover;
359   bool OutlinedChecks;
360   bool UseShortGranules;
361   bool InstrumentLandingPads;
362   bool InstrumentWithCalls;
363   bool InstrumentStack;
364   bool DetectUseAfterScope;
365   bool UsePageAliases;
366 
367   bool HasMatchAllTag = false;
368   uint8_t MatchAllTag = 0;
369 
370   unsigned PointerTagShift;
371   uint64_t TagMaskByte;
372 
373   Function *HwasanCtorFunction;
374 
375   FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
376   FunctionCallee HwasanMemoryAccessCallbackSized[2];
377 
378   FunctionCallee HwasanTagMemoryFunc;
379   FunctionCallee HwasanGenerateTagFunc;
380 
381   Constant *ShadowGlobal;
382 
383   Value *ShadowBase = nullptr;
384   Value *StackBaseTag = nullptr;
385   GlobalValue *ThreadPtrGlobal = nullptr;
386 };
387 
388 } // end anonymous namespace
389 
390 PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
391                                               ModuleAnalysisManager &MAM) {
392   const StackSafetyGlobalInfo *SSI = nullptr;
393   auto TargetTriple = llvm::Triple(M.getTargetTriple());
394   if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
395     SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
396 
397   HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
398   bool Modified = false;
399   auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
400   for (Function &F : M) {
401     Modified |= HWASan.sanitizeFunction(
402         F,
403         [&]() -> const DominatorTree & {
404           return FAM.getResult<DominatorTreeAnalysis>(F);
405         },
406         [&]() -> const PostDominatorTree & {
407           return FAM.getResult<PostDominatorTreeAnalysis>(F);
408         });
409   }
410   if (Modified)
411     return PreservedAnalyses::none();
412   return PreservedAnalyses::all();
413 }
414 void HWAddressSanitizerPass::printPipeline(
415     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
416   static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
417       OS, MapClassName2PassName);
418   OS << "<";
419   if (Options.CompileKernel)
420     OS << "kernel;";
421   if (Options.Recover)
422     OS << "recover";
423   OS << ">";
424 }
425 
426 void HWAddressSanitizer::createHwasanCtorComdat() {
427   std::tie(HwasanCtorFunction, std::ignore) =
428       getOrCreateSanitizerCtorAndInitFunctions(
429           M, kHwasanModuleCtorName, kHwasanInitName,
430           /*InitArgTypes=*/{},
431           /*InitArgs=*/{},
432           // This callback is invoked when the functions are created the first
433           // time. Hook them into the global ctors list in that case:
434           [&](Function *Ctor, FunctionCallee) {
435             Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
436             Ctor->setComdat(CtorComdat);
437             appendToGlobalCtors(M, Ctor, 0, Ctor);
438           });
439 
440   // Create a note that contains pointers to the list of global
441   // descriptors. Adding a note to the output file will cause the linker to
442   // create a PT_NOTE program header pointing to the note that we can use to
443   // find the descriptor list starting from the program headers. A function
444   // provided by the runtime initializes the shadow memory for the globals by
445   // accessing the descriptor list via the note. The dynamic loader needs to
446   // call this function whenever a library is loaded.
447   //
448   // The reason why we use a note for this instead of a more conventional
449   // approach of having a global constructor pass a descriptor list pointer to
450   // the runtime is because of an order of initialization problem. With
451   // constructors we can encounter the following problematic scenario:
452   //
453   // 1) library A depends on library B and also interposes one of B's symbols
454   // 2) B's constructors are called before A's (as required for correctness)
455   // 3) during construction, B accesses one of its "own" globals (actually
456   //    interposed by A) and triggers a HWASAN failure due to the initialization
457   //    for A not having happened yet
458   //
459   // Even without interposition it is possible to run into similar situations in
460   // cases where two libraries mutually depend on each other.
461   //
462   // We only need one note per binary, so put everything for the note in a
463   // comdat. This needs to be a comdat with an .init_array section to prevent
464   // newer versions of lld from discarding the note.
465   //
466   // Create the note even if we aren't instrumenting globals. This ensures that
467   // binaries linked from object files with both instrumented and
468   // non-instrumented globals will end up with a note, even if a comdat from an
469   // object file with non-instrumented globals is selected. The note is harmless
470   // if the runtime doesn't support it, since it will just be ignored.
471   Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
472 
473   Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
474   auto Start =
475       new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
476                          nullptr, "__start_hwasan_globals");
477   Start->setVisibility(GlobalValue::HiddenVisibility);
478   Start->setDSOLocal(true);
479   auto Stop =
480       new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
481                          nullptr, "__stop_hwasan_globals");
482   Stop->setVisibility(GlobalValue::HiddenVisibility);
483   Stop->setDSOLocal(true);
484 
485   // Null-terminated so actually 8 bytes, which are required in order to align
486   // the note properly.
487   auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
488 
489   auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
490                                  Int32Ty, Int32Ty);
491   auto *Note =
492       new GlobalVariable(M, NoteTy, /*isConstant=*/true,
493                          GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
494   Note->setSection(".note.hwasan.globals");
495   Note->setComdat(NoteComdat);
496   Note->setAlignment(Align(4));
497   Note->setDSOLocal(true);
498 
499   // The pointers in the note need to be relative so that the note ends up being
500   // placed in rodata, which is the standard location for notes.
501   auto CreateRelPtr = [&](Constant *Ptr) {
502     return ConstantExpr::getTrunc(
503         ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
504                              ConstantExpr::getPtrToInt(Note, Int64Ty)),
505         Int32Ty);
506   };
507   Note->setInitializer(ConstantStruct::getAnon(
508       {ConstantInt::get(Int32Ty, 8),                           // n_namesz
509        ConstantInt::get(Int32Ty, 8),                           // n_descsz
510        ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
511        Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
512   appendToCompilerUsed(M, Note);
513 
514   // Create a zero-length global in hwasan_globals so that the linker will
515   // always create start and stop symbols.
516   auto Dummy = new GlobalVariable(
517       M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
518       Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
519   Dummy->setSection("hwasan_globals");
520   Dummy->setComdat(NoteComdat);
521   Dummy->setMetadata(LLVMContext::MD_associated,
522                      MDNode::get(*C, ValueAsMetadata::get(Note)));
523   appendToCompilerUsed(M, Dummy);
524 }
525 
526 /// Module-level initialization.
527 ///
528 /// inserts a call to __hwasan_init to the module's constructor list.
529 void HWAddressSanitizer::initializeModule() {
530   LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
531   auto &DL = M.getDataLayout();
532 
533   TargetTriple = Triple(M.getTargetTriple());
534 
535   // x86_64 currently has two modes:
536   // - Intel LAM (default)
537   // - pointer aliasing (heap only)
538   bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
539   UsePageAliases = shouldUsePageAliases(TargetTriple);
540   InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
541   InstrumentStack = shouldInstrumentStack(TargetTriple);
542   DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
543   PointerTagShift = IsX86_64 ? 57 : 56;
544   TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
545 
546   Mapping.init(TargetTriple, InstrumentWithCalls);
547 
548   C = &(M.getContext());
549   IRBuilder<> IRB(*C);
550   IntptrTy = IRB.getIntPtrTy(DL);
551   Int8PtrTy = IRB.getInt8PtrTy();
552   Int8Ty = IRB.getInt8Ty();
553   Int32Ty = IRB.getInt32Ty();
554 
555   HwasanCtorFunction = nullptr;
556 
557   // Older versions of Android do not have the required runtime support for
558   // short granules, global or personality function instrumentation. On other
559   // platforms we currently require using the latest version of the runtime.
560   bool NewRuntime =
561       !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
562 
563   UseShortGranules =
564       ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
565   OutlinedChecks =
566       TargetTriple.isAArch64() && TargetTriple.isOSBinFormatELF() &&
567       (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
568 
569   if (ClMatchAllTag.getNumOccurrences()) {
570     if (ClMatchAllTag != -1) {
571       HasMatchAllTag = true;
572       MatchAllTag = ClMatchAllTag & 0xFF;
573     }
574   } else if (CompileKernel) {
575     HasMatchAllTag = true;
576     MatchAllTag = 0xFF;
577   }
578 
579   // If we don't have personality function support, fall back to landing pads.
580   InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
581                               ? ClInstrumentLandingPads
582                               : !NewRuntime;
583 
584   if (!CompileKernel) {
585     createHwasanCtorComdat();
586     bool InstrumentGlobals =
587         ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;
588 
589     if (InstrumentGlobals && !UsePageAliases)
590       instrumentGlobals();
591 
592     bool InstrumentPersonalityFunctions =
593         ClInstrumentPersonalityFunctions.getNumOccurrences()
594             ? ClInstrumentPersonalityFunctions
595             : NewRuntime;
596     if (InstrumentPersonalityFunctions)
597       instrumentPersonalityFunctions();
598   }
599 
600   if (!TargetTriple.isAndroid()) {
601     Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
602       auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
603                                     GlobalValue::ExternalLinkage, nullptr,
604                                     "__hwasan_tls", nullptr,
605                                     GlobalVariable::InitialExecTLSModel);
606       appendToCompilerUsed(M, GV);
607       return GV;
608     });
609     ThreadPtrGlobal = cast<GlobalVariable>(C);
610   }
611 }
612 
613 void HWAddressSanitizer::initializeCallbacks(Module &M) {
614   IRBuilder<> IRB(*C);
615   for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
616     const std::string TypeStr = AccessIsWrite ? "store" : "load";
617     const std::string EndingStr = Recover ? "_noabort" : "";
618 
619     HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
620         ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
621         FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));
622 
623     for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
624          AccessSizeIndex++) {
625       HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
626           M.getOrInsertFunction(
627               ClMemoryAccessCallbackPrefix + TypeStr +
628                   itostr(1ULL << AccessSizeIndex) + EndingStr,
629               FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
630     }
631   }
632 
633   HwasanTagMemoryFunc = M.getOrInsertFunction(
634       "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
635   HwasanGenerateTagFunc =
636       M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
637 
638   ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
639                                      ArrayType::get(IRB.getInt8Ty(), 0));
640 
641   const std::string MemIntrinCallbackPrefix =
642       (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
643           ? std::string("")
644           : ClMemoryAccessCallbackPrefix;
645   HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
646                                         IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
647                                         IRB.getInt8PtrTy(), IntptrTy);
648   HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
649                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
650                                        IRB.getInt8PtrTy(), IntptrTy);
651   HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
652                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
653                                        IRB.getInt32Ty(), IntptrTy);
654 
655   HWAsanHandleVfork =
656       M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
657 }
658 
659 Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
660   // An empty inline asm with input reg == output reg.
661   // An opaque no-op cast, basically.
662   // This prevents code bloat as a result of rematerializing trivial definitions
663   // such as constants or global addresses at every load and store.
664   InlineAsm *Asm =
665       InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
666                      StringRef(""), StringRef("=r,0"),
667                      /*hasSideEffects=*/false);
668   return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
669 }
670 
671 Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
672   return getOpaqueNoopCast(IRB, ShadowGlobal);
673 }
674 
675 Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
676   if (Mapping.Offset != kDynamicShadowSentinel)
677     return getOpaqueNoopCast(
678         IRB, ConstantExpr::getIntToPtr(
679                  ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));
680 
681   if (Mapping.InGlobal) {
682     return getDynamicShadowIfunc(IRB);
683   } else {
684     Value *GlobalDynamicAddress =
685         IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
686             kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
687     return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
688   }
689 }
690 
691 bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
692   // Do not instrument acesses from different address spaces; we cannot deal
693   // with them.
694   Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
695   if (PtrTy->getPointerAddressSpace() != 0)
696     return true;
697 
698   // Ignore swifterror addresses.
699   // swifterror memory addresses are mem2reg promoted by instruction
700   // selection. As such they cannot have regular uses like an instrumentation
701   // function and it makes no sense to track them as memory.
702   if (Ptr->isSwiftError())
703     return true;
704 
705   if (findAllocaForValue(Ptr)) {
706     if (!InstrumentStack)
707       return true;
708     if (SSI && SSI->stackAccessIsSafe(*Inst))
709       return true;
710   }
711   return false;
712 }
713 
714 void HWAddressSanitizer::getInterestingMemoryOperands(
715     Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
716   // Skip memory accesses inserted by another instrumentation.
717   if (I->hasMetadata(LLVMContext::MD_nosanitize))
718     return;
719 
720   // Do not instrument the load fetching the dynamic shadow address.
721   if (ShadowBase == I)
722     return;
723 
724   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
725     if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
726       return;
727     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
728                              LI->getType(), LI->getAlign());
729   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
730     if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
731       return;
732     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
733                              SI->getValueOperand()->getType(), SI->getAlign());
734   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
735     if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
736       return;
737     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
738                              RMW->getValOperand()->getType(), None);
739   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
740     if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
741       return;
742     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
743                              XCHG->getCompareOperand()->getType(), None);
744   } else if (auto CI = dyn_cast<CallInst>(I)) {
745     for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
746       if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
747           ignoreAccess(I, CI->getArgOperand(ArgNo)))
748         continue;
749       Type *Ty = CI->getParamByValType(ArgNo);
750       Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
751     }
752   }
753 }
754 
755 static unsigned getPointerOperandIndex(Instruction *I) {
756   if (LoadInst *LI = dyn_cast<LoadInst>(I))
757     return LI->getPointerOperandIndex();
758   if (StoreInst *SI = dyn_cast<StoreInst>(I))
759     return SI->getPointerOperandIndex();
760   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
761     return RMW->getPointerOperandIndex();
762   if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
763     return XCHG->getPointerOperandIndex();
764   report_fatal_error("Unexpected instruction");
765   return -1;
766 }
767 
768 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
769   size_t Res = countTrailingZeros(TypeSize / 8);
770   assert(Res < kNumberOfAccessSizes);
771   return Res;
772 }
773 
774 void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
775   if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64)
776     return;
777 
778   IRBuilder<> IRB(I);
779   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
780   Value *UntaggedPtr =
781       IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
782   I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
783 }
784 
785 Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
786   // Mem >> Scale
787   Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
788   if (Mapping.Offset == 0)
789     return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
790   // (Mem >> Scale) + Offset
791   return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
792 }
793 
794 int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
795                                           unsigned AccessSizeIndex) {
796   return (CompileKernel << HWASanAccessInfo::CompileKernelShift) +
797          (HasMatchAllTag << HWASanAccessInfo::HasMatchAllShift) +
798          (MatchAllTag << HWASanAccessInfo::MatchAllShift) +
799          (Recover << HWASanAccessInfo::RecoverShift) +
800          (IsWrite << HWASanAccessInfo::IsWriteShift) +
801          (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
802 }
803 
804 void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
805                                                     unsigned AccessSizeIndex,
806                                                     Instruction *InsertBefore) {
807   assert(!UsePageAliases);
808   const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
809   IRBuilder<> IRB(InsertBefore);
810   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
811   Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
812   IRB.CreateCall(Intrinsic::getDeclaration(
813                      M, UseShortGranules
814                             ? Intrinsic::hwasan_check_memaccess_shortgranules
815                             : Intrinsic::hwasan_check_memaccess),
816                  {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
817 }
818 
819 void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
820                                                    unsigned AccessSizeIndex,
821                                                    Instruction *InsertBefore) {
822   assert(!UsePageAliases);
823   const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
824   IRBuilder<> IRB(InsertBefore);
825 
826   Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
827   Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, PointerTagShift),
828                                   IRB.getInt8Ty());
829   Value *AddrLong = untagPointer(IRB, PtrLong);
830   Value *Shadow = memToShadow(AddrLong, IRB);
831   Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
832   Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
833 
834   if (HasMatchAllTag) {
835     Value *TagNotIgnored = IRB.CreateICmpNE(
836         PtrTag, ConstantInt::get(PtrTag->getType(), MatchAllTag));
837     TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
838   }
839 
840   Instruction *CheckTerm =
841       SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
842                                 MDBuilder(*C).createBranchWeights(1, 100000));
843 
844   IRB.SetInsertPoint(CheckTerm);
845   Value *OutOfShortGranuleTagRange =
846       IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
847   Instruction *CheckFailTerm =
848       SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
849                                 MDBuilder(*C).createBranchWeights(1, 100000));
850 
851   IRB.SetInsertPoint(CheckTerm);
852   Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
853   PtrLowBits = IRB.CreateAdd(
854       PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
855   Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
856   SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
857                             MDBuilder(*C).createBranchWeights(1, 100000),
858                             (DomTreeUpdater *)nullptr, nullptr,
859                             CheckFailTerm->getParent());
860 
861   IRB.SetInsertPoint(CheckTerm);
862   Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
863   InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
864   Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
865   Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
866   SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
867                             MDBuilder(*C).createBranchWeights(1, 100000),
868                             (DomTreeUpdater *)nullptr, nullptr,
869                             CheckFailTerm->getParent());
870 
871   IRB.SetInsertPoint(CheckFailTerm);
872   InlineAsm *Asm;
873   switch (TargetTriple.getArch()) {
874   case Triple::x86_64:
875     // The signal handler will find the data address in rdi.
876     Asm = InlineAsm::get(
877         FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
878         "int3\nnopl " +
879             itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
880             "(%rax)",
881         "{rdi}",
882         /*hasSideEffects=*/true);
883     break;
884   case Triple::aarch64:
885   case Triple::aarch64_be:
886     // The signal handler will find the data address in x0.
887     Asm = InlineAsm::get(
888         FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
889         "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
890         "{x0}",
891         /*hasSideEffects=*/true);
892     break;
893   default:
894     report_fatal_error("unsupported architecture");
895   }
896   IRB.CreateCall(Asm, PtrLong);
897   if (Recover)
898     cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
899 }
900 
901 bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
902   if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
903     return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
904            (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
905   }
906   if (isa<MemSetInst>(MI))
907     return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
908   return false;
909 }
910 
911 void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
912   IRBuilder<> IRB(MI);
913   if (isa<MemTransferInst>(MI)) {
914     IRB.CreateCall(
915         isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
916         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
917          IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
918          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
919   } else if (isa<MemSetInst>(MI)) {
920     IRB.CreateCall(
921         HWAsanMemset,
922         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
923          IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
924          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
925   }
926   MI->eraseFromParent();
927 }
928 
929 bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
930   Value *Addr = O.getPtr();
931 
932   LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
933 
934   if (O.MaybeMask)
935     return false; // FIXME
936 
937   IRBuilder<> IRB(O.getInsn());
938   if (isPowerOf2_64(O.TypeSize) &&
939       (O.TypeSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
940       (!O.Alignment || *O.Alignment >= (1ULL << Mapping.Scale) ||
941        *O.Alignment >= O.TypeSize / 8)) {
942     size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
943     if (InstrumentWithCalls) {
944       IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
945                      IRB.CreatePointerCast(Addr, IntptrTy));
946     } else if (OutlinedChecks) {
947       instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
948     } else {
949       instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
950     }
951   } else {
952     IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
953                    {IRB.CreatePointerCast(Addr, IntptrTy),
954                     ConstantInt::get(IntptrTy, O.TypeSize / 8)});
955   }
956   untagPointerOperand(O.getInsn(), Addr);
957 
958   return true;
959 }
960 
961 void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
962                                    size_t Size) {
963   size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
964   if (!UseShortGranules)
965     Size = AlignedSize;
966 
967   Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
968   if (InstrumentWithCalls) {
969     IRB.CreateCall(HwasanTagMemoryFunc,
970                    {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
971                     ConstantInt::get(IntptrTy, AlignedSize)});
972   } else {
973     size_t ShadowSize = Size >> Mapping.Scale;
974     Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
975     // If this memset is not inlined, it will be intercepted in the hwasan
976     // runtime library. That's OK, because the interceptor skips the checks if
977     // the address is in the shadow region.
978     // FIXME: the interceptor is not as fast as real memset. Consider lowering
979     // llvm.memset right here into either a sequence of stores, or a call to
980     // hwasan_tag_memory.
981     if (ShadowSize)
982       IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, Align(1));
983     if (Size != AlignedSize) {
984       IRB.CreateStore(
985           ConstantInt::get(Int8Ty, Size % Mapping.getObjectAlignment()),
986           IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
987       IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
988                                    Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
989                                    AlignedSize - 1));
990     }
991   }
992 }
993 
994 unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
995   if (TargetTriple.getArch() == Triple::x86_64)
996     return AllocaNo & TagMaskByte;
997 
998   // A list of 8-bit numbers that have at most one run of non-zero bits.
999   // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1000   // masks.
1001   // The list does not include the value 255, which is used for UAR.
1002   //
1003   // Because we are more likely to use earlier elements of this list than later
1004   // ones, it is sorted in increasing order of probability of collision with a
1005   // mask allocated (temporally) nearby. The program that generated this list
1006   // can be found at:
1007   // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1008   static unsigned FastMasks[] = {0,  128, 64,  192, 32,  96,  224, 112, 240,
1009                                  48, 16,  120, 248, 56,  24,  8,   124, 252,
1010                                  60, 28,  12,  4,   126, 254, 62,  30,  14,
1011                                  6,  2,   127, 63,  31,  15,  7,   3,   1};
1012   return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
1013 }
1014 
1015 Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1016   if (TargetTriple.getArch() == Triple::x86_64) {
1017     Constant *TagMask = ConstantInt::get(IntptrTy, TagMaskByte);
1018     Value *NewTag = IRB.CreateAnd(OldTag, TagMask);
1019     return NewTag;
1020   }
1021   // aarch64 uses 8-bit tags, so no mask is needed.
1022   return OldTag;
1023 }
1024 
1025 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1026   return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1027 }
1028 
1029 Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1030   if (ClGenerateTagsWithCalls)
1031     return getNextTagWithCall(IRB);
1032   if (StackBaseTag)
1033     return StackBaseTag;
1034   // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
1035   // first).
1036   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1037   auto GetStackPointerFn = Intrinsic::getDeclaration(
1038       M, Intrinsic::frameaddress,
1039       IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1040   Value *StackPointer = IRB.CreateCall(
1041       GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});
1042 
1043   // Extract some entropy from the stack pointer for the tags.
1044   // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1045   // between functions).
1046   Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
1047   Value *StackTag =
1048       applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
1049                                       IRB.CreateLShr(StackPointerLong, 20)));
1050   StackTag->setName("hwasan.stack.base.tag");
1051   return StackTag;
1052 }
1053 
1054 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1055                                         AllocaInst *AI, unsigned AllocaNo) {
1056   if (ClGenerateTagsWithCalls)
1057     return getNextTagWithCall(IRB);
1058   return IRB.CreateXor(StackTag,
1059                        ConstantInt::get(IntptrTy, retagMask(AllocaNo)));
1060 }
1061 
1062 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
1063   if (ClUARRetagToZero)
1064     return ConstantInt::get(IntptrTy, 0);
1065   if (ClGenerateTagsWithCalls)
1066     return getNextTagWithCall(IRB);
1067   return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, TagMaskByte));
1068 }
1069 
1070 // Add a tag to an address.
1071 Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1072                                       Value *PtrLong, Value *Tag) {
1073   assert(!UsePageAliases);
1074   Value *TaggedPtrLong;
1075   if (CompileKernel) {
1076     // Kernel addresses have 0xFF in the most significant byte.
1077     Value *ShiftedTag =
1078         IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1079                      ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1080     TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1081   } else {
1082     // Userspace can simply do OR (tag << PointerTagShift);
1083     Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1084     TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1085   }
1086   return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1087 }
1088 
1089 // Remove tag from an address.
1090 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1091   assert(!UsePageAliases);
1092   Value *UntaggedPtrLong;
1093   if (CompileKernel) {
1094     // Kernel addresses have 0xFF in the most significant byte.
1095     UntaggedPtrLong =
1096         IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1097                                                0xFFULL << PointerTagShift));
1098   } else {
1099     // Userspace addresses have 0x00.
1100     UntaggedPtrLong =
1101         IRB.CreateAnd(PtrLong, ConstantInt::get(PtrLong->getType(),
1102                                                 ~(0xFFULL << PointerTagShift)));
1103   }
1104   return UntaggedPtrLong;
1105 }
1106 
1107 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
1108   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1109   if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
1110     // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1111     // in Bionic's libc/private/bionic_tls.h.
1112     Function *ThreadPointerFunc =
1113         Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
1114     Value *SlotPtr = IRB.CreatePointerCast(
1115         IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
1116                                IRB.CreateCall(ThreadPointerFunc), 0x30),
1117         Ty->getPointerTo(0));
1118     return SlotPtr;
1119   }
1120   if (ThreadPtrGlobal)
1121     return ThreadPtrGlobal;
1122 
1123   return nullptr;
1124 }
1125 
1126 void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1127   if (!Mapping.InTls)
1128     ShadowBase = getShadowNonTls(IRB);
1129   else if (!WithFrameRecord && TargetTriple.isAndroid())
1130     ShadowBase = getDynamicShadowIfunc(IRB);
1131 
1132   if (!WithFrameRecord && ShadowBase)
1133     return;
1134 
1135   Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
1136   assert(SlotPtr);
1137 
1138   Value *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1139   // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
1140   Value *ThreadLongMaybeUntagged =
1141       TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);
1142 
1143   if (WithFrameRecord) {
1144     Function *F = IRB.GetInsertBlock()->getParent();
1145     StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1146 
1147     // Prepare ring buffer data.
1148     Value *PC;
1149     if (TargetTriple.getArch() == Triple::aarch64)
1150       PC = readRegister(IRB, "pc");
1151     else
1152       PC = IRB.CreatePtrToInt(F, IntptrTy);
1153     Module *M = F->getParent();
1154     auto GetStackPointerFn = Intrinsic::getDeclaration(
1155         M, Intrinsic::frameaddress,
1156         IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1157     Value *SP = IRB.CreatePtrToInt(
1158         IRB.CreateCall(GetStackPointerFn,
1159                        {Constant::getNullValue(IRB.getInt32Ty())}),
1160         IntptrTy);
1161     // Mix SP and PC.
1162     // Assumptions:
1163     // PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
1164     // SP is 0xsssssssssssSSSS0  (4 lower bits are zero)
1165     // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
1166     //       0xSSSSPPPPPPPPPPPP
1167     SP = IRB.CreateShl(SP, 44);
1168 
1169     // Store data to ring buffer.
1170     Value *RecordPtr =
1171         IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
1172     IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);
1173 
1174     // Update the ring buffer. Top byte of ThreadLong defines the size of the
1175     // buffer in pages, it must be a power of two, and the start of the buffer
1176     // must be aligned by twice that much. Therefore wrap around of the ring
1177     // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1178     // The use of AShr instead of LShr is due to
1179     //   https://bugs.llvm.org/show_bug.cgi?id=39030
1180     // Runtime library makes sure not to use the highest bit.
1181     Value *WrapMask = IRB.CreateXor(
1182         IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1183         ConstantInt::get(IntptrTy, (uint64_t)-1));
1184     Value *ThreadLongNew = IRB.CreateAnd(
1185         IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1186     IRB.CreateStore(ThreadLongNew, SlotPtr);
1187   }
1188 
1189   if (!ShadowBase) {
1190     // Get shadow base address by aligning RecordPtr up.
1191     // Note: this is not correct if the pointer is already aligned.
1192     // Runtime library will make sure this never happens.
1193     ShadowBase = IRB.CreateAdd(
1194         IRB.CreateOr(
1195             ThreadLongMaybeUntagged,
1196             ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1197         ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1198     ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
1199   }
1200 }
1201 
1202 Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
1203   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1204   Function *ReadRegister =
1205       Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
1206   MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
1207   Value *Args[] = {MetadataAsValue::get(*C, MD)};
1208   return IRB.CreateCall(ReadRegister, Args);
1209 }
1210 
1211 bool HWAddressSanitizer::instrumentLandingPads(
1212     SmallVectorImpl<Instruction *> &LandingPadVec) {
1213   for (auto *LP : LandingPadVec) {
1214     IRBuilder<> IRB(LP->getNextNode());
1215     IRB.CreateCall(
1216         HWAsanHandleVfork,
1217         {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
1218                                                                       : "sp")});
1219   }
1220   return true;
1221 }
1222 
1223 static bool isLifetimeIntrinsic(Value *V) {
1224   auto *II = dyn_cast<IntrinsicInst>(V);
1225   return II && II->isLifetimeStartOrEnd();
1226 }
1227 
1228 bool HWAddressSanitizer::instrumentStack(
1229     memtag::StackInfo &SInfo, Value *StackTag,
1230     llvm::function_ref<const DominatorTree &()> GetDT,
1231     llvm::function_ref<const PostDominatorTree &()> GetPDT) {
1232   // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1233   // alloca addresses using that. Unfortunately, offsets are not known yet
1234   // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1235   // temp, shift-OR it into each alloca address and xor with the retag mask.
1236   // This generates one extra instruction per alloca use.
1237   unsigned int I = 0;
1238 
1239   for (auto &KV : SInfo.AllocasToInstrument) {
1240     auto N = I++;
1241     auto *AI = KV.first;
1242     memtag::AllocaInfo &Info = KV.second;
1243     IRBuilder<> IRB(AI->getNextNode());
1244 
1245     // Replace uses of the alloca with tagged address.
1246     Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
1247     Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1248     Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
1249     std::string Name =
1250         AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1251     Replacement->setName(Name + ".hwasan");
1252 
1253     size_t Size = memtag::getAllocaSizeInBytes(*AI);
1254     size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1255 
1256     Value *AICast = IRB.CreatePointerCast(AI, Int8PtrTy);
1257 
1258     auto HandleLifetime = [&](IntrinsicInst *II) {
1259       // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1260       // set of assumptions we need to make about the lifetime. Without this we
1261       // would need to ensure that we can track the lifetime pointer to a
1262       // constant offset from the alloca, and would still need to change the
1263       // size to include the extra alignment we use for the untagging to make
1264       // the size consistent.
1265       //
1266       // The check for standard lifetime below makes sure that we have exactly
1267       // one set of start / end in any execution (i.e. the ends are not
1268       // reachable from each other), so this will not cause any problems.
1269       II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1270       II->setArgOperand(1, AICast);
1271     };
1272     llvm::for_each(Info.LifetimeStart, HandleLifetime);
1273     llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1274 
1275     AI->replaceUsesWithIf(Replacement, [AICast, AILong](Use &U) {
1276       auto *User = U.getUser();
1277       return User != AILong && User != AICast && !isLifetimeIntrinsic(User);
1278     });
1279 
1280     for (auto *DDI : Info.DbgVariableIntrinsics) {
1281       // Prepend "tag_offset, N" to the dwarf expression.
1282       // Tag offset logically applies to the alloca pointer, and it makes sense
1283       // to put it at the beginning of the expression.
1284       SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
1285                                          retagMask(N)};
1286       for (size_t LocNo = 0; LocNo < DDI->getNumVariableLocationOps(); ++LocNo)
1287         if (DDI->getVariableLocationOp(LocNo) == AI)
1288           DDI->setExpression(DIExpression::appendOpsToArg(DDI->getExpression(),
1289                                                           NewOps, LocNo));
1290     }
1291 
1292     auto TagEnd = [&](Instruction *Node) {
1293       IRB.SetInsertPoint(Node);
1294       Value *UARTag = getUARTag(IRB, StackTag);
1295       // When untagging, use the `AlignedSize` because we need to set the tags
1296       // for the entire alloca to zero. If we used `Size` here, we would
1297       // keep the last granule tagged, and store zero in the last byte of the
1298       // last granule, due to how short granules are implemented.
1299       tagAlloca(IRB, AI, UARTag, AlignedSize);
1300     };
1301     // Calls to functions that may return twice (e.g. setjmp) confuse the
1302     // postdominator analysis, and will leave us to keep memory tagged after
1303     // function return. Work around this by always untagging at every return
1304     // statement if return_twice functions are called.
1305     bool StandardLifetime =
1306         SInfo.UnrecognizedLifetimes.empty() &&
1307         memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd,
1308                                    &GetDT(), ClMaxLifetimes) &&
1309         !SInfo.CallsReturnTwice;
1310     if (DetectUseAfterScope && StandardLifetime) {
1311       IntrinsicInst *Start = Info.LifetimeStart[0];
1312       IRB.SetInsertPoint(Start->getNextNode());
1313       tagAlloca(IRB, AI, Tag, Size);
1314       if (!memtag::forAllReachableExits(GetDT(), GetPDT(), Start,
1315                                         Info.LifetimeEnd, SInfo.RetVec,
1316                                         TagEnd)) {
1317         for (auto *End : Info.LifetimeEnd)
1318           End->eraseFromParent();
1319       }
1320     } else {
1321       tagAlloca(IRB, AI, Tag, Size);
1322       for (auto *RI : SInfo.RetVec)
1323         TagEnd(RI);
1324       // We inserted tagging outside of the lifetimes, so we have to remove
1325       // them.
1326       for (auto &II : Info.LifetimeStart)
1327         II->eraseFromParent();
1328       for (auto &II : Info.LifetimeEnd)
1329         II->eraseFromParent();
1330     }
1331     memtag::alignAndPadAlloca(Info, Align(Mapping.getObjectAlignment()));
1332   }
1333   for (auto &I : SInfo.UnrecognizedLifetimes)
1334     I->eraseFromParent();
1335   return true;
1336 }
1337 
1338 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1339   return (AI.getAllocatedType()->isSized() &&
1340           // FIXME: instrument dynamic allocas, too
1341           AI.isStaticAlloca() &&
1342           // alloca() may be called with 0 size, ignore it.
1343           memtag::getAllocaSizeInBytes(AI) > 0 &&
1344           // We are only interested in allocas not promotable to registers.
1345           // Promotable allocas are common under -O0.
1346           !isAllocaPromotable(&AI) &&
1347           // inalloca allocas are not treated as static, and we don't want
1348           // dynamic alloca instrumentation for them as well.
1349           !AI.isUsedWithInAlloca() &&
1350           // swifterror allocas are register promoted by ISel
1351           !AI.isSwiftError()) &&
1352          // safe allocas are not interesting
1353          !(SSI && SSI->isSafe(AI));
1354 }
1355 
1356 bool HWAddressSanitizer::sanitizeFunction(
1357     Function &F, llvm::function_ref<const DominatorTree &()> GetDT,
1358     llvm::function_ref<const PostDominatorTree &()> GetPDT) {
1359   if (&F == HwasanCtorFunction)
1360     return false;
1361 
1362   if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1363     return false;
1364 
1365   LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1366 
1367   SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1368   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1369   SmallVector<Instruction *, 8> LandingPadVec;
1370 
1371   memtag::StackInfoBuilder SIB(
1372       [this](const AllocaInst &AI) { return isInterestingAlloca(AI); });
1373   for (auto &Inst : instructions(F)) {
1374     if (InstrumentStack) {
1375       SIB.visit(Inst);
1376     }
1377 
1378     if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1379       LandingPadVec.push_back(&Inst);
1380 
1381     getInterestingMemoryOperands(&Inst, OperandsToInstrument);
1382 
1383     if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1384       if (!ignoreMemIntrinsic(MI))
1385         IntrinToInstrument.push_back(MI);
1386   }
1387 
1388   memtag::StackInfo &SInfo = SIB.get();
1389 
1390   initializeCallbacks(*F.getParent());
1391 
1392   bool Changed = false;
1393 
1394   if (!LandingPadVec.empty())
1395     Changed |= instrumentLandingPads(LandingPadVec);
1396 
1397   if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1398       F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1399     // __hwasan_personality_thunk is a no-op for functions without an
1400     // instrumented stack, so we can drop it.
1401     F.setPersonalityFn(nullptr);
1402     Changed = true;
1403   }
1404 
1405   if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1406       IntrinToInstrument.empty())
1407     return Changed;
1408 
1409   assert(!ShadowBase);
1410 
1411   Instruction *InsertPt = &*F.getEntryBlock().begin();
1412   IRBuilder<> EntryIRB(InsertPt);
1413   emitPrologue(EntryIRB,
1414                /*WithFrameRecord*/ ClRecordStackHistory &&
1415                    Mapping.WithFrameRecord &&
1416                    !SInfo.AllocasToInstrument.empty());
1417 
1418   if (!SInfo.AllocasToInstrument.empty()) {
1419     Value *StackTag =
1420         ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
1421     instrumentStack(SInfo, StackTag, GetDT, GetPDT);
1422   }
1423 
1424   // If we split the entry block, move any allocas that were originally in the
1425   // entry block back into the entry block so that they aren't treated as
1426   // dynamic allocas.
1427   if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1428     InsertPt = &*F.getEntryBlock().begin();
1429     for (Instruction &I :
1430          llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1431       if (auto *AI = dyn_cast<AllocaInst>(&I))
1432         if (isa<ConstantInt>(AI->getArraySize()))
1433           I.moveBefore(InsertPt);
1434     }
1435   }
1436 
1437   for (auto &Operand : OperandsToInstrument)
1438     instrumentMemAccess(Operand);
1439 
1440   if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1441     for (auto Inst : IntrinToInstrument)
1442       instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
1443   }
1444 
1445   ShadowBase = nullptr;
1446   StackBaseTag = nullptr;
1447 
1448   return true;
1449 }
1450 
1451 void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1452   assert(!UsePageAliases);
1453   Constant *Initializer = GV->getInitializer();
1454   uint64_t SizeInBytes =
1455       M.getDataLayout().getTypeAllocSize(Initializer->getType());
1456   uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1457   if (SizeInBytes != NewSize) {
1458     // Pad the initializer out to the next multiple of 16 bytes and add the
1459     // required short granule tag.
1460     std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1461     Init.back() = Tag;
1462     Constant *Padding = ConstantDataArray::get(*C, Init);
1463     Initializer = ConstantStruct::getAnon({Initializer, Padding});
1464   }
1465 
1466   auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1467                                    GlobalValue::ExternalLinkage, Initializer,
1468                                    GV->getName() + ".hwasan");
1469   NewGV->copyAttributesFrom(GV);
1470   NewGV->setLinkage(GlobalValue::PrivateLinkage);
1471   NewGV->copyMetadata(GV, 0);
1472   NewGV->setAlignment(
1473       MaybeAlign(std::max(GV->getAlignment(), Mapping.getObjectAlignment())));
1474 
1475   // It is invalid to ICF two globals that have different tags. In the case
1476   // where the size of the global is a multiple of the tag granularity the
1477   // contents of the globals may be the same but the tags (i.e. symbol values)
1478   // may be different, and the symbols are not considered during ICF. In the
1479   // case where the size is not a multiple of the granularity, the short granule
1480   // tags would discriminate two globals with different tags, but there would
1481   // otherwise be nothing stopping such a global from being incorrectly ICF'd
1482   // with an uninstrumented (i.e. tag 0) global that happened to have the short
1483   // granule tag in the last byte.
1484   NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1485 
1486   // Descriptor format (assuming little-endian):
1487   // bytes 0-3: relative address of global
1488   // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1489   // it isn't, we create multiple descriptors)
1490   // byte 7: tag
1491   auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1492   const uint64_t MaxDescriptorSize = 0xfffff0;
1493   for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1494        DescriptorPos += MaxDescriptorSize) {
1495     auto *Descriptor =
1496         new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1497                            nullptr, GV->getName() + ".hwasan.descriptor");
1498     auto *GVRelPtr = ConstantExpr::getTrunc(
1499         ConstantExpr::getAdd(
1500             ConstantExpr::getSub(
1501                 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1502                 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1503             ConstantInt::get(Int64Ty, DescriptorPos)),
1504         Int32Ty);
1505     uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1506     auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1507     Descriptor->setComdat(NewGV->getComdat());
1508     Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1509     Descriptor->setSection("hwasan_globals");
1510     Descriptor->setMetadata(LLVMContext::MD_associated,
1511                             MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1512     appendToCompilerUsed(M, Descriptor);
1513   }
1514 
1515   Constant *Aliasee = ConstantExpr::getIntToPtr(
1516       ConstantExpr::getAdd(
1517           ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1518           ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1519       GV->getType());
1520   auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1521                                     GV->getLinkage(), "", Aliasee, &M);
1522   Alias->setVisibility(GV->getVisibility());
1523   Alias->takeName(GV);
1524   GV->replaceAllUsesWith(Alias);
1525   GV->eraseFromParent();
1526 }
1527 
1528 static DenseSet<GlobalVariable *> getExcludedGlobals(Module &M) {
1529   NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
1530   if (!Globals)
1531     return DenseSet<GlobalVariable *>();
1532   DenseSet<GlobalVariable *> Excluded(Globals->getNumOperands());
1533   for (auto MDN : Globals->operands()) {
1534     // Metadata node contains the global and the fields of "Entry".
1535     assert(MDN->getNumOperands() == 5);
1536     auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
1537     // The optimizer may optimize away a global entirely.
1538     if (!V)
1539       continue;
1540     auto *StrippedV = V->stripPointerCasts();
1541     auto *GV = dyn_cast<GlobalVariable>(StrippedV);
1542     if (!GV)
1543       continue;
1544     ConstantInt *IsExcluded = mdconst::extract<ConstantInt>(MDN->getOperand(4));
1545     if (IsExcluded->isOne())
1546       Excluded.insert(GV);
1547   }
1548   return Excluded;
1549 }
1550 
1551 void HWAddressSanitizer::instrumentGlobals() {
1552   std::vector<GlobalVariable *> Globals;
1553   auto ExcludedGlobals = getExcludedGlobals(M);
1554   for (GlobalVariable &GV : M.globals()) {
1555     if (ExcludedGlobals.count(&GV))
1556       continue;
1557 
1558     if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
1559         GV.isThreadLocal())
1560       continue;
1561 
1562     // Common symbols can't have aliases point to them, so they can't be tagged.
1563     if (GV.hasCommonLinkage())
1564       continue;
1565 
1566     // Globals with custom sections may be used in __start_/__stop_ enumeration,
1567     // which would be broken both by adding tags and potentially by the extra
1568     // padding/alignment that we insert.
1569     if (GV.hasSection())
1570       continue;
1571 
1572     Globals.push_back(&GV);
1573   }
1574 
1575   MD5 Hasher;
1576   Hasher.update(M.getSourceFileName());
1577   MD5::MD5Result Hash;
1578   Hasher.final(Hash);
1579   uint8_t Tag = Hash[0];
1580 
1581   for (GlobalVariable *GV : Globals) {
1582     Tag &= TagMaskByte;
1583     // Skip tag 0 in order to avoid collisions with untagged memory.
1584     if (Tag == 0)
1585       Tag = 1;
1586     instrumentGlobal(GV, Tag++);
1587   }
1588 }
1589 
1590 void HWAddressSanitizer::instrumentPersonalityFunctions() {
1591   // We need to untag stack frames as we unwind past them. That is the job of
1592   // the personality function wrapper, which either wraps an existing
1593   // personality function or acts as a personality function on its own. Each
1594   // function that has a personality function or that can be unwound past has
1595   // its personality function changed to a thunk that calls the personality
1596   // function wrapper in the runtime.
1597   MapVector<Constant *, std::vector<Function *>> PersonalityFns;
1598   for (Function &F : M) {
1599     if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1600       continue;
1601 
1602     if (F.hasPersonalityFn()) {
1603       PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1604     } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1605       PersonalityFns[nullptr].push_back(&F);
1606     }
1607   }
1608 
1609   if (PersonalityFns.empty())
1610     return;
1611 
1612   FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1613       "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
1614       Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
1615   FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1616   FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1617 
1618   for (auto &P : PersonalityFns) {
1619     std::string ThunkName = kHwasanPersonalityThunkName;
1620     if (P.first)
1621       ThunkName += ("." + P.first->getName()).str();
1622     FunctionType *ThunkFnTy = FunctionType::get(
1623         Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
1624     bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1625                                cast<GlobalValue>(P.first)->hasLocalLinkage());
1626     auto *ThunkFn = Function::Create(ThunkFnTy,
1627                                      IsLocal ? GlobalValue::InternalLinkage
1628                                              : GlobalValue::LinkOnceODRLinkage,
1629                                      ThunkName, &M);
1630     if (!IsLocal) {
1631       ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1632       ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1633     }
1634 
1635     auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1636     IRBuilder<> IRB(BB);
1637     CallInst *WrapperCall = IRB.CreateCall(
1638         HwasanPersonalityWrapper,
1639         {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1640          ThunkFn->getArg(3), ThunkFn->getArg(4),
1641          P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
1642                  : Constant::getNullValue(Int8PtrTy),
1643          IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
1644          IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
1645     WrapperCall->setTailCall();
1646     IRB.CreateRet(WrapperCall);
1647 
1648     for (Function *F : P.second)
1649       F->setPersonalityFn(ThunkFn);
1650   }
1651 }
1652 
1653 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1654                                              bool InstrumentWithCalls) {
1655   Scale = kDefaultShadowScale;
1656   if (TargetTriple.isOSFuchsia()) {
1657     // Fuchsia is always PIE, which means that the beginning of the address
1658     // space is always available.
1659     InGlobal = false;
1660     InTls = false;
1661     Offset = 0;
1662     WithFrameRecord = true;
1663   } else if (ClMappingOffset.getNumOccurrences() > 0) {
1664     InGlobal = false;
1665     InTls = false;
1666     Offset = ClMappingOffset;
1667     WithFrameRecord = false;
1668   } else if (ClEnableKhwasan || InstrumentWithCalls) {
1669     InGlobal = false;
1670     InTls = false;
1671     Offset = 0;
1672     WithFrameRecord = false;
1673   } else if (ClWithIfunc) {
1674     InGlobal = true;
1675     InTls = false;
1676     Offset = kDynamicShadowSentinel;
1677     WithFrameRecord = false;
1678   } else if (ClWithTls) {
1679     InGlobal = false;
1680     InTls = true;
1681     Offset = kDynamicShadowSentinel;
1682     WithFrameRecord = true;
1683   } else {
1684     InGlobal = false;
1685     InTls = false;
1686     Offset = kDynamicShadowSentinel;
1687     WithFrameRecord = false;
1688   }
1689 }
1690