1 //===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of AddressSanitizer, an address sanity checker.
11 // Details of the algorithm:
12 //  http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/SetVector.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/Triple.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/CallSite.h"
31 #include "llvm/IR/DIBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Dominators.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/InstVisitor.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/LLVMContext.h"
40 #include "llvm/IR/MDBuilder.h"
41 #include "llvm/IR/Module.h"
42 #include "llvm/IR/Type.h"
43 #include "llvm/MC/MCSectionMachO.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/DataTypes.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Support/Endian.h"
48 #include "llvm/Support/ScopedPrinter.h"
49 #include "llvm/Support/SwapByteOrder.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include "llvm/Transforms/Instrumentation.h"
52 #include "llvm/Transforms/Scalar.h"
53 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
54 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
55 #include "llvm/Transforms/Utils/Cloning.h"
56 #include "llvm/Transforms/Utils/Local.h"
57 #include "llvm/Transforms/Utils/ModuleUtils.h"
58 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
59 #include <algorithm>
60 #include <iomanip>
61 #include <limits>
62 #include <sstream>
63 #include <string>
64 #include <system_error>
65 
66 using namespace llvm;
67 
68 #define DEBUG_TYPE "asan"
69 
// The shadow mapping is Shadow = (Mem >> Scale) + Offset (see ShadowMapping
// below).  A scale of 3 means one shadow byte describes 8 application bytes.
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
// Sentinel offset meaning "no constant shadow base: load the base at runtime
// from __asan_shadow_memory_dynamic_address at function entry".
static const uint64_t kDynamicShadowSentinel = ~(uint64_t)0;
// Per-OS / per-arch shadow offsets, chosen in getShadowMapping().
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSSimShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSSimShadowOffset64 = kDefaultShadowOffset64;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000;  // < 2G.
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

// Bounds for the size classes handled by __asan_stack_malloc_N.
static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
// Magic values written into a stack frame's redzone header to mark it as
// live (current) or already returned-from (retired).
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
96 
// Names of the module ctor/dtor synthesized by this pass and of the runtime
// entry points it emits calls to.  These strings must match the symbols
// exported by the ASan runtime library.
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
    "__asan_unregister_globals";
static const char *const kAsanRegisterImageGlobalsName =
  "__asan_register_image_globals";
static const char *const kAsanUnregisterImageGlobalsName =
  "__asan_unregister_image_globals";
static const char *const kAsanRegisterElfGlobalsName =
  "__asan_register_elf_globals";
static const char *const kAsanUnregisterElfGlobalsName =
  "__asan_unregister_elf_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init";
// The "_v8" suffix encodes the compiler/runtime ABI version; it changes
// whenever the instrumentation ABI changes.
static const char *const kAsanVersionCheckName =
    "__asan_version_mismatch_check_v8";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "__asan_gen_";
static const char *const kODRGenPrefix = "__odr_asan_gen_";
static const char *const kSanCovGenPrefix = "__sancov_gen_";
static const char *const kAsanSetShadowPrefix = "__asan_set_shadow_";
static const char *const kAsanPoisonStackMemoryName =
    "__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
    "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
static const char *const kAsanGlobalsRegisteredFlagName =
    "___asan_globals_registered";

static const char *const kAsanOptionDetectUseAfterReturn =
    "__asan_option_detect_stack_use_after_return";

static const char *const kAsanShadowMemoryDynamicAddress =
    "__asan_shadow_memory_dynamic_address";

static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// Redzone size (bytes) placed around instrumented dynamic allocas.
static const unsigned kAllocaRzSize = 32;
150 
// Command-line flags.
static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));
static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));
static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));
// Forces the kDynamicShadowSentinel mapping even on targets that normally
// have a constant shadow offset (see getShadowMapping below).
static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
                                      cl::desc("Check stack-use-after-return"),
                                      cl::Hidden, cl::init(true));
static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"), cl::Hidden,
                                        cl::init(true));
// Off by default; the frontend can still request it via the pass constructor
// argument (see AddressSanitizer's constructor).
static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));
static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));
static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));
static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented contains more than "
        "this number of memory accesses, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));
static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));
static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

// These flags allow to change the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + offset
static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClMappingOffset(
    "asan-mapping-offset",
    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden,
    cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAliasForGlobals("asan-use-private-alias",
                                cl::desc("Use private aliases for global"
                                         " variables"),
                                cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));
static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));
307 
// Pass statistics, collected via LLVM's STATISTIC machinery.
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");
314 
315 namespace {
316 /// Frontend-provided metadata for source location.
317 struct LocationMetadata {
318   StringRef Filename;
319   int LineNo;
320   int ColumnNo;
321 
322   LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}
323 
324   bool empty() const { return Filename.empty(); }
325 
326   void parse(MDNode *MDN) {
327     assert(MDN->getNumOperands() == 3);
328     MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
329     Filename = DIFilename->getString();
330     LineNo =
331         mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
332     ColumnNo =
333         mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
334   }
335 };
336 
337 /// Frontend-provided metadata for global variables.
338 class GlobalsMetadata {
339  public:
340   struct Entry {
341     Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {}
342     LocationMetadata SourceLoc;
343     StringRef Name;
344     bool IsDynInit;
345     bool IsBlacklisted;
346   };
347 
348   GlobalsMetadata() : inited_(false) {}
349 
350   void reset() {
351     inited_ = false;
352     Entries.clear();
353   }
354 
355   void init(Module &M) {
356     assert(!inited_);
357     inited_ = true;
358     NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
359     if (!Globals) return;
360     for (auto MDN : Globals->operands()) {
361       // Metadata node contains the global and the fields of "Entry".
362       assert(MDN->getNumOperands() == 5);
363       auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
364       // The optimizer may optimize away a global entirely.
365       if (!GV) continue;
366       // We can already have an entry for GV if it was merged with another
367       // global.
368       Entry &E = Entries[GV];
369       if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
370         E.SourceLoc.parse(Loc);
371       if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
372         E.Name = Name->getString();
373       ConstantInt *IsDynInit =
374           mdconst::extract<ConstantInt>(MDN->getOperand(3));
375       E.IsDynInit |= IsDynInit->isOne();
376       ConstantInt *IsBlacklisted =
377           mdconst::extract<ConstantInt>(MDN->getOperand(4));
378       E.IsBlacklisted |= IsBlacklisted->isOne();
379     }
380   }
381 
382   /// Returns metadata entry for a given global.
383   Entry get(GlobalVariable *G) const {
384     auto Pos = Entries.find(G);
385     return (Pos != Entries.end()) ? Pos->second : Entry();
386   }
387 
388  private:
389   bool inited_;
390   DenseMap<GlobalVariable *, Entry> Entries;
391 };
392 
/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
struct ShadowMapping {
  int Scale;            // log2 of app bytes per shadow byte (default 3).
  uint64_t Offset;      // Shadow base; kDynamicShadowSentinel means the base
                        // is loaded at runtime instead of being a constant.
  bool OrShadowOffset;  // True when '|' may replace '+' (offset is a power of
                        // two and the target permits it).
};
400 
401 static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
402                                       bool IsKasan) {
403   bool IsAndroid = TargetTriple.isAndroid();
404   bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS();
405   bool IsFreeBSD = TargetTriple.isOSFreeBSD();
406   bool IsNetBSD = TargetTriple.isOSNetBSD();
407   bool IsPS4CPU = TargetTriple.isPS4CPU();
408   bool IsLinux = TargetTriple.isOSLinux();
409   bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
410                  TargetTriple.getArch() == llvm::Triple::ppc64le;
411   bool IsSystemZ = TargetTriple.getArch() == llvm::Triple::systemz;
412   bool IsX86 = TargetTriple.getArch() == llvm::Triple::x86;
413   bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
414   bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
415                   TargetTriple.getArch() == llvm::Triple::mipsel;
416   bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
417                   TargetTriple.getArch() == llvm::Triple::mips64el;
418   bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64;
419   bool IsWindows = TargetTriple.isOSWindows();
420   bool IsFuchsia = TargetTriple.isOSFuchsia();
421 
422   ShadowMapping Mapping;
423 
424   if (LongSize == 32) {
425     // Android is always PIE, which means that the beginning of the address
426     // space is always available.
427     if (IsAndroid)
428       Mapping.Offset = 0;
429     else if (IsMIPS32)
430       Mapping.Offset = kMIPS32_ShadowOffset32;
431     else if (IsFreeBSD)
432       Mapping.Offset = kFreeBSD_ShadowOffset32;
433     else if (IsIOS)
434       // If we're targeting iOS and x86, the binary is built for iOS simulator.
435       Mapping.Offset = IsX86 ? kIOSSimShadowOffset32 : kIOSShadowOffset32;
436     else if (IsWindows)
437       Mapping.Offset = kWindowsShadowOffset32;
438     else
439       Mapping.Offset = kDefaultShadowOffset32;
440   } else {  // LongSize == 64
441     // Fuchsia is always PIE, which means that the beginning of the address
442     // space is always available.
443     if (IsFuchsia)
444       Mapping.Offset = 0;
445     else if (IsPPC64)
446       Mapping.Offset = kPPC64_ShadowOffset64;
447     else if (IsSystemZ)
448       Mapping.Offset = kSystemZ_ShadowOffset64;
449     else if (IsFreeBSD)
450       Mapping.Offset = kFreeBSD_ShadowOffset64;
451     else if (IsNetBSD)
452       Mapping.Offset = kNetBSD_ShadowOffset64;
453     else if (IsPS4CPU)
454       Mapping.Offset = kPS4CPU_ShadowOffset64;
455     else if (IsLinux && IsX86_64) {
456       if (IsKasan)
457         Mapping.Offset = kLinuxKasan_ShadowOffset64;
458       else
459         Mapping.Offset = kSmallX86_64ShadowOffset;
460     } else if (IsWindows && IsX86_64) {
461       Mapping.Offset = kWindowsShadowOffset64;
462     } else if (IsMIPS64)
463       Mapping.Offset = kMIPS64_ShadowOffset64;
464     else if (IsIOS)
465       // If we're targeting iOS and x86, the binary is built for iOS simulator.
466       // We are using dynamic shadow offset on the 64-bit devices.
467       Mapping.Offset =
468         IsX86_64 ? kIOSSimShadowOffset64 : kDynamicShadowSentinel;
469     else if (IsAArch64)
470       Mapping.Offset = kAArch64_ShadowOffset64;
471     else
472       Mapping.Offset = kDefaultShadowOffset64;
473   }
474 
475   if (ClForceDynamicShadow) {
476     Mapping.Offset = kDynamicShadowSentinel;
477   }
478 
479   Mapping.Scale = kDefaultShadowScale;
480   if (ClMappingScale.getNumOccurrences() > 0) {
481     Mapping.Scale = ClMappingScale;
482   }
483 
484   if (ClMappingOffset.getNumOccurrences() > 0) {
485     Mapping.Offset = ClMappingOffset;
486   }
487 
488   // OR-ing shadow offset if more efficient (at least on x86) if the offset
489   // is a power of two, but on ppc64 we have to use add since the shadow
490   // offset is not necessary 1/8-th of the address space.  On SystemZ,
491   // we could OR the constant in a single instruction, but it's more
492   // efficient to load it once and use indexed addressing.
493   Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
494                            !(Mapping.Offset & (Mapping.Offset - 1)) &&
495                            Mapping.Offset != kDynamicShadowSentinel;
496 
497   return Mapping;
498 }
499 
/// Size in bytes of the redzone placed around stack objects and globals for
/// a given shadow-mapping scale.  At least 32 bytes; for scales 6 and 7 one
/// shadow granule is larger than that, so the redzone grows to 64 and 128
/// bytes respectively.
static size_t RedzoneSizeForScale(int MappingScale) {
  const unsigned Granularity = 1U << MappingScale;
  return Granularity > 32U ? Granularity : 32U;
}
505 
/// AddressSanitizer: instrument the code in module to find memory bugs.
///
/// This is the per-function pass; module-level work (globals, ctors) lives in
/// AddressSanitizerModule below.
struct AddressSanitizer : public FunctionPass {
  // Constructor arguments may each be force-enabled by the corresponding
  // command-line flag (-asan-kernel, -asan-recover, -asan-use-after-scope).
  explicit AddressSanitizer(bool CompileKernel = false, bool Recover = false,
                            bool UseAfterScope = false)
      : FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan),
        Recover(Recover || ClRecover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        LocalDynamicShadow(nullptr) {
    initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
  }
  StringRef getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
  /// Total object size of the alloca in bytes: allocated-type size times the
  /// array size, which must be a compile-time constant (asserted).
  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
    uint64_t ArraySize = 1;
    if (AI.isArrayAllocation()) {
      const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
      assert(CI && "non-constant array size");
      ArraySize = CI->getZExtValue();
    }
    Type *Ty = AI.getAllocatedType();
    uint64_t SizeInBytes =
        AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
    return SizeInBytes * ArraySize;
  }
  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  /// If it is an interesting memory access, return the PointerOperand
  /// and set IsWrite/Alignment. Otherwise return nullptr.
  /// MaybeMask is an output parameter for the mask Value, if we're looking at a
  /// masked load/store.
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,
                                   Value **MaybeMask = nullptr);
  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
                     bool UseCalls, const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool runOnFunction(Function &F) override;
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);
  bool doInitialization(Module &M) override;
  bool doFinalization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid

  DominatorTree &getDominatorTree() const { return *DT; }

 private:
  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;

  /// Helper to cleanup per-function state.
  /// Asserts on construction that the previous function left no state behind,
  /// and on destruction resets LocalDynamicShadow and the alloca cache.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;
    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }
    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  Type *IntptrTy;
  ShadowMapping Mapping;
  DominatorTree *DT;
  Function *AsanHandleNoReturnFunc;
  Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
  // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
  Function *AsanErrorCallback[2][2][kNumberOfAccessSizes];
  Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
  // This array is indexed by AccessIsWrite and Experiment.
  Function *AsanErrorCallbackSized[2][2];
  Function *AsanMemoryAccessCallbackSized[2][2];
  Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
  InlineAsm *EmptyAsm;
  // Per-function dynamic shadow base value; reset to nullptr between
  // functions by FunctionStateRAII (presumably set by
  // maybeInsertDynamicShadowAtFunctionEntry — confirm in its definition).
  Value *LocalDynamicShadow;
  GlobalsMetadata GlobalsMD;
  // Per-function cache keyed by alloca; FunctionStateRAII asserts it is
  // empty on entry and clears it on exit.
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  friend struct FunctionStackPoisoner;
};
620 
/// Module-level counterpart of AddressSanitizer: instruments globals and
/// emits the module ctor/dtor that (un)register them with the runtime.
class AddressSanitizerModule : public ModulePass {
public:
  // CompileKernel/Recover may each be force-enabled by the corresponding
  // command-line flag; UseGlobalsGC may only be *disabled* by its flag.
  explicit AddressSanitizerModule(bool CompileKernel = false,
                                  bool Recover = false,
                                  bool UseGlobalsGC = true)
      : ModulePass(ID), CompileKernel(CompileKernel || ClEnableKasan),
        Recover(Recover || ClRecover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which UseGlobalsGC constructor
        // argument is designed as workaround. Therefore, disable both
        // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
        // do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat) {}
  bool runOnModule(Module &M) override;
  static char ID; // Pass identification, replacement for typeid
  StringRef getPassName() const override { return "AddressSanitizerModule"; }

private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  // Object-format-specific strategies for emitting global metadata:
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  IRBuilder<> CreateAsanModuleDtor(Module &M);

  bool ShouldInstrumentGlobal(GlobalVariable *G);
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  /// Minimum global redzone for the current shadow scale (>= 32 bytes).
  size_t MinRedzoneSizeForGlobal() const {
    return RedzoneSizeForScale(Mapping.Scale);
  }

  GlobalsMetadata GlobalsMD;
  bool CompileKernel;
  bool Recover;
  bool UseGlobalsGC;
  bool UseCtorComdat;
  Type *IntptrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  // Runtime entry points, resolved in initializeCallbacks().
  Function *AsanPoisonGlobals;
  Function *AsanUnpoisonGlobals;
  Function *AsanRegisterGlobals;
  Function *AsanUnregisterGlobals;
  Function *AsanRegisterImageGlobals;
  Function *AsanUnregisterImageGlobals;
  Function *AsanRegisterElfGlobals;
  Function *AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};
696 
697 // Stack poisoning does not play well with exception handling.
698 // When an exception is thrown, we essentially bypass the code
699 // that unpoisones the stack. This is why the run-time library has
700 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
701 // stack in the interceptor. This however does not work inside the
702 // actual function which catches the exception. Most likely because the
703 // compiler hoists the load of the shadow value somewhere too high.
704 // This causes asan to report a non-existing bug on 453.povray.
705 // It sounds like an LLVM bug.
706 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
707   Function &F;
708   AddressSanitizer &ASan;
709   DIBuilder DIB;
710   LLVMContext *C;
711   Type *IntptrTy;
712   Type *IntptrPtrTy;
713   ShadowMapping Mapping;
714 
715   SmallVector<AllocaInst *, 16> AllocaVec;
716   SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
717   SmallVector<Instruction *, 8> RetVec;
718   unsigned StackAlignment;
719 
720   Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
721       *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
722   Function *AsanSetShadowFunc[0x100] = {};
723   Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
724   Function *AsanAllocaPoisonFunc, *AsanAllocasUnpoisonFunc;
725 
726   // Stores a place and arguments of poisoning/unpoisoning call for alloca.
727   struct AllocaPoisonCall {
728     IntrinsicInst *InsBefore;
729     AllocaInst *AI;
730     uint64_t Size;
731     bool DoPoison;
732   };
733   SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
734   SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
735 
736   SmallVector<AllocaInst *, 1> DynamicAllocaVec;
737   SmallVector<IntrinsicInst *, 1> StackRestoreVec;
738   AllocaInst *DynamicAllocaLayout = nullptr;
739   IntrinsicInst *LocalEscapeCall = nullptr;
740 
741   // Maps Value to an AllocaInst from which the Value is originated.
742   typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
743   AllocaForValueMapTy AllocaForValue;
744 
745   bool HasNonEmptyInlineAsm = false;
746   bool HasReturnsTwiceCall = false;
747   std::unique_ptr<CallInst> EmptyInlineAsm;
748 
749   FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
750       : F(F),
751         ASan(ASan),
752         DIB(*F.getParent(), /*AllowUnresolved*/ false),
753         C(ASan.C),
754         IntptrTy(ASan.IntptrTy),
755         IntptrPtrTy(PointerType::get(IntptrTy, 0)),
756         Mapping(ASan.Mapping),
757         StackAlignment(1 << Mapping.Scale),
758         EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
759 
760   bool runOnFunction() {
761     if (!ClStack) return false;
762 
763     if (ClRedzoneByvalArgs)
764       copyArgsPassedByValToAllocas();
765 
766     // Collect alloca, ret, lifetime instructions etc.
767     for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
768 
769     if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
770 
771     initializeCallbacks(*F.getParent());
772 
773     processDynamicAllocas();
774     processStaticAllocas();
775 
776     if (ClDebugStack) {
777       DEBUG(dbgs() << F);
778     }
779     return true;
780   }
781 
782   // Arguments marked with the "byval" attribute are implicitly copied without
783   // using an alloca instruction.  To produce redzones for those arguments, we
784   // copy them a second time into memory allocated with an alloca instruction.
785   void copyArgsPassedByValToAllocas();
786 
787   // Finds all Alloca instructions and puts
788   // poisoned red zones around all of them.
789   // Then unpoison everything back before the function returns.
790   void processStaticAllocas();
791   void processDynamicAllocas();
792 
793   void createDynamicAllocasInitStorage();
794 
795   // ----------------------- Visitors.
796   /// \brief Collect all Ret instructions.
797   void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }
798 
799   /// \brief Collect all Resume instructions.
800   void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
801 
802   /// \brief Collect all CatchReturnInst instructions.
803   void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
804 
805   void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
806                                         Value *SavedStack) {
807     IRBuilder<> IRB(InstBefore);
808     Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
809     // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
810     // need to adjust extracted SP to compute the address of the most recent
811     // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
812     // this purpose.
813     if (!isa<ReturnInst>(InstBefore)) {
814       Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
815           InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
816           {IntptrTy});
817 
818       Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});
819 
820       DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
821                                      DynamicAreaOffset);
822     }
823 
824     IRB.CreateCall(AsanAllocasUnpoisonFunc,
825                    {IRB.CreateLoad(DynamicAllocaLayout), DynamicAreaPtr});
826   }
827 
828   // Unpoison dynamic allocas redzones.
829   void unpoisonDynamicAllocas() {
830     for (auto &Ret : RetVec)
831       unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
832 
833     for (auto &StackRestoreInst : StackRestoreVec)
834       unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
835                                        StackRestoreInst->getOperand(0));
836   }
837 
838   // Deploy and poison redzones around dynamic alloca call. To do this, we
839   // should replace this call with another one with changed parameters and
840   // replace all its uses with new address, so
841   //   addr = alloca type, old_size, align
842   // is replaced by
843   //   new_size = (old_size + additional_size) * sizeof(type)
844   //   tmp = alloca i8, new_size, max(align, 32)
845   //   addr = tmp + 32 (first 32 bytes are for the left redzone).
846   // Additional_size is added to make new memory allocation contain not only
847   // requested memory, but also left, partial and right redzones.
848   void handleDynamicAllocaCall(AllocaInst *AI);
849 
850   /// \brief Collect Alloca instructions we want (and can) handle.
851   void visitAllocaInst(AllocaInst &AI) {
852     if (!ASan.isInterestingAlloca(AI)) {
853       if (AI.isStaticAlloca()) {
854         // Skip over allocas that are present *before* the first instrumented
855         // alloca, we don't want to move those around.
856         if (AllocaVec.empty())
857           return;
858 
859         StaticAllocasToMoveUp.push_back(&AI);
860       }
861       return;
862     }
863 
864     StackAlignment = std::max(StackAlignment, AI.getAlignment());
865     if (!AI.isStaticAlloca())
866       DynamicAllocaVec.push_back(&AI);
867     else
868       AllocaVec.push_back(&AI);
869   }
870 
871   /// \brief Collect lifetime intrinsic calls to check for use-after-scope
872   /// errors.
873   void visitIntrinsicInst(IntrinsicInst &II) {
874     Intrinsic::ID ID = II.getIntrinsicID();
875     if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
876     if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
877     if (!ASan.UseAfterScope)
878       return;
879     if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
880       return;
881     // Found lifetime intrinsic, add ASan instrumentation if necessary.
882     ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
883     // If size argument is undefined, don't do anything.
884     if (Size->isMinusOne()) return;
885     // Check that size doesn't saturate uint64_t and can
886     // be stored in IntptrTy.
887     const uint64_t SizeValue = Size->getValue().getLimitedValue();
888     if (SizeValue == ~0ULL ||
889         !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
890       return;
891     // Find alloca instruction that corresponds to llvm.lifetime argument.
892     AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
893     if (!AI || !ASan.isInterestingAlloca(*AI))
894       return;
895     bool DoPoison = (ID == Intrinsic::lifetime_end);
896     AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
897     if (AI->isStaticAlloca())
898       StaticAllocaPoisonCallVec.push_back(APC);
899     else if (ClInstrumentDynamicAllocas)
900       DynamicAllocaPoisonCallVec.push_back(APC);
901   }
902 
903   void visitCallSite(CallSite CS) {
904     Instruction *I = CS.getInstruction();
905     if (CallInst *CI = dyn_cast<CallInst>(I)) {
906       HasNonEmptyInlineAsm |=
907           CI->isInlineAsm() && !CI->isIdenticalTo(EmptyInlineAsm.get());
908       HasReturnsTwiceCall |= CI->canReturnTwice();
909     }
910   }
911 
912   // ---------------------- Helpers.
913   void initializeCallbacks(Module &M);
914 
915   bool doesDominateAllExits(const Instruction *I) const {
916     for (auto Ret : RetVec) {
917       if (!ASan.getDominatorTree().dominates(I, Ret)) return false;
918     }
919     return true;
920   }
921 
922   /// Finds alloca where the value comes from.
923   AllocaInst *findAllocaForValue(Value *V);
924 
925   // Copies bytes from ShadowBytes into shadow memory for indexes where
926   // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
927   // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
928   void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
929                     IRBuilder<> &IRB, Value *ShadowBase);
930   void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
931                     size_t Begin, size_t End, IRBuilder<> &IRB,
932                     Value *ShadowBase);
933   void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
934                           ArrayRef<uint8_t> ShadowBytes, size_t Begin,
935                           size_t End, IRBuilder<> &IRB, Value *ShadowBase);
936 
937   void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
938 
939   Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
940                                bool Dynamic);
941   PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
942                      Instruction *ThenTerm, Value *ValueIfFalse);
943 };
944 
945 } // anonymous namespace
946 
char AddressSanitizer::ID = 0;
// Register the per-function pass with the legacy pass manager. The two
// INITIALIZE_PASS_DEPENDENCY lines declare the analyses this pass uses
// (dominator tree and target-library info).
INITIALIZE_PASS_BEGIN(
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
/// Factory for the per-function AddressSanitizer pass.
/// \param CompileKernel  instrument for kernel ASan; must imply \p Recover
///                       (enforced by the assert below).
/// \param Recover        report errors but continue execution.
/// \param UseAfterScope  also instrument lifetime markers for
///                       use-after-scope detection.
FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel,
                                                       bool Recover,
                                                       bool UseAfterScope) {
  assert(!CompileKernel || Recover);
  return new AddressSanitizer(CompileKernel, Recover, UseAfterScope);
}
964 
965 char AddressSanitizerModule::ID = 0;
966 INITIALIZE_PASS(
967     AddressSanitizerModule, "asan-module",
968     "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
969     "ModulePass",
970     false, false)
971 ModulePass *llvm::createAddressSanitizerModulePass(bool CompileKernel,
972                                                    bool Recover,
973                                                    bool UseGlobalsGC) {
974   assert(!CompileKernel || Recover);
975   return new AddressSanitizerModule(CompileKernel, Recover, UseGlobalsGC);
976 }
977 
978 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
979   size_t Res = countTrailingZeros(TypeSize / 8);
980   assert(Res < kNumberOfAccessSizes);
981   return Res;
982 }
983 
984 // \brief Create a constant for Str so that we can pass it to the run-time lib.
985 static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
986                                                     bool AllowMerging) {
987   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
988   // We use private linkage for module-local strings. If they can be merged
989   // with another one, we set the unnamed_addr attribute.
990   GlobalVariable *GV =
991       new GlobalVariable(M, StrConst->getType(), true,
992                          GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
993   if (AllowMerging) GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
994   GV->setAlignment(1);  // Strings may not be merged w/o setting align 1.
995   return GV;
996 }
997 
998 /// \brief Create a global describing a source location.
999 static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
1000                                                        LocationMetadata MD) {
1001   Constant *LocData[] = {
1002       createPrivateGlobalForString(M, MD.Filename, true),
1003       ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
1004       ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
1005   };
1006   auto LocStruct = ConstantStruct::getAnon(LocData);
1007   auto GV = new GlobalVariable(M, LocStruct->getType(), true,
1008                                GlobalValue::PrivateLinkage, LocStruct,
1009                                kAsanGenPrefix);
1010   GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1011   return GV;
1012 }
1013 
1014 /// \brief Check if \p G has been created by a trusted compiler pass.
1015 static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
1016   // Do not instrument asan globals.
1017   if (G->getName().startswith(kAsanGenPrefix) ||
1018       G->getName().startswith(kSanCovGenPrefix) ||
1019       G->getName().startswith(kODRGenPrefix))
1020     return true;
1021 
1022   // Do not instrument gcov counter arrays.
1023   if (G->getName() == "__llvm_gcov_ctr")
1024     return true;
1025 
1026   return false;
1027 }
1028 
1029 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
1030   // Shadow >> scale
1031   Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
1032   if (Mapping.Offset == 0) return Shadow;
1033   // (Shadow >> scale) | offset
1034   Value *ShadowBase;
1035   if (LocalDynamicShadow)
1036     ShadowBase = LocalDynamicShadow;
1037   else
1038     ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1039   if (Mapping.OrShadowOffset)
1040     return IRB.CreateOr(Shadow, ShadowBase);
1041   else
1042     return IRB.CreateAdd(Shadow, ShadowBase);
1043 }
1044 
1045 // Instrument memset/memmove/memcpy
1046 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1047   IRBuilder<> IRB(MI);
1048   if (isa<MemTransferInst>(MI)) {
1049     IRB.CreateCall(
1050         isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
1051         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1052          IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
1053          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1054   } else if (isa<MemSetInst>(MI)) {
1055     IRB.CreateCall(
1056         AsanMemset,
1057         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1058          IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1059          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1060   }
1061   MI->eraseFromParent();
1062 }
1063 
1064 /// Check if we want (and can) handle this alloca.
1065 bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1066   auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
1067 
1068   if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
1069     return PreviouslySeenAllocaInfo->getSecond();
1070 
1071   bool IsInteresting =
1072       (AI.getAllocatedType()->isSized() &&
1073        // alloca() may be called with 0 size, ignore it.
1074        ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
1075        // We are only interested in allocas not promotable to registers.
1076        // Promotable allocas are common under -O0.
1077        (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
1078        // inalloca allocas are not treated as static, and we don't want
1079        // dynamic alloca instrumentation for them as well.
1080        !AI.isUsedWithInAlloca() &&
1081        // swifterror allocas are register promoted by ISel
1082        !AI.isSwiftError());
1083 
1084   ProcessedAllocas[&AI] = IsInteresting;
1085   return IsInteresting;
1086 }
1087 
/// Decide whether \p I is a memory access that should be instrumented.
/// On success returns the pointer operand and fills the out-parameters
/// (*IsWrite, *TypeSize in bits, *Alignment, and optionally *MaybeMask for
/// masked load/store); returns nullptr for accesses we skip.
Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                   bool *IsWrite,
                                                   uint64_t *TypeSize,
                                                   unsigned *Alignment,
                                                   Value **MaybeMask) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    // Atomics report alignment 0 (no guarantee is recorded on the instr).
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    // Masked vector loads/stores are matched by intrinsic name prefix.
    auto *F = dyn_cast<Function>(CI->getCalledValue());
    if (F && (F->getName().startswith("llvm.masked.load.") ||
              F->getName().startswith("llvm.masked.store."))) {
      unsigned OpOffset = 0;
      if (F->getName().startswith("llvm.masked.store.")) {
        if (!ClInstrumentWrites)
          return nullptr;
        // Masked store has an initial operand for the value.
        OpOffset = 1;
        *IsWrite = true;
      } else {
        if (!ClInstrumentReads)
          return nullptr;
        *IsWrite = false;
      }

      auto BasePtr = CI->getOperand(0 + OpOffset);
      auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
      *TypeSize = DL.getTypeStoreSizeInBits(Ty);
      if (auto AlignmentConstant =
              dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        *Alignment = (unsigned)AlignmentConstant->getZExtValue();
      else
        *Alignment = 1; // No alignment guarantees. We probably got Undef
      if (MaybeMask)
        *MaybeMask = CI->getOperand(2 + OpOffset);
      PtrOperand = BasePtr;
    }
  }

  if (PtrOperand) {
    // Do not instrument accesses from different address spaces; we cannot deal
    // with them.
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;

    // Ignore swifterror addresses.
    // swifterror memory addresses are mem2reg promoted by instruction
    // selection. As such they cannot have regular uses like an instrumentation
    // function and it makes no sense to track them as memory.
    if (PtrOperand->isSwiftError())
      return nullptr;
  }

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (ClSkipPromotableAllocas)
    if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
      return isInterestingAlloca(*AI) ? AI : nullptr;

  return PtrOperand;
}
1181 
1182 static bool isPointerOperand(Value *V) {
1183   return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1184 }
1185 
1186 // This is a rough heuristic; it may cause both false positives and
1187 // false negatives. The proper implementation requires cooperation with
1188 // the frontend.
1189 static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
1190   if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1191     if (!Cmp->isRelational()) return false;
1192   } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1193     if (BO->getOpcode() != Instruction::Sub) return false;
1194   } else {
1195     return false;
1196   }
1197   return isPointerOperand(I->getOperand(0)) &&
1198          isPointerOperand(I->getOperand(1));
1199 }
1200 
1201 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1202   // If a global variable does not have dynamic initialization we don't
1203   // have to instrument it.  However, if a global does not have initializer
1204   // at all, we assume it has dynamic initializer (in other TU).
1205   return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
1206 }
1207 
1208 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1209     Instruction *I) {
1210   IRBuilder<> IRB(I);
1211   Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1212   Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1213   for (Value *&i : Param) {
1214     if (i->getType()->isPointerTy())
1215       i = IRB.CreatePointerCast(i, IntptrTy);
1216   }
1217   IRB.CreateCall(F, Param);
1218 }
1219 
1220 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1221                                 Instruction *InsertBefore, Value *Addr,
1222                                 unsigned Alignment, unsigned Granularity,
1223                                 uint32_t TypeSize, bool IsWrite,
1224                                 Value *SizeArgument, bool UseCalls,
1225                                 uint32_t Exp) {
1226   // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
1227   // if the data is properly aligned.
1228   if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
1229        TypeSize == 128) &&
1230       (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
1231     return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
1232                                    nullptr, UseCalls, Exp);
1233   Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
1234                                          IsWrite, nullptr, UseCalls, Exp);
1235 }
1236 
/// Instrument a masked vector load/store by checking each lane's address.
/// Lanes whose mask bit is constant-false are skipped; for a non-constant
/// mask, a conditional block is emitted per lane so the check only runs
/// when the lane is active.
static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
                                        const DataLayout &DL, Type *IntptrTy,
                                        Value *Mask, Instruction *I,
                                        Value *Addr, unsigned Alignment,
                                        unsigned Granularity, uint32_t TypeSize,
                                        bool IsWrite, Value *SizeArgument,
                                        bool UseCalls, uint32_t Exp) {
  auto *VTy = cast<PointerType>(Addr->getType())->getElementType();
  // Each lane is checked as a scalar access of the element type's size.
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getVectorNumElements();
  auto Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to doInstrumentAddress
        // with InsertBefore == I
      }
    } else {
      // Non-constant mask: guard the lane's check with its mask element.
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      TerminatorInst *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    // Address of lane Idx within the vector pointed to by Addr.
    IRBuilder<> IRB(InsertBefore);
    InstrumentedAddress =
        IRB.CreateGEP(Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
                        Granularity, ElemTypeSize, IsWrite, SizeArgument,
                        UseCalls, Exp);
  }
}
1275 
/// Instrument one interesting memory operation \p I (previously identified by
/// isInterestingMemoryAccess). Applies the ClOpt* static optimizations first,
/// then emits either a masked or a plain address check.
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     Instruction *I, bool UseCalls,
                                     const DataLayout &DL) {
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *MaybeMask = nullptr;
  // Re-derive the access properties; the caller guarantees this is an
  // interesting access, hence the assert.
  Value *Addr =
      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
  assert(Addr);

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, you set Exp to a non-zero value (mask of optimization
  // experiments that want to remove instrumentation of this instruction).
  // If Exp is non-zero, this pass will emit special calls into runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
  // make runtime terminate the program in a special way (with a different
  // exit status). Then you run the new compiler on a buggy corpus, collect
  // the special terminations (ideally, you don't see them at all -- no false
  // negatives) and make the decision on the optimization.
  uint32_t Exp = ClForceExperiment;

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  // Statistics: counted only for accesses we actually instrument.
  if (IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  if (MaybeMask) {
    // Masked vector access: check each active lane individually.
    instrumentMaskedLoadOrStore(this, DL, IntptrTy, MaybeMask, I, Addr,
                                Alignment, Granularity, TypeSize, IsWrite,
                                nullptr, UseCalls, Exp);
  } else {
    doInstrumentAddress(this, I, I, Addr, Alignment, Granularity, TypeSize,
                        IsWrite, nullptr, UseCalls, Exp);
  }
}
1335 
1336 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1337                                                  Value *Addr, bool IsWrite,
1338                                                  size_t AccessSizeIndex,
1339                                                  Value *SizeArgument,
1340                                                  uint32_t Exp) {
1341   IRBuilder<> IRB(InsertBefore);
1342   Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1343   CallInst *Call = nullptr;
1344   if (SizeArgument) {
1345     if (Exp == 0)
1346       Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
1347                             {Addr, SizeArgument});
1348     else
1349       Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
1350                             {Addr, SizeArgument, ExpVal});
1351   } else {
1352     if (Exp == 0)
1353       Call =
1354           IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1355     else
1356       Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
1357                             {Addr, ExpVal});
1358   }
1359 
1360   // We don't do Call->setDoesNotReturn() because the BB already has
1361   // UnreachableInst at the end.
1362   // This EmptyAsm is required to avoid callback merge.
1363   IRB.CreateCall(EmptyAsm, {});
1364   return Call;
1365 }
1366 
1367 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1368                                            Value *ShadowValue,
1369                                            uint32_t TypeSize) {
1370   size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1371   // Addr & (Granularity - 1)
1372   Value *LastAccessedByte =
1373       IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1374   // (Addr & (Granularity - 1)) + size - 1
1375   if (TypeSize / 8 > 1)
1376     LastAccessedByte = IRB.CreateAdd(
1377         LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
1378   // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1379   LastAccessedByte =
1380       IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1381   // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1382   return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1383 }
1384 
// Emit a shadow-memory check for a TypeSize-bit access at Addr. With
// UseCalls the whole check is outlined into a runtime callback; otherwise
// inline compare-and-branch code is emitted and the report call is produced
// by generateCrashCode. Exp != 0 selects the "_exp" callback flavor that
// carries an extra i32 argument; Recover controls whether the report path
// falls through or ends in unreachable.
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
                                         uint32_t Exp) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);

  if (UseCalls) {
    // Outlined check: call __asan_{load,store}N[_exp](addr[, exp]).
    if (Exp == 0)
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
                     AddrLong);
    else
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
    return;
  }

  // Inline check: load the shadow for this address (at least one byte wide)
  // and branch to the report path if it is non-zero.
  Type *ShadowTy =
      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *CmpVal = Constant::getNullValue(ShadowTy);
  Value *ShadowValue =
      IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));

  Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
  size_t Granularity = 1ULL << Mapping.Scale;
  TerminatorInst *CrashTerm = nullptr;

  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    // Access narrower than a full shadow granule: a non-zero shadow byte may
    // still be OK if the access ends before the first poisoned byte, so a
    // second (slow-path) comparison is required — see createSlowPathCmp.
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
    TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    if (Recover) {
      // Recoverable mode: report, then fall through to the original code.
      CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
    } else {
      // Fatal mode: send the failing path into a fresh block terminated by
      // unreachable; generateCrashCode inserts the report before CrashTerm.
      BasicBlock *CrashBlock =
        BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
      CrashTerm = new UnreachableInst(*C, CrashBlock);
      BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
      ReplaceInstWithInst(CheckTerm, NewTerm);
    }
  } else {
    // Full-granule access: any non-zero shadow byte is an error, no slow
    // path needed. The branch is marked unreachable unless recovering.
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
  }

  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
                                         AccessSizeIndex, SizeArgument, Exp);
  // Attribute the report to the original (pre-split) instruction's location.
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}
1442 
1443 // Instrument unusual size or unusual alignment.
1444 // We can not do it with a single check, so we do 1-byte check for the first
1445 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1446 // to report the actual access size.
1447 void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1448     Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
1449     bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
1450   IRBuilder<> IRB(InsertBefore);
1451   Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
1452   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1453   if (UseCalls) {
1454     if (Exp == 0)
1455       IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
1456                      {AddrLong, Size});
1457     else
1458       IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
1459                      {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1460   } else {
1461     Value *LastByte = IRB.CreateIntToPtr(
1462         IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
1463         Addr->getType());
1464     instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
1465     instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
1466   }
1467 }
1468 
1469 void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
1470                                                   GlobalValue *ModuleName) {
1471   // Set up the arguments to our poison/unpoison functions.
1472   IRBuilder<> IRB(&GlobalInit.front(),
1473                   GlobalInit.front().getFirstInsertionPt());
1474 
1475   // Add a call to poison all external globals before the given function starts.
1476   Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
1477   IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1478 
1479   // Add calls to unpoison all globals before each return instruction.
1480   for (auto &BB : GlobalInit.getBasicBlockList())
1481     if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1482       CallInst::Create(AsanUnpoisonGlobals, "", RI);
1483 }
1484 
1485 void AddressSanitizerModule::createInitializerPoisonCalls(
1486     Module &M, GlobalValue *ModuleName) {
1487   GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1488   if (!GV)
1489     return;
1490 
1491   ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1492   if (!CA)
1493     return;
1494 
1495   for (Use &OP : CA->operands()) {
1496     if (isa<ConstantAggregateZero>(OP)) continue;
1497     ConstantStruct *CS = cast<ConstantStruct>(OP);
1498 
1499     // Must have a function or null ptr.
1500     if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
1501       if (F->getName() == kAsanModuleCtorName) continue;
1502       ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
1503       // Don't instrument CTORs that will run before asan.module_ctor.
1504       if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
1505       poisonOneInitializer(*F, ModuleName);
1506     }
1507   }
1508 }
1509 
// Decide whether global G may safely get redzones and runtime registration.
// Returns false for globals that are blacklisted, compiler-generated,
// externally defined, or placed in sections the dynamic linker / runtimes
// interpret structurally (where padding would break things).
bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
  Type *Ty = G->getValueType();
  DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  // Explicitly blacklisted via the sanitizer special-case list / metadata.
  if (GlobalsMD.get(G).IsBlacklisted) return false;
  // Unsized or declaration-only globals have no storage to pad.
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
  // Touch only those globals that will not be defined in other modules.
  // Don't handle ODR linkage types and COMDATs since other modules may be built
  // without ASan.
  if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
      G->getLinkage() != GlobalVariable::PrivateLinkage &&
      G->getLinkage() != GlobalVariable::InternalLinkage)
    return false;
  if (G->hasComdat()) return false;
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this Global if the alignment is large.
  // (Redzone placement assumes alignment <= the minimum redzone size.)
  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;

  if (G->hasSection()) {
    StringRef Section = G->getSection();

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
    // Do not instrument globals from special LLVM sections.
    if (Section.find("__llvm") != StringRef::npos || Section.find("__LLVM") != StringRef::npos) return false;

    // Do not instrument function pointers to initialization and termination
    // routines: dynamic linker will not properly handle redzones.
    if (Section.startswith(".preinit_array") ||
        Section.startswith(".init_array") ||
        Section.startswith(".fini_array")) {
      return false;
    }

    // Callbacks put into the CRT initializer/terminator sections
    // should not be instrumented.
    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (Section.startswith(".CRT")) {
      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
      return false;
    }

    if (TargetTriple.isOSBinFormatMachO()) {
      // Mach-O sections are "segment,section[,attributes]"; parse the spec
      // so segment and section can be checked separately.
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      bool TAAParsed;
      std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
      assert(ErrorCode.empty() && "Invalid section specifier.");

      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      // them.
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
        DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
        return false;
      }
      // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
        return false;
      }
      // The linker merges the contents of cstring_literals and removes the
      // trailing zeroes.
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
        return false;
      }
    }
  }

  return true;
}
1597 
1598 // On Mach-O platforms, we emit global metadata in a separate section of the
1599 // binary in order to allow the linker to properly dead strip. This is only
1600 // supported on recent versions of ld64.
1601 bool AddressSanitizerModule::ShouldUseMachOGlobalsSection() const {
1602   if (!TargetTriple.isOSBinFormatMachO())
1603     return false;
1604 
1605   if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
1606     return true;
1607   if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
1608     return true;
1609   if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
1610     return true;
1611 
1612   return false;
1613 }
1614 
1615 StringRef AddressSanitizerModule::getGlobalMetadataSection() const {
1616   switch (TargetTriple.getObjectFormat()) {
1617   case Triple::COFF:  return ".ASAN$GL";
1618   case Triple::ELF:   return "asan_globals";
1619   case Triple::MachO: return "__DATA,__asan_globals,regular";
1620   default: break;
1621   }
1622   llvm_unreachable("unsupported object format");
1623 }
1624 
1625 void AddressSanitizerModule::initializeCallbacks(Module &M) {
1626   IRBuilder<> IRB(*C);
1627 
1628   // Declare our poisoning and unpoisoning functions.
1629   AsanPoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
1630       kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy));
1631   AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
1632   AsanUnpoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
1633       kAsanUnpoisonGlobalsName, IRB.getVoidTy()));
1634   AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
1635 
1636   // Declare functions that register/unregister globals.
1637   AsanRegisterGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
1638       kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy));
1639   AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
1640   AsanUnregisterGlobals = checkSanitizerInterfaceFunction(
1641       M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(),
1642                             IntptrTy, IntptrTy));
1643   AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
1644 
1645   // Declare the functions that find globals in a shared object and then invoke
1646   // the (un)register function on them.
1647   AsanRegisterImageGlobals =
1648       checkSanitizerInterfaceFunction(M.getOrInsertFunction(
1649           kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy));
1650   AsanRegisterImageGlobals->setLinkage(Function::ExternalLinkage);
1651 
1652   AsanUnregisterImageGlobals =
1653       checkSanitizerInterfaceFunction(M.getOrInsertFunction(
1654           kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy));
1655   AsanUnregisterImageGlobals->setLinkage(Function::ExternalLinkage);
1656 
1657   AsanRegisterElfGlobals = checkSanitizerInterfaceFunction(
1658       M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
1659                             IntptrTy, IntptrTy, IntptrTy));
1660   AsanRegisterElfGlobals->setLinkage(Function::ExternalLinkage);
1661 
1662   AsanUnregisterElfGlobals = checkSanitizerInterfaceFunction(
1663       M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
1664                             IntptrTy, IntptrTy, IntptrTy));
1665   AsanUnregisterElfGlobals->setLinkage(Function::ExternalLinkage);
1666 }
1667 
1668 // Put the metadata and the instrumented global in the same group. This ensures
1669 // that the metadata is discarded if the instrumented global is discarded.
1670 void AddressSanitizerModule::SetComdatForGlobalMetadata(
1671     GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
1672   Module &M = *G->getParent();
1673   Comdat *C = G->getComdat();
1674   if (!C) {
1675     if (!G->hasName()) {
1676       // If G is unnamed, it must be internal. Give it an artificial name
1677       // so we can put it in a comdat.
1678       assert(G->hasLocalLinkage());
1679       G->setName(Twine(kAsanGenPrefix) + "_anon_global");
1680     }
1681 
1682     if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
1683       std::string Name = G->getName();
1684       Name += InternalSuffix;
1685       C = M.getOrInsertComdat(Name);
1686     } else {
1687       C = M.getOrInsertComdat(G->getName());
1688     }
1689 
1690     // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF.
1691     if (TargetTriple.isOSBinFormatCOFF())
1692       C->setSelectionKind(Comdat::NoDuplicates);
1693     G->setComdat(C);
1694   }
1695 
1696   assert(G->hasComdat());
1697   Metadata->setComdat(G->getComdat());
1698 }
1699 
1700 // Create a separate metadata global and put it in the appropriate ASan
1701 // global registration section.
1702 GlobalVariable *
1703 AddressSanitizerModule::CreateMetadataGlobal(Module &M, Constant *Initializer,
1704                                              StringRef OriginalName) {
1705   auto Linkage = TargetTriple.isOSBinFormatMachO()
1706                      ? GlobalVariable::InternalLinkage
1707                      : GlobalVariable::PrivateLinkage;
1708   GlobalVariable *Metadata = new GlobalVariable(
1709       M, Initializer->getType(), false, Linkage, Initializer,
1710       Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
1711   Metadata->setSection(getGlobalMetadataSection());
1712   return Metadata;
1713 }
1714 
1715 IRBuilder<> AddressSanitizerModule::CreateAsanModuleDtor(Module &M) {
1716   AsanDtorFunction =
1717       Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
1718                        GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
1719   BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
1720 
1721   return IRBuilder<>(ReturnInst::Create(*C, AsanDtorBB));
1722 }
1723 
1724 void AddressSanitizerModule::InstrumentGlobalsCOFF(
1725     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
1726     ArrayRef<Constant *> MetadataInitializers) {
1727   assert(ExtendedGlobals.size() == MetadataInitializers.size());
1728   auto &DL = M.getDataLayout();
1729 
1730   for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
1731     Constant *Initializer = MetadataInitializers[i];
1732     GlobalVariable *G = ExtendedGlobals[i];
1733     GlobalVariable *Metadata =
1734         CreateMetadataGlobal(M, Initializer, G->getName());
1735 
1736     // The MSVC linker always inserts padding when linking incrementally. We
1737     // cope with that by aligning each struct to its size, which must be a power
1738     // of two.
1739     unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
1740     assert(isPowerOf2_32(SizeOfGlobalStruct) &&
1741            "global metadata will not be padded appropriately");
1742     Metadata->setAlignment(SizeOfGlobalStruct);
1743 
1744     SetComdatForGlobalMetadata(G, Metadata, "");
1745   }
1746 }
1747 
// ELF flavor of global registration: emit one metadata descriptor per
// global into the asan_globals section, tie each descriptor's liveness to
// its global via !associated metadata, and register the whole section range
// (via __start_/__stop_ symbols) with the runtime in the module ctor/dtor.
void AddressSanitizerModule::InstrumentGlobalsELF(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers,
    const std::string &UniqueModuleId) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata =
        CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
    // !associated lets the linker drop the metadata when G is dropped.
    MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
    Metadata->setMetadata(LLVMContext::MD_associated, MD);
    MetadataGlobals[i] = Metadata;

    // UniqueModuleId keeps comdats from different TUs distinct for local
    // symbols.
    SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
  }

  // Update llvm.compiler.used, adding the new metadata globals. This is
  // needed so that during LTO these variables stay alive.
  if (!MetadataGlobals.empty())
    appendToCompilerUsed(M, MetadataGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // Common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);

  // Create start and stop symbols. These are resolved by the linker to the
  // bounds of the asan_globals section (extern weak: may be absent).
  GlobalVariable *StartELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__start_" + getGlobalMetadataSection());
  StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
  GlobalVariable *StopELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__stop_" + getGlobalMetadataSection());
  StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);

  // Create a call to register the globals with the runtime.
  IRB.CreateCall(AsanRegisterElfGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                  IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                  IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  // NOTE(review): the casts below are built with the ctor builder IRB but
  // used in the dtor call; since the operands are globals the casts should
  // fold to ConstantExprs and emit no instruction — confirm, else these
  // should use IRB_Dtor.
  IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
  IRB_Dtor.CreateCall(AsanUnregisterElfGlobals,
                      {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                       IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                       IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
}
1806 
// Mach-O flavor of global registration: metadata descriptors go into
// __DATA,__asan_globals, and a per-global "liveness binder" struct in the
// live_support section __DATA,__asan_liveness ties each descriptor's
// lifetime to its global, so ld64 can dead-strip them together.
void AddressSanitizerModule::InstrumentGlobalsMachO(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  // On recent Mach-O platforms, use a structure which binds the liveness of
  // the global variable to the metadata struct. Keep the list of "Liveness" GV
  // created to be added to llvm.compiler.used
  StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
  SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());

  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    Constant *Initializer = MetadataInitializers[i];
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata =
        CreateMetadataGlobal(M, Initializer, G->getName());

    // On recent Mach-O platforms, we emit the global metadata in a way that
    // allows the linker to properly strip dead globals.
    // The binder is { address-of-global (descriptor field 0), address-of-
    // metadata }.
    auto LivenessBinder =
        ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
                            ConstantExpr::getPointerCast(Metadata, IntptrTy));
    GlobalVariable *Liveness = new GlobalVariable(
        M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
        Twine("__asan_binder_") + G->getName());
    Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
    LivenessGlobals[i] = Liveness;
  }

  // Update llvm.compiler.used, adding the new liveness globals. This is
  // needed so that during LTO these variables stay alive. The alternative
  // would be to have the linker handling the LTO symbols, but libLTO
  // current API does not expose access to the section for each symbol.
  if (!LivenessGlobals.empty())
    appendToCompilerUsed(M, LivenessGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);

  // The runtime locates the descriptors via the image containing the flag,
  // so only the flag address is passed here.
  IRB.CreateCall(AsanRegisterImageGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  // NOTE(review): cast built with the ctor builder IRB but used in the dtor
  // call; it folds to a ConstantExpr for a global, so no instruction should
  // land in the wrong function — confirm.
  IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
  IRB_Dtor.CreateCall(AsanUnregisterImageGlobals,
                      {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
}
1863 
1864 void AddressSanitizerModule::InstrumentGlobalsWithMetadataArray(
1865     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
1866     ArrayRef<Constant *> MetadataInitializers) {
1867   assert(ExtendedGlobals.size() == MetadataInitializers.size());
1868   unsigned N = ExtendedGlobals.size();
1869   assert(N > 0);
1870 
1871   // On platforms that don't have a custom metadata section, we emit an array
1872   // of global metadata structures.
1873   ArrayType *ArrayOfGlobalStructTy =
1874       ArrayType::get(MetadataInitializers[0]->getType(), N);
1875   auto AllGlobals = new GlobalVariable(
1876       M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
1877       ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
1878 
1879   IRB.CreateCall(AsanRegisterGlobals,
1880                  {IRB.CreatePointerCast(AllGlobals, IntptrTy),
1881                   ConstantInt::get(IntptrTy, N)});
1882 
1883   // We also need to unregister globals at the end, e.g., when a shared library
1884   // gets closed.
1885   IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
1886   IRB_Dtor.CreateCall(AsanUnregisterGlobals,
1887                       {IRB.CreatePointerCast(AllGlobals, IntptrTy),
1888                        ConstantInt::get(IntptrTy, N)});
1889 }
1890 
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
// Sets *CtorComdat to true if the global registration code emitted into the
// asan constructor is comdat-compatible.
bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat) {
  *CtorComdat = false;
  GlobalsMD.init(M);

  // First pass: select the globals that can safely be padded (see
  // ShouldInstrumentGlobal for the exclusion rules).
  SmallVector<GlobalVariable *, 16> GlobalsToChange;

  for (auto &G : M.globals()) {
    if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G);
  }

  size_t n = GlobalsToChange.size();
  if (n == 0) {
    // Nothing emitted into the ctor, so it is trivially comdat-compatible.
    *CtorComdat = true;
    return false;
  }

  auto &DL = M.getDataLayout();

  // A global is described by a structure
  //   size_t beg;
  //   size_t size;
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  //   void *source_location;
  //   size_t odr_indicator;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, IntptrTy);
  SmallVector<GlobalVariable *, 16> NewGlobals(n);
  SmallVector<Constant *, 16> Initializers(n);

  bool HasDynamicallyInitializedGlobals = false;

  // We shouldn't merge same module names, as this string serves as unique
  // module ID in runtime.
  GlobalVariable *ModuleName = createPrivateGlobalForString(
      M, M.getModuleIdentifier(), /*AllowMerging*/ false);

  for (size_t i = 0; i < n; i++) {
    static const uint64_t kMaxGlobalRedzone = 1 << 18;
    GlobalVariable *G = GlobalsToChange[i];

    auto MD = GlobalsMD.get(G);
    StringRef NameForGlobal = G->getName();
    // Create string holding the global name (use global name from metadata
    // if it's available, otherwise just write the name of global variable).
    GlobalVariable *Name = createPrivateGlobalForString(
        M, MD.Name.empty() ? NameForGlobal : MD.Name,
        /*AllowMerging*/ true);

    Type *Ty = G->getValueType();
    uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
    uint64_t MinRZ = MinRedzoneSizeForGlobal();
    // MinRZ <= RZ <= kMaxGlobalRedzone
    // and trying to make RZ to be ~ 1/4 of SizeInBytes.
    uint64_t RZ = std::max(
        MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
    uint64_t RightRedzoneSize = RZ;
    // Round up to MinRZ
    if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
    assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);

    // The padded global is { original-type, [RightRedzoneSize x i8] } with a
    // zeroed redzone; the payload stays at offset 0.
    StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
    Constant *NewInitializer = ConstantStruct::get(
        NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
      Linkage = GlobalValue::InternalLinkage;
    GlobalVariable *NewGlobal =
        new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
                           "", G, G->getThreadLocalMode());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setAlignment(MinRZ);

    // Move null-terminated C strings to "__asan_cstring" section on Darwin.
    if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
        G->isConstant()) {
      auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
      if (Seq && Seq->isCString())
        NewGlobal->setSection("__TEXT,__asan_cstring,regular");
    }

    // Transfer the debug info.  The payload starts at offset zero so we can
    // copy the debug info over as is.
    SmallVector<DIGlobalVariableExpression *, 1> GVs;
    G->getDebugInfo(GVs);
    for (auto *GV : GVs)
      NewGlobal->addDebugInfo(GV);

    // Replace every use of G with a GEP to field 0 of the padded struct,
    // then let the new global take over G's name and delete G.
    Value *Indices2[2];
    Indices2[0] = IRB.getInt32(0);
    Indices2[1] = IRB.getInt32(0);

    G->replaceAllUsesWith(
        ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
    NewGlobal->takeName(G);
    G->eraseFromParent();
    NewGlobals[i] = NewGlobal;

    // Source location for the descriptor: pointer to a location struct if
    // metadata provided one, otherwise 0.
    Constant *SourceLoc;
    if (!MD.SourceLoc.empty()) {
      auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
      SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
    } else {
      SourceLoc = ConstantInt::get(IntptrTy, 0);
    }

    Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
    GlobalValue *InstrumentedGlobal = NewGlobal;

    bool CanUsePrivateAliases =
        TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
        TargetTriple.isOSBinFormatWasm();
    if (CanUsePrivateAliases && ClUsePrivateAliasForGlobals) {
      // Create local alias for NewGlobal to avoid crash on ODR between
      // instrumented and non-instrumented libraries.
      auto *GA = GlobalAlias::create(GlobalValue::InternalLinkage,
                                     NameForGlobal + M.getName(), NewGlobal);

      // With local aliases, we need to provide another externally visible
      // symbol __odr_asan_XXX to detect ODR violation.
      auto *ODRIndicatorSym =
          new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
                             Constant::getNullValue(IRB.getInt8Ty()),
                             kODRGenPrefix + NameForGlobal, nullptr,
                             NewGlobal->getThreadLocalMode());

      // Set meaningful attributes for indicator symbol.
      ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
      ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
      ODRIndicatorSym->setAlignment(1);
      ODRIndicator = ODRIndicatorSym;
      InstrumentedGlobal = GA;
    }

    // Fill in one GlobalStructTy descriptor (field order documented above).
    Constant *Initializer = ConstantStruct::get(
        GlobalStructTy,
        ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(ModuleName, IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc,
        ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));

    if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;

    DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");

    Initializers[i] = Initializer;
  }

  // Pick the registration strategy by object format; only the ELF scheme
  // with a unique module id is comdat-compatible.
  std::string ELFUniqueModuleId =
      (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M)
                                                        : "";

  if (!ELFUniqueModuleId.empty()) {
    InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
    *CtorComdat = true;
  } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
    InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
  } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
    InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
  } else {
    InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
  }

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);

  DEBUG(dbgs() << M);
  return true;
}
2076 
// Module-pass entry point: caches per-module state, creates the module
// constructor (which calls __asan_init), optionally instruments globals, and
// registers the ctor/dtor with the runtime priority. Returns true iff the
// module was changed.
bool AddressSanitizerModule::runOnModule(Module &M) {
  C = &(M.getContext());
  int LongSize = M.getDataLayout().getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);
  TargetTriple = Triple(M.getTargetTriple());
  Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
  initializeCallbacks(M);

  // Kernel ASan does no module-level instrumentation here.
  if (CompileKernel)
    return false;

  // Create a module constructor. A destructor is created lazily because not all
  // platforms, and not all modules need it.
  std::tie(AsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
      M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{}, kAsanVersionCheckName);

  // CtorComdat reports whether the ctor may be placed in a comdat; it is
  // updated by InstrumentGlobals (set on the ELF unique-module-id path).
  bool CtorComdat = true;
  bool Changed = false;
  // TODO(glider): temporarily disabled globals instrumentation for KASan.
  if (ClGlobals) {
    // Insert global-registration code inside the ctor, before its terminator.
    IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
    Changed |= InstrumentGlobals(IRB, M, &CtorComdat);
  }

  // Put the constructor and destructor in comdat if both
  // (1) global instrumentation is not TU-specific
  // (2) target is ELF.
  if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
    AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
    // The extra AsanCtorFunction argument makes the llvm.global_ctors entry
    // reference the ctor so it is kept alive with its comdat group.
    appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority,
                        AsanCtorFunction);
    if (AsanDtorFunction) {
      AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
      appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority,
                          AsanDtorFunction);
    }
  } else {
    appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
    if (AsanDtorFunction)
      appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
  }

  return Changed;
}
2122 
2123 void AddressSanitizer::initializeCallbacks(Module &M) {
2124   IRBuilder<> IRB(*C);
2125   // Create __asan_report* callbacks.
2126   // IsWrite, TypeSize and Exp are encoded in the function name.
2127   for (int Exp = 0; Exp < 2; Exp++) {
2128     for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2129       const std::string TypeStr = AccessIsWrite ? "store" : "load";
2130       const std::string ExpStr = Exp ? "exp_" : "";
2131       const std::string SuffixStr = CompileKernel ? "N" : "_n";
2132       const std::string EndingStr = Recover ? "_noabort" : "";
2133 
2134       SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2135       SmallVector<Type *, 2> Args1{1, IntptrTy};
2136       if (Exp) {
2137         Type *ExpType = Type::getInt32Ty(*C);
2138         Args2.push_back(ExpType);
2139         Args1.push_back(ExpType);
2140       }
2141       AsanErrorCallbackSized[AccessIsWrite][Exp] =
2142           checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2143               kAsanReportErrorTemplate + ExpStr + TypeStr + SuffixStr +
2144                   EndingStr,
2145               FunctionType::get(IRB.getVoidTy(), Args2, false)));
2146 
2147       AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] =
2148           checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2149               ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2150               FunctionType::get(IRB.getVoidTy(), Args2, false)));
2151 
2152       for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2153            AccessSizeIndex++) {
2154         const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2155         AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2156             checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2157                 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2158                 FunctionType::get(IRB.getVoidTy(), Args1, false)));
2159 
2160         AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2161             checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2162                 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2163                 FunctionType::get(IRB.getVoidTy(), Args1, false)));
2164       }
2165     }
2166   }
2167 
2168   const std::string MemIntrinCallbackPrefix =
2169       CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
2170   AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2171       MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
2172       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
2173   AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2174       MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
2175       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
2176   AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2177       MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(),
2178       IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy));
2179 
2180   AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction(
2181       M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy()));
2182 
2183   AsanPtrCmpFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2184       kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy));
2185   AsanPtrSubFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2186       kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy));
2187   // We insert an empty inline asm after __asan_report* to avoid callback merge.
2188   EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
2189                             StringRef(""), StringRef(""),
2190                             /*hasSideEffects=*/true);
2191 }
2192 
2193 // virtual
2194 bool AddressSanitizer::doInitialization(Module &M) {
2195   // Initialize the private fields. No one has accessed them before.
2196   GlobalsMD.init(M);
2197 
2198   C = &(M.getContext());
2199   LongSize = M.getDataLayout().getPointerSizeInBits();
2200   IntptrTy = Type::getIntNTy(*C, LongSize);
2201   TargetTriple = Triple(M.getTargetTriple());
2202 
2203   Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
2204   return true;
2205 }
2206 
// Drop the cached global metadata so stale entries cannot leak into the next
// module processed by this pass instance. Never modifies the module itself.
bool AddressSanitizer::doFinalization(Module &M) {
  GlobalsMD.reset();
  return false;
}
2211 
2212 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2213   // For each NSObject descendant having a +load method, this method is invoked
2214   // by the ObjC runtime before any of the static constructors is called.
2215   // Therefore we need to instrument such methods with a call to __asan_init
2216   // at the beginning in order to initialize our runtime before any access to
2217   // the shadow memory.
2218   // We cannot just ignore these methods, because they may call other
2219   // instrumented functions.
2220   if (F.getName().find(" load]") != std::string::npos) {
2221     Function *AsanInitFunction =
2222         declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2223     IRBuilder<> IRB(&F.front(), F.front().begin());
2224     IRB.CreateCall(AsanInitFunction, {});
2225     return true;
2226   }
2227   return false;
2228 }
2229 
2230 void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2231   // Generate code only when dynamic addressing is needed.
2232   if (Mapping.Offset != kDynamicShadowSentinel)
2233     return;
2234 
2235   IRBuilder<> IRB(&F.front().front());
2236   Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2237       kAsanShadowMemoryDynamicAddress, IntptrTy);
2238   LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
2239 }
2240 
2241 void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2242   // Find the one possible call to llvm.localescape and pre-mark allocas passed
2243   // to it as uninteresting. This assumes we haven't started processing allocas
2244   // yet. This check is done up front because iterating the use list in
2245   // isInterestingAlloca would be algorithmically slower.
2246   assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2247 
2248   // Try to get the declaration of llvm.localescape. If it's not in the module,
2249   // we can exit early.
2250   if (!F.getParent()->getFunction("llvm.localescape")) return;
2251 
2252   // Look for a call to llvm.localescape call in the entry block. It can't be in
2253   // any other block.
2254   for (Instruction &I : F.getEntryBlock()) {
2255     IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2256     if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2257       // We found a call. Mark all the allocas passed in as uninteresting.
2258       for (Value *Arg : II->arg_operands()) {
2259         AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2260         assert(AI && AI->isStaticAlloca() &&
2261                "non-static alloca arg to localescape");
2262         ProcessedAllocas[AI] = false;
2263       }
2264       break;
2265     }
2266   }
2267 }
2268 
// Function-pass driver: collects the interesting memory operations in F,
// instruments them, poisons the stack frame via FunctionStackPoisoner, and
// unpoisons the stack before noreturn calls. Returns true iff F was changed.
bool AddressSanitizer::runOnFunction(Function &F) {
  // Never instrument available_externally bodies, the -asan-debug-func
  // filter's excluded functions, or ASan's own runtime thunks.
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
  if (F.getName().startswith("__asan_")) return false;

  bool FunctionModified = false;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent());
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  // NOTE(review): presumably resets per-function pass state on scope exit —
  // confirm against the FunctionStateRAII definition.
  FunctionStateRAII CleanupObj(this);

  maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static allocas
  // can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  // We want to instrument every address only once per basic block (unless there
  // are calls between uses).
  SmallSet<Value *, 16> TempsToInstrument;
  SmallVector<Instruction *, 16> ToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<BasicBlock *, 16> AllBlocks;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
  int NumAllocas = 0;
  // Out-parameters filled in by isInterestingMemoryAccess below.
  bool IsWrite;
  unsigned Alignment;
  uint64_t TypeSize;
  const TargetLibraryInfo *TLI =
      &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    // The dedup set is per-basic-block; see comment above its declaration.
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      // Give up on the whole function if it matches the PR11395 pattern.
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      Value *MaybeMask = nullptr;
      if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
                                                  &Alignment, &MaybeMask)) {
        if (ClOpt && ClOptSameTemp) {
          // If we have a mask, skip instrumentation if we've already
          // instrumented the full object. But don't add to TempsToInstrument
          // because we might get another load/store with a different mask.
          if (MaybeMask) {
            if (TempsToInstrument.count(Addr))
              continue; // We've seen this (whole) temp in the current BB.
          } else {
            if (!TempsToInstrument.insert(Addr).second)
              continue; // We've seen this temp in the current BB.
          }
        }
      } else if (ClInvalidPointerPairs &&
                 isInterestingPointerComparisonOrSubtraction(&Inst)) {
        PointerComparisonsOrSubtracts.push_back(&Inst);
        continue;
      } else if (isa<MemIntrinsic>(Inst)) {
        // ok, take it.
      } else {
        if (isa<AllocaInst>(Inst)) NumAllocas++;
        CallSite CS(&Inst);
        if (CS) {
          // A call inside BB: calls may clobber memory, so restart the
          // per-block same-temp deduplication.
          TempsToInstrument.clear();
          if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction());
        }
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
        continue;
      }
      ToInstrument.push_back(&Inst);
      NumInsnsPerBB++;
      // Cap the number of instrumented accesses per basic block.
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
    }
  }

  // Above a threshold, emit out-of-line callback calls instead of inline
  // checks (always in kernel mode).
  bool UseCalls =
      CompileKernel ||
      (ClInstrumentationWithCallsThreshold >= 0 &&
       ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold);
  const DataLayout &DL = F.getParent()->getDataLayout();
  ObjectSizeOpts ObjSizeOpts;
  ObjSizeOpts.RoundToAlign = true;
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);

  // Instrument.
  int NumInstrumented = 0;
  for (auto Inst : ToInstrument) {
    // ClDebugMin/Max restrict instrumentation to a numbered window for
    // debugging; negative values disable the filter.
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
        instrumentMop(ObjSizeVis, Inst, UseCalls,
                      F.getParent()->getDataLayout());
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc, {});
  }

  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    NumInstrumented++;
  }

  if (NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
               << F << "\n");

  return FunctionModified;
}
2404 
2405 // Workaround for bug 11395: we don't want to instrument stack in functions
2406 // with large assembly blobs (32-bit only), otherwise reg alloc may crash.
2407 // FIXME: remove once the bug 11395 is fixed.
2408 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
2409   if (LongSize != 32) return false;
2410   CallInst *CI = dyn_cast<CallInst>(I);
2411   if (!CI || !CI->isInlineAsm()) return false;
2412   if (CI->getNumArgOperands() <= 5) return false;
2413   // We have inline assembly with quite a few arguments.
2414   return true;
2415 }
2416 
2417 void FunctionStackPoisoner::initializeCallbacks(Module &M) {
2418   IRBuilder<> IRB(*C);
2419   for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
2420     std::string Suffix = itostr(i);
2421     AsanStackMallocFunc[i] = checkSanitizerInterfaceFunction(
2422         M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
2423                               IntptrTy));
2424     AsanStackFreeFunc[i] = checkSanitizerInterfaceFunction(
2425         M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
2426                               IRB.getVoidTy(), IntptrTy, IntptrTy));
2427   }
2428   if (ASan.UseAfterScope) {
2429     AsanPoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
2430         M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
2431                               IntptrTy, IntptrTy));
2432     AsanUnpoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
2433         M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
2434                               IntptrTy, IntptrTy));
2435   }
2436 
2437   for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
2438     std::ostringstream Name;
2439     Name << kAsanSetShadowPrefix;
2440     Name << std::setw(2) << std::setfill('0') << std::hex << Val;
2441     AsanSetShadowFunc[Val] =
2442         checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2443             Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy));
2444   }
2445 
2446   AsanAllocaPoisonFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2447       kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy));
2448   AsanAllocasUnpoisonFunc =
2449       checkSanitizerInterfaceFunction(M.getOrInsertFunction(
2450           kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy));
2451 }
2452 
// Emit inline stores copying ShadowBytes[Begin, End) to shadow memory at
// ShadowBase + Begin. Bytes whose ShadowMask entry is zero carry no change
// and may be skipped (or overwritten mid-store with their existing value).
void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  // Widest store we will emit: 8 bytes, capped at the target pointer size.
  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size, without
  // leading and trailing zeros in ShadowMask. Zero mask bytes never change,
  // so they need neither poisoning nor un-poisoning; still we don't mind if
  // some of them get covered by the middle of a wider store.
  for (size_t i = Begin; i < End;) {
    // Skip unmasked (unchanged) bytes entirely.
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    // Pack the shadow bytes for this store into one integer in target byte
    // order.
    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    // Store to ShadowBase + i; alignment 1 since the shadow address may not
    // be aligned to the store width.
    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 1);

    i += StoreSizeInBytes;
  }
}
2504 
// Convenience overload: copy the entire ShadowBytes range to shadow memory.
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}
2510 
// Copy ShadowBytes[Begin, End) into shadow memory. Runs of one value that
// are at least ClMaxInlinePoisoningSize long and have a matching
// __asan_set_shadow_XX helper are emitted as a single call; everything else
// is flushed through copyToShadowInline.
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  // Everything in [Begin, Done) has already been written out.
  size_t Done = Begin;
  // i is the start of the candidate run, j scans forward past its end.
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    // Values without a __asan_set_shadow_XX helper are always stored inline.
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ClMaxInlinePoisoningSize) {
      // Flush the pending bytes before the run inline, then cover [i, j)
      // with one runtime call.
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      IRB.CreateCall(AsanSetShadowFunc[Val],
                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
                      ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  // Flush whatever remains after the last call-covered run.
  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}
2541 
2542 // Fake stack allocator (asan_fake_stack.h) has 11 size classes
2543 // for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
2544 static int StackMallocSizeClass(uint64_t LocalStackSize) {
2545   assert(LocalStackSize <= kMaxStackMallocSize);
2546   uint64_t MaxSize = kMinStackMallocSize;
2547   for (int i = 0;; i++, MaxSize *= 2)
2548     if (LocalStackSize <= MaxSize) return i;
2549   llvm_unreachable("impossible LocalStackSize");
2550 }
2551 
2552 void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
2553   Instruction *CopyInsertPoint = &F.front().front();
2554   if (CopyInsertPoint == ASan.LocalDynamicShadow) {
2555     // Insert after the dynamic shadow location is determined
2556     CopyInsertPoint = CopyInsertPoint->getNextNode();
2557     assert(CopyInsertPoint);
2558   }
2559   IRBuilder<> IRB(CopyInsertPoint);
2560   const DataLayout &DL = F.getParent()->getDataLayout();
2561   for (Argument &Arg : F.args()) {
2562     if (Arg.hasByValAttr()) {
2563       Type *Ty = Arg.getType()->getPointerElementType();
2564       unsigned Align = Arg.getParamAlignment();
2565       if (Align == 0) Align = DL.getABITypeAlignment(Ty);
2566 
2567       const std::string &Name = Arg.hasName() ? Arg.getName().str() :
2568           "Arg" + llvm::to_string(Arg.getArgNo());
2569       AllocaInst *AI = IRB.CreateAlloca(Ty, nullptr, Twine(Name) + ".byval");
2570       AI->setAlignment(Align);
2571       Arg.replaceAllUsesWith(AI);
2572 
2573       uint64_t AllocSize = DL.getTypeAllocSize(Ty);
2574       IRB.CreateMemCpy(AI, &Arg, AllocSize, Align);
2575     }
2576   }
2577 }
2578 
2579 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
2580                                           Value *ValueIfTrue,
2581                                           Instruction *ThenTerm,
2582                                           Value *ValueIfFalse) {
2583   PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
2584   BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
2585   PHI->addIncoming(ValueIfFalse, CondBlock);
2586   BasicBlock *ThenBlock = ThenTerm->getParent();
2587   PHI->addIncoming(ValueIfTrue, ThenBlock);
2588   return PHI;
2589 }
2590 
2591 Value *FunctionStackPoisoner::createAllocaForLayout(
2592     IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
2593   AllocaInst *Alloca;
2594   if (Dynamic) {
2595     Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
2596                               ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
2597                               "MyAlloca");
2598   } else {
2599     Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
2600                               nullptr, "MyAlloca");
2601     assert(Alloca->isStaticAlloca());
2602   }
2603   assert((ClRealignStack & (ClRealignStack - 1)) == 0);
2604   size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
2605   Alloca->setAlignment(FrameAlignment);
2606   return IRB.CreatePointerCast(Alloca, IntptrTy);
2607 }
2608 
2609 void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
2610   BasicBlock &FirstBB = *F.begin();
2611   IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
2612   DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
2613   IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
2614   DynamicAllocaLayout->setAlignment(32);
2615 }
2616 
2617 void FunctionStackPoisoner::processDynamicAllocas() {
2618   if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
2619     assert(DynamicAllocaPoisonCallVec.empty());
2620     return;
2621   }
2622 
2623   // Insert poison calls for lifetime intrinsics for dynamic allocas.
2624   for (const auto &APC : DynamicAllocaPoisonCallVec) {
2625     assert(APC.InsBefore);
2626     assert(APC.AI);
2627     assert(ASan.isInterestingAlloca(*APC.AI));
2628     assert(!APC.AI->isStaticAlloca());
2629 
2630     IRBuilder<> IRB(APC.InsBefore);
2631     poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
2632     // Dynamic allocas will be unpoisoned unconditionally below in
2633     // unpoisonDynamicAllocas.
2634     // Flag that we need unpoison static allocas.
2635   }
2636 
2637   // Handle dynamic allocas.
2638   createDynamicAllocasInitStorage();
2639   for (auto &AI : DynamicAllocaVec)
2640     handleDynamicAllocaCall(AI);
2641   unpoisonDynamicAllocas();
2642 }
2643 
2644 void FunctionStackPoisoner::processStaticAllocas() {
2645   if (AllocaVec.empty()) {
2646     assert(StaticAllocaPoisonCallVec.empty());
2647     return;
2648   }
2649 
2650   int StackMallocIdx = -1;
2651   DebugLoc EntryDebugLocation;
2652   if (auto SP = F.getSubprogram())
2653     EntryDebugLocation = DebugLoc::get(SP->getScopeLine(), 0, SP);
2654 
2655   Instruction *InsBefore = AllocaVec[0];
2656   IRBuilder<> IRB(InsBefore);
2657   IRB.SetCurrentDebugLocation(EntryDebugLocation);
2658 
2659   // Make sure non-instrumented allocas stay in the entry block. Otherwise,
2660   // debug info is broken, because only entry-block allocas are treated as
2661   // regular stack slots.
2662   auto InsBeforeB = InsBefore->getParent();
2663   assert(InsBeforeB == &F.getEntryBlock());
2664   for (auto *AI : StaticAllocasToMoveUp)
2665     if (AI->getParent() == InsBeforeB)
2666       AI->moveBefore(InsBefore);
2667 
2668   // If we have a call to llvm.localescape, keep it in the entry block.
2669   if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
2670 
2671   SmallVector<ASanStackVariableDescription, 16> SVD;
2672   SVD.reserve(AllocaVec.size());
2673   for (AllocaInst *AI : AllocaVec) {
2674     ASanStackVariableDescription D = {AI->getName().data(),
2675                                       ASan.getAllocaSizeInBytes(*AI),
2676                                       0,
2677                                       AI->getAlignment(),
2678                                       AI,
2679                                       0,
2680                                       0};
2681     SVD.push_back(D);
2682   }
2683 
2684   // Minimal header size (left redzone) is 4 pointers,
2685   // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms.
2686   size_t MinHeaderSize = ASan.LongSize / 2;
2687   const ASanStackFrameLayout &L =
2688       ComputeASanStackFrameLayout(SVD, 1ULL << Mapping.Scale, MinHeaderSize);
2689 
2690   // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
2691   DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
2692   for (auto &Desc : SVD)
2693     AllocaToSVDMap[Desc.AI] = &Desc;
2694 
2695   // Update SVD with information from lifetime intrinsics.
2696   for (const auto &APC : StaticAllocaPoisonCallVec) {
2697     assert(APC.InsBefore);
2698     assert(APC.AI);
2699     assert(ASan.isInterestingAlloca(*APC.AI));
2700     assert(APC.AI->isStaticAlloca());
2701 
2702     ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
2703     Desc.LifetimeSize = Desc.Size;
2704     if (const DILocation *FnLoc = EntryDebugLocation.get()) {
2705       if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
2706         if (LifetimeLoc->getFile() == FnLoc->getFile())
2707           if (unsigned Line = LifetimeLoc->getLine())
2708             Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
2709       }
2710     }
2711   }
2712 
2713   auto DescriptionString = ComputeASanStackFrameDescription(SVD);
2714   DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
2715   uint64_t LocalStackSize = L.FrameSize;
2716   bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
2717                        LocalStackSize <= kMaxStackMallocSize;
2718   bool DoDynamicAlloca = ClDynamicAllocaStack;
2719   // Don't do dynamic alloca or stack malloc if:
2720   // 1) There is inline asm: too often it makes assumptions on which registers
2721   //    are available.
2722   // 2) There is a returns_twice call (typically setjmp), which is
2723   //    optimization-hostile, and doesn't play well with introduced indirect
2724   //    register-relative calculation of local variable addresses.
2725   DoDynamicAlloca &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
2726   DoStackMalloc &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
2727 
2728   Value *StaticAlloca =
2729       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
2730 
2731   Value *FakeStack;
2732   Value *LocalStackBase;
2733 
2734   if (DoStackMalloc) {
2735     // void *FakeStack = __asan_option_detect_stack_use_after_return
2736     //     ? __asan_stack_malloc_N(LocalStackSize)
2737     //     : nullptr;
2738     // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
2739     Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
2740         kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
2741     Value *UseAfterReturnIsEnabled =
2742         IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUseAfterReturn),
2743                          Constant::getNullValue(IRB.getInt32Ty()));
2744     Instruction *Term =
2745         SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
2746     IRBuilder<> IRBIf(Term);
2747     IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
2748     StackMallocIdx = StackMallocSizeClass(LocalStackSize);
2749     assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
2750     Value *FakeStackValue =
2751         IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
2752                          ConstantInt::get(IntptrTy, LocalStackSize));
2753     IRB.SetInsertPoint(InsBefore);
2754     IRB.SetCurrentDebugLocation(EntryDebugLocation);
2755     FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
2756                           ConstantInt::get(IntptrTy, 0));
2757 
2758     Value *NoFakeStack =
2759         IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
2760     Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
2761     IRBIf.SetInsertPoint(Term);
2762     IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
2763     Value *AllocaValue =
2764         DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
2765     IRB.SetInsertPoint(InsBefore);
2766     IRB.SetCurrentDebugLocation(EntryDebugLocation);
2767     LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
2768   } else {
2769     // void *FakeStack = nullptr;
2770     // void *LocalStackBase = alloca(LocalStackSize);
2771     FakeStack = ConstantInt::get(IntptrTy, 0);
2772     LocalStackBase =
2773         DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
2774   }
2775 
2776   // Replace Alloca instructions with base+offset.
2777   for (const auto &Desc : SVD) {
2778     AllocaInst *AI = Desc.AI;
2779     Value *NewAllocaPtr = IRB.CreateIntToPtr(
2780         IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
2781         AI->getType());
2782     replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, DIExpression::NoDeref);
2783     AI->replaceAllUsesWith(NewAllocaPtr);
2784   }
2785 
2786   // The left-most redzone has enough space for at least 4 pointers.
2787   // Write the Magic value to redzone[0].
2788   Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
2789   IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
2790                   BasePlus0);
2791   // Write the frame description constant to redzone[1].
2792   Value *BasePlus1 = IRB.CreateIntToPtr(
2793       IRB.CreateAdd(LocalStackBase,
2794                     ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
2795       IntptrPtrTy);
2796   GlobalVariable *StackDescriptionGlobal =
2797       createPrivateGlobalForString(*F.getParent(), DescriptionString,
2798                                    /*AllowMerging*/ true);
2799   Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
2800   IRB.CreateStore(Description, BasePlus1);
2801   // Write the PC to redzone[2].
2802   Value *BasePlus2 = IRB.CreateIntToPtr(
2803       IRB.CreateAdd(LocalStackBase,
2804                     ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
2805       IntptrPtrTy);
2806   IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
2807 
2808   const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
2809 
2810   // Poison the stack red zones at the entry.
2811   Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
2812   // As mask we must use most poisoned case: red zones and after scope.
2813   // As bytes we can use either the same or just red zones only.
2814   copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
2815 
2816   if (!StaticAllocaPoisonCallVec.empty()) {
2817     const auto &ShadowInScope = GetShadowBytes(SVD, L);
2818 
2819     // Poison static allocas near lifetime intrinsics.
2820     for (const auto &APC : StaticAllocaPoisonCallVec) {
2821       const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
2822       assert(Desc.Offset % L.Granularity == 0);
2823       size_t Begin = Desc.Offset / L.Granularity;
2824       size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
2825 
2826       IRBuilder<> IRB(APC.InsBefore);
2827       copyToShadow(ShadowAfterScope,
2828                    APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
2829                    IRB, ShadowBase);
2830     }
2831   }
2832 
2833   SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
2834   SmallVector<uint8_t, 64> ShadowAfterReturn;
2835 
2836   // (Un)poison the stack before all ret instructions.
2837   for (auto Ret : RetVec) {
2838     IRBuilder<> IRBRet(Ret);
2839     // Mark the current frame as retired.
2840     IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
2841                        BasePlus0);
2842     if (DoStackMalloc) {
2843       assert(StackMallocIdx >= 0);
2844       // if FakeStack != 0  // LocalStackBase == FakeStack
2845       //     // In use-after-return mode, poison the whole stack frame.
2846       //     if StackMallocIdx <= 4
2847       //         // For small sizes inline the whole thing:
2848       //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
2849       //         **SavedFlagPtr(FakeStack) = 0
2850       //     else
2851       //         __asan_stack_free_N(FakeStack, LocalStackSize)
2852       // else
2853       //     <This is not a fake stack; unpoison the redzones>
2854       Value *Cmp =
2855           IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
2856       TerminatorInst *ThenTerm, *ElseTerm;
2857       SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
2858 
2859       IRBuilder<> IRBPoison(ThenTerm);
2860       if (StackMallocIdx <= 4) {
2861         int ClassSize = kMinStackMallocSize << StackMallocIdx;
2862         ShadowAfterReturn.resize(ClassSize / L.Granularity,
2863                                  kAsanStackUseAfterReturnMagic);
2864         copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
2865                      ShadowBase);
2866         Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
2867             FakeStack,
2868             ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
2869         Value *SavedFlagPtr = IRBPoison.CreateLoad(
2870             IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
2871         IRBPoison.CreateStore(
2872             Constant::getNullValue(IRBPoison.getInt8Ty()),
2873             IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
2874       } else {
2875         // For larger frames call __asan_stack_free_*.
2876         IRBPoison.CreateCall(
2877             AsanStackFreeFunc[StackMallocIdx],
2878             {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
2879       }
2880 
2881       IRBuilder<> IRBElse(ElseTerm);
2882       copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
2883     } else {
2884       copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
2885     }
2886   }
2887 
2888   // We are done. Remove the old unused alloca instructions.
2889   for (auto AI : AllocaVec) AI->eraseFromParent();
2890 }
2891 
2892 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
2893                                          IRBuilder<> &IRB, bool DoPoison) {
2894   // For now just insert the call to ASan runtime.
2895   Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
2896   Value *SizeArg = ConstantInt::get(IntptrTy, Size);
2897   IRB.CreateCall(
2898       DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
2899       {AddrArg, SizeArg});
2900 }
2901 
2902 // Handling llvm.lifetime intrinsics for a given %alloca:
2903 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
2904 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
2905 //     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
2906 //     could be poisoned by previous llvm.lifetime.end instruction, as the
2907 //     variable may go in and out of scope several times, e.g. in loops).
2908 // (3) if we poisoned at least one %alloca in a function,
2909 //     unpoison the whole stack frame at function exit.
2910 
2911 AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
2912   if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
2913     // We're interested only in allocas we can handle.
2914     return ASan.isInterestingAlloca(*AI) ? AI : nullptr;
2915   // See if we've already calculated (or started to calculate) alloca for a
2916   // given value.
2917   AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
2918   if (I != AllocaForValue.end()) return I->second;
2919   // Store 0 while we're calculating alloca for value V to avoid
2920   // infinite recursion if the value references itself.
2921   AllocaForValue[V] = nullptr;
2922   AllocaInst *Res = nullptr;
2923   if (CastInst *CI = dyn_cast<CastInst>(V))
2924     Res = findAllocaForValue(CI->getOperand(0));
2925   else if (PHINode *PN = dyn_cast<PHINode>(V)) {
2926     for (Value *IncValue : PN->incoming_values()) {
2927       // Allow self-referencing phi-nodes.
2928       if (IncValue == PN) continue;
2929       AllocaInst *IncValueAI = findAllocaForValue(IncValue);
2930       // AI for incoming values should exist and should all be equal.
2931       if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
2932         return nullptr;
2933       Res = IncValueAI;
2934     }
2935   } else if (GetElementPtrInst *EP = dyn_cast<GetElementPtrInst>(V)) {
2936     Res = findAllocaForValue(EP->getPointerOperand());
2937   } else {
2938     DEBUG(dbgs() << "Alloca search canceled on unknown instruction: " << *V << "\n");
2939   }
2940   if (Res) AllocaForValue[V] = Res;
2941   return Res;
2942 }
2943 
// Replace a dynamic (variable-sized or over-aligned) alloca with a larger
// i8 alloca that has room for ASan redzones on both sides, poison those
// redzones via the runtime, and redirect all uses of the old alloca to the
// properly offset address inside the new one. The emitted IR sequence below
// is order-sensitive: every IRB.Create* call appends the next instruction.
void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
  IRBuilder<> IRB(AI);

  // The left redzone doubles as alignment padding, so it must be at least
  // as large as the alloca's own alignment requirement.
  const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
  // kAllocaRzSize is a power of two, so (Size & Mask) == Size % kAllocaRzSize.
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);

  // Since we need to extend alloca with additional memory to locate
  // redzones, and OldSize is number of allocated blocks with
  // ElementSize size, get allocated memory size in bytes by
  // OldSize * ElementSize.
  const unsigned ElementSize =
      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize =
      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
                    ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % kAllocaRzSize
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  // (If OldSize was already a redzone multiple, no partial padding is needed.)
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
  // Align is added to locate left redzone, PartialPadding for possible
  // partial redzone and kAllocaRzSize for right redzone respectively.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);

  // Insert new alloca with new NewSize and Align params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align);

  // NewAddress = Address + Align
  // The user data starts after the left redzone; Align-sized left redzone
  // guarantees both the redzone and the requested alignment.
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Align));

  // Insert __asan_alloca_poison call for new created alloca.
  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});

  // Store the last alloca's address to DynamicAllocaLayout. We'll need this
  // for unpoisoning stuff.
  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);

  // Cast the byte address back to the original alloca's pointer type so
  // existing users type-check unchanged.
  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // Replace all uses of AddessReturnedByAlloca with NewAddressPtr.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase old alloca from parent.
  AI->eraseFromParent();
}
3005 
3006 // isSafeAccess returns true if Addr is always inbounds with respect to its
3007 // base object. For example, it is a field access or an array access with
3008 // constant inbounds index.
3009 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3010                                     Value *Addr, uint64_t TypeSize) const {
3011   SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
3012   if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
3013   uint64_t Size = SizeOffset.first.getZExtValue();
3014   int64_t Offset = SizeOffset.second.getSExtValue();
3015   // Three checks are required to ensure safety:
3016   // . Offset >= 0  (since the offset is given from the base ptr)
3017   // . Size >= Offset  (unsigned)
3018   // . Size - Offset >= NeededSize  (unsigned)
3019   return Offset >= 0 && Size >= uint64_t(Offset) &&
3020          Size - uint64_t(Offset) >= TypeSize / 8;
3021 }
3022