1 //===- AddressSanitizer.cpp - memory error detector -----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of AddressSanitizer, an address sanity checker.
10 // Details of the algorithm:
11 //  https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
12 //
13 // FIXME: This sanitizer does not yet handle scalable vectors
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/DepthFirstIterator.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/Analysis/MemoryBuiltins.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/BinaryFormat/MachO.h"
32 #include "llvm/IR/Argument.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/BasicBlock.h"
35 #include "llvm/IR/Comdat.h"
36 #include "llvm/IR/Constant.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DIBuilder.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DebugInfoMetadata.h"
41 #include "llvm/IR/DebugLoc.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GlobalAlias.h"
46 #include "llvm/IR/GlobalValue.h"
47 #include "llvm/IR/GlobalVariable.h"
48 #include "llvm/IR/IRBuilder.h"
49 #include "llvm/IR/InlineAsm.h"
50 #include "llvm/IR/InstVisitor.h"
51 #include "llvm/IR/InstrTypes.h"
52 #include "llvm/IR/Instruction.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/IntrinsicInst.h"
55 #include "llvm/IR/Intrinsics.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/MDBuilder.h"
58 #include "llvm/IR/Metadata.h"
59 #include "llvm/IR/Module.h"
60 #include "llvm/IR/Type.h"
61 #include "llvm/IR/Use.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/InitializePasses.h"
64 #include "llvm/MC/MCSectionMachO.h"
65 #include "llvm/Pass.h"
66 #include "llvm/Support/Casting.h"
67 #include "llvm/Support/CommandLine.h"
68 #include "llvm/Support/Debug.h"
69 #include "llvm/Support/ErrorHandling.h"
70 #include "llvm/Support/MathExtras.h"
71 #include "llvm/Support/ScopedPrinter.h"
72 #include "llvm/Support/raw_ostream.h"
73 #include "llvm/Transforms/Instrumentation.h"
74 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
75 #include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
76 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
77 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
78 #include "llvm/Transforms/Utils/Local.h"
79 #include "llvm/Transforms/Utils/ModuleUtils.h"
80 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
81 #include <algorithm>
82 #include <cassert>
83 #include <cstddef>
84 #include <cstdint>
85 #include <iomanip>
86 #include <limits>
87 #include <memory>
88 #include <sstream>
89 #include <string>
90 #include <tuple>
91 
92 using namespace llvm;
93 
94 #define DEBUG_TYPE "asan"
95 
96 static const uint64_t kDefaultShadowScale = 3;
97 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
98 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
99 static const uint64_t kDynamicShadowSentinel =
100     std::numeric_limits<uint64_t>::max();
101 static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;  // < 2G.
102 static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
103 static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
104 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
105 static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
106 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
107 static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
108 static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
109 static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
110 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
111 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
112 static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
113 static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
114 static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
115 static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
116 static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
117 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
118 static const uint64_t kEmscriptenShadowOffset = 0;
119 
120 // The shadow memory space is dynamically allocated.
121 static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
122 
123 static const size_t kMinStackMallocSize = 1 << 6;   // 64B
124 static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
125 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
126 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
127 
128 const char kAsanModuleCtorName[] = "asan.module_ctor";
129 const char kAsanModuleDtorName[] = "asan.module_dtor";
130 static const uint64_t kAsanCtorAndDtorPriority = 1;
131 // On Emscripten, the system needs more than one priorities for constructors.
132 static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
133 const char kAsanReportErrorTemplate[] = "__asan_report_";
134 const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
135 const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
136 const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
137 const char kAsanUnregisterImageGlobalsName[] =
138     "__asan_unregister_image_globals";
139 const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
140 const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
141 const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
142 const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
143 const char kAsanInitName[] = "__asan_init";
144 const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
145 const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
146 const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
147 const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
148 static const int kMaxAsanStackMallocSizeClass = 10;
149 const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
150 const char kAsanStackMallocAlwaysNameTemplate[] =
151     "__asan_stack_malloc_always_";
152 const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
153 const char kAsanGenPrefix[] = "___asan_gen_";
154 const char kODRGenPrefix[] = "__odr_asan_gen_";
155 const char kSanCovGenPrefix[] = "__sancov_gen_";
156 const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
157 const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
158 const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";
159 
160 // ASan version script has __asan_* wildcard. Triple underscore prevents a
161 // linker (gold) warning about attempting to export a local symbol.
162 const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";
163 
164 const char kAsanOptionDetectUseAfterReturn[] =
165     "__asan_option_detect_stack_use_after_return";
166 
167 const char kAsanShadowMemoryDynamicAddress[] =
168     "__asan_shadow_memory_dynamic_address";
169 
170 const char kAsanAllocaPoison[] = "__asan_alloca_poison";
171 const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";
172 
173 const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
174 const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
175 
176 // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
177 static const size_t kNumberOfAccessSizes = 5;
178 
179 static const unsigned kAllocaRzSize = 32;
180 
181 // ASanAccessInfo implementation constants.
182 constexpr size_t kCompileKernelShift = 0;
183 constexpr size_t kCompileKernelMask = 0x1;
184 constexpr size_t kAccessSizeIndexShift = 1;
185 constexpr size_t kAccessSizeIndexMask = 0xf;
186 constexpr size_t kIsWriteShift = 5;
187 constexpr size_t kIsWriteMask = 0x1;
188 
189 // Command-line flags.
190 
191 static cl::opt<bool> ClEnableKasan(
192     "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
193     cl::Hidden, cl::init(false));
194 
195 static cl::opt<bool> ClRecover(
196     "asan-recover",
197     cl::desc("Enable recovery mode (continue-after-error)."),
198     cl::Hidden, cl::init(false));
199 
200 static cl::opt<bool> ClInsertVersionCheck(
201     "asan-guard-against-version-mismatch",
202     cl::desc("Guard against compiler/runtime version mismatch."),
203     cl::Hidden, cl::init(true));
204 
205 // This flag may need to be replaced with -f[no-]asan-reads.
206 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
207                                        cl::desc("instrument read instructions"),
208                                        cl::Hidden, cl::init(true));
209 
210 static cl::opt<bool> ClInstrumentWrites(
211     "asan-instrument-writes", cl::desc("instrument write instructions"),
212     cl::Hidden, cl::init(true));
213 
214 static cl::opt<bool> ClInstrumentAtomics(
215     "asan-instrument-atomics",
216     cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
217     cl::init(true));
218 
219 static cl::opt<bool>
220     ClInstrumentByval("asan-instrument-byval",
221                       cl::desc("instrument byval call arguments"), cl::Hidden,
222                       cl::init(true));
223 
224 static cl::opt<bool> ClAlwaysSlowPath(
225     "asan-always-slow-path",
226     cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
227     cl::init(false));
228 
229 static cl::opt<bool> ClForceDynamicShadow(
230     "asan-force-dynamic-shadow",
231     cl::desc("Load shadow address into a local variable for each function"),
232     cl::Hidden, cl::init(false));
233 
234 static cl::opt<bool>
235     ClWithIfunc("asan-with-ifunc",
236                 cl::desc("Access dynamic shadow through an ifunc global on "
237                          "platforms that support this"),
238                 cl::Hidden, cl::init(true));
239 
240 static cl::opt<bool> ClWithIfuncSuppressRemat(
241     "asan-with-ifunc-suppress-remat",
242     cl::desc("Suppress rematerialization of dynamic shadow address by passing "
243              "it through inline asm in prologue."),
244     cl::Hidden, cl::init(true));
245 
246 // This flag limits the number of instructions to be instrumented
247 // in any given BB. Normally, this should be set to unlimited (INT_MAX),
248 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
249 // set it to 10000.
250 static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
251     "asan-max-ins-per-bb", cl::init(10000),
252     cl::desc("maximal number of instructions to instrument in any given BB"),
253     cl::Hidden);
254 
255 // This flag may need to be replaced with -f[no]asan-stack.
256 static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
257                              cl::Hidden, cl::init(true));
258 static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
259     "asan-max-inline-poisoning-size",
260     cl::desc(
261         "Inline shadow poisoning for blocks up to the given size in bytes."),
262     cl::Hidden, cl::init(64));
263 
264 static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
265     "asan-use-after-return",
266     cl::desc("Sets the mode of detection for stack-use-after-return."),
267     cl::values(
268         clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
269                    "Never detect stack use after return."),
270         clEnumValN(
271             AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
272             "Detect stack use after return if "
273             "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
274         clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
275                    "Always detect stack use after return.")),
276     cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));
277 
278 static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
279                                         cl::desc("Create redzones for byval "
280                                                  "arguments (extra copy "
281                                                  "required)"), cl::Hidden,
282                                         cl::init(true));
283 
284 static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
285                                      cl::desc("Check stack-use-after-scope"),
286                                      cl::Hidden, cl::init(false));
287 
288 // This flag may need to be replaced with -f[no]asan-globals.
289 static cl::opt<bool> ClGlobals("asan-globals",
290                                cl::desc("Handle global objects"), cl::Hidden,
291                                cl::init(true));
292 
293 static cl::opt<bool> ClInitializers("asan-initialization-order",
294                                     cl::desc("Handle C++ initializer order"),
295                                     cl::Hidden, cl::init(true));
296 
297 static cl::opt<bool> ClInvalidPointerPairs(
298     "asan-detect-invalid-pointer-pair",
299     cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
300     cl::init(false));
301 
302 static cl::opt<bool> ClInvalidPointerCmp(
303     "asan-detect-invalid-pointer-cmp",
304     cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
305     cl::init(false));
306 
307 static cl::opt<bool> ClInvalidPointerSub(
308     "asan-detect-invalid-pointer-sub",
309     cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
310     cl::init(false));
311 
312 static cl::opt<unsigned> ClRealignStack(
313     "asan-realign-stack",
314     cl::desc("Realign stack to the value of this flag (power of two)"),
315     cl::Hidden, cl::init(32));
316 
317 static cl::opt<int> ClInstrumentationWithCallsThreshold(
318     "asan-instrumentation-with-call-threshold",
319     cl::desc(
320         "If the function being instrumented contains more than "
321         "this number of memory accesses, use callbacks instead of "
322         "inline checks (-1 means never use callbacks)."),
323     cl::Hidden, cl::init(7000));
324 
325 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
326     "asan-memory-access-callback-prefix",
327     cl::desc("Prefix for memory access callbacks"), cl::Hidden,
328     cl::init("__asan_"));
329 
330 static cl::opt<bool>
331     ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
332                                cl::desc("instrument dynamic allocas"),
333                                cl::Hidden, cl::init(true));
334 
335 static cl::opt<bool> ClSkipPromotableAllocas(
336     "asan-skip-promotable-allocas",
337     cl::desc("Do not instrument promotable allocas"), cl::Hidden,
338     cl::init(true));
339 
340 // These flags allow to change the shadow mapping.
341 // The shadow mapping looks like
342 //    Shadow = (Mem >> scale) + offset
343 
344 static cl::opt<int> ClMappingScale("asan-mapping-scale",
345                                    cl::desc("scale of asan shadow mapping"),
346                                    cl::Hidden, cl::init(0));
347 
348 static cl::opt<uint64_t>
349     ClMappingOffset("asan-mapping-offset",
350                     cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
351                     cl::Hidden, cl::init(0));
352 
353 // Optimization flags. Not user visible, used mostly for testing
354 // and benchmarking the tool.
355 
356 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
357                            cl::Hidden, cl::init(true));
358 
359 static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
360                                          cl::desc("Optimize callbacks"),
361                                          cl::Hidden, cl::init(false));
362 
363 static cl::opt<bool> ClOptSameTemp(
364     "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
365     cl::Hidden, cl::init(true));
366 
367 static cl::opt<bool> ClOptGlobals("asan-opt-globals",
368                                   cl::desc("Don't instrument scalar globals"),
369                                   cl::Hidden, cl::init(true));
370 
371 static cl::opt<bool> ClOptStack(
372     "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
373     cl::Hidden, cl::init(false));
374 
375 static cl::opt<bool> ClDynamicAllocaStack(
376     "asan-stack-dynamic-alloca",
377     cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
378     cl::init(true));
379 
380 static cl::opt<uint32_t> ClForceExperiment(
381     "asan-force-experiment",
382     cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
383     cl::init(0));
384 
385 static cl::opt<bool>
386     ClUsePrivateAlias("asan-use-private-alias",
387                       cl::desc("Use private aliases for global variables"),
388                       cl::Hidden, cl::init(false));
389 
390 static cl::opt<bool>
391     ClUseOdrIndicator("asan-use-odr-indicator",
392                       cl::desc("Use odr indicators to improve ODR reporting"),
393                       cl::Hidden, cl::init(false));
394 
395 static cl::opt<bool>
396     ClUseGlobalsGC("asan-globals-live-support",
397                    cl::desc("Use linker features to support dead "
398                             "code stripping of globals"),
399                    cl::Hidden, cl::init(true));
400 
401 // This is on by default even though there is a bug in gold:
402 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
403 static cl::opt<bool>
404     ClWithComdat("asan-with-comdat",
405                  cl::desc("Place ASan constructors in comdat sections"),
406                  cl::Hidden, cl::init(true));
407 
408 static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
409     "asan-destructor-kind",
410     cl::desc("Sets the ASan destructor kind. The default is to use the value "
411              "provided to the pass constructor"),
412     cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
413                clEnumValN(AsanDtorKind::Global, "global",
414                           "Use global destructors")),
415     cl::init(AsanDtorKind::Invalid), cl::Hidden);
416 
417 // Debug flags.
418 
419 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
420                             cl::init(0));
421 
422 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
423                                  cl::Hidden, cl::init(0));
424 
425 static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
426                                         cl::desc("Debug func"));
427 
428 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
429                                cl::Hidden, cl::init(-1));
430 
431 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
432                                cl::Hidden, cl::init(-1));
433 
434 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
435 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
436 STATISTIC(NumOptimizedAccessesToGlobalVar,
437           "Number of optimized accesses to global vars");
438 STATISTIC(NumOptimizedAccessesToStackVar,
439           "Number of optimized accesses to stack vars");
440 
441 namespace {
442 
443 /// This struct defines the shadow mapping using the rule:
444 ///   shadow = (mem >> Scale) ADD-or-OR Offset.
445 /// If InGlobal is true, then
446 ///   extern char __asan_shadow[];
447 ///   shadow = (mem >> Scale) + &__asan_shadow
448 struct ShadowMapping {
449   int Scale;
450   uint64_t Offset;
451   bool OrShadowOffset;
452   bool InGlobal;
453 };
454 
455 } // end anonymous namespace
456 
457 static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
458                                       bool IsKasan) {
459   bool IsAndroid = TargetTriple.isAndroid();
460   bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS();
461   bool IsMacOS = TargetTriple.isMacOSX();
462   bool IsFreeBSD = TargetTriple.isOSFreeBSD();
463   bool IsNetBSD = TargetTriple.isOSNetBSD();
464   bool IsPS4CPU = TargetTriple.isPS4CPU();
465   bool IsLinux = TargetTriple.isOSLinux();
466   bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
467                  TargetTriple.getArch() == Triple::ppc64le;
468   bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
469   bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
470   bool IsMIPS32 = TargetTriple.isMIPS32();
471   bool IsMIPS64 = TargetTriple.isMIPS64();
472   bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
473   bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
474   bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
475   bool IsWindows = TargetTriple.isOSWindows();
476   bool IsFuchsia = TargetTriple.isOSFuchsia();
477   bool IsEmscripten = TargetTriple.isOSEmscripten();
478   bool IsAMDGPU = TargetTriple.isAMDGPU();
479 
480   ShadowMapping Mapping;
481 
482   Mapping.Scale = kDefaultShadowScale;
483   if (ClMappingScale.getNumOccurrences() > 0) {
484     Mapping.Scale = ClMappingScale;
485   }
486 
487   if (LongSize == 32) {
488     if (IsAndroid)
489       Mapping.Offset = kDynamicShadowSentinel;
490     else if (IsMIPS32)
491       Mapping.Offset = kMIPS32_ShadowOffset32;
492     else if (IsFreeBSD)
493       Mapping.Offset = kFreeBSD_ShadowOffset32;
494     else if (IsNetBSD)
495       Mapping.Offset = kNetBSD_ShadowOffset32;
496     else if (IsIOS)
497       Mapping.Offset = kDynamicShadowSentinel;
498     else if (IsWindows)
499       Mapping.Offset = kWindowsShadowOffset32;
500     else if (IsEmscripten)
501       Mapping.Offset = kEmscriptenShadowOffset;
502     else
503       Mapping.Offset = kDefaultShadowOffset32;
504   } else {  // LongSize == 64
505     // Fuchsia is always PIE, which means that the beginning of the address
506     // space is always available.
507     if (IsFuchsia)
508       Mapping.Offset = 0;
509     else if (IsPPC64)
510       Mapping.Offset = kPPC64_ShadowOffset64;
511     else if (IsSystemZ)
512       Mapping.Offset = kSystemZ_ShadowOffset64;
513     else if (IsFreeBSD && !IsMIPS64) {
514       if (IsKasan)
515         Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
516       else
517         Mapping.Offset = kFreeBSD_ShadowOffset64;
518     } else if (IsNetBSD) {
519       if (IsKasan)
520         Mapping.Offset = kNetBSDKasan_ShadowOffset64;
521       else
522         Mapping.Offset = kNetBSD_ShadowOffset64;
523     } else if (IsPS4CPU)
524       Mapping.Offset = kPS4CPU_ShadowOffset64;
525     else if (IsLinux && IsX86_64) {
526       if (IsKasan)
527         Mapping.Offset = kLinuxKasan_ShadowOffset64;
528       else
529         Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
530                           (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
531     } else if (IsWindows && IsX86_64) {
532       Mapping.Offset = kWindowsShadowOffset64;
533     } else if (IsMIPS64)
534       Mapping.Offset = kMIPS64_ShadowOffset64;
535     else if (IsIOS)
536       Mapping.Offset = kDynamicShadowSentinel;
537     else if (IsMacOS && IsAArch64)
538       Mapping.Offset = kDynamicShadowSentinel;
539     else if (IsAArch64)
540       Mapping.Offset = kAArch64_ShadowOffset64;
541     else if (IsRISCV64)
542       Mapping.Offset = kRISCV64_ShadowOffset64;
543     else if (IsAMDGPU)
544       Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
545                         (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
546     else
547       Mapping.Offset = kDefaultShadowOffset64;
548   }
549 
550   if (ClForceDynamicShadow) {
551     Mapping.Offset = kDynamicShadowSentinel;
552   }
553 
554   if (ClMappingOffset.getNumOccurrences() > 0) {
555     Mapping.Offset = ClMappingOffset;
556   }
557 
558   // OR-ing shadow offset if more efficient (at least on x86) if the offset
559   // is a power of two, but on ppc64 we have to use add since the shadow
560   // offset is not necessary 1/8-th of the address space.  On SystemZ,
561   // we could OR the constant in a single instruction, but it's more
562   // efficient to load it once and use indexed addressing.
563   Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
564                            !IsRISCV64 &&
565                            !(Mapping.Offset & (Mapping.Offset - 1)) &&
566                            Mapping.Offset != kDynamicShadowSentinel;
567   bool IsAndroidWithIfuncSupport =
568       IsAndroid && !TargetTriple.isAndroidVersionLT(21);
569   Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
570 
571   return Mapping;
572 }
573 
574 namespace llvm {
575 void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
576                                bool IsKasan, uint64_t *ShadowBase,
577                                int *MappingScale, bool *OrShadowOffset) {
578   auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
579   *ShadowBase = Mapping.Offset;
580   *MappingScale = Mapping.Scale;
581   *OrShadowOffset = Mapping.OrShadowOffset;
582 }
583 
584 ASanAccessInfo::ASanAccessInfo(int32_t Packed)
585     : Packed(Packed),
586       AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
587       IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
588       CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}
589 
590 ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
591                                uint8_t AccessSizeIndex)
592     : Packed((IsWrite << kIsWriteShift) +
593              (CompileKernel << kCompileKernelShift) +
594              (AccessSizeIndex << kAccessSizeIndexShift)),
595       AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
596       CompileKernel(CompileKernel) {}
597 
598 } // namespace llvm
599 
600 static uint64_t getRedzoneSizeForScale(int MappingScale) {
601   // Redzone used for stack and globals is at least 32 bytes.
602   // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
603   return std::max(32U, 1U << MappingScale);
604 }
605 
606 static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
607   if (TargetTriple.isOSEmscripten()) {
608     return kAsanEmscriptenCtorAndDtorPriority;
609   } else {
610     return kAsanCtorAndDtorPriority;
611   }
612 }
613 
614 namespace {
615 
616 /// Module analysis for getting various metadata about the module.
617 class ASanGlobalsMetadataWrapperPass : public ModulePass {
618 public:
619   static char ID;
620 
621   ASanGlobalsMetadataWrapperPass() : ModulePass(ID) {
622     initializeASanGlobalsMetadataWrapperPassPass(
623         *PassRegistry::getPassRegistry());
624   }
625 
626   bool runOnModule(Module &M) override {
627     GlobalsMD = GlobalsMetadata(M);
628     return false;
629   }
630 
631   StringRef getPassName() const override {
632     return "ASanGlobalsMetadataWrapperPass";
633   }
634 
635   void getAnalysisUsage(AnalysisUsage &AU) const override {
636     AU.setPreservesAll();
637   }
638 
639   GlobalsMetadata &getGlobalsMD() { return GlobalsMD; }
640 
641 private:
642   GlobalsMetadata GlobalsMD;
643 };
644 
645 char ASanGlobalsMetadataWrapperPass::ID = 0;
646 
647 /// AddressSanitizer: instrument the code in module to find memory bugs.
648 struct AddressSanitizer {
649   AddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
650                    bool CompileKernel = false, bool Recover = false,
651                    bool UseAfterScope = false,
652                    AsanDetectStackUseAfterReturnMode UseAfterReturn =
653                        AsanDetectStackUseAfterReturnMode::Runtime)
654       : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
655                                                             : CompileKernel),
656         Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
657         UseAfterScope(UseAfterScope || ClUseAfterScope),
658         UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
659                                                             : UseAfterReturn),
660         GlobalsMD(*GlobalsMD) {
661     C = &(M.getContext());
662     LongSize = M.getDataLayout().getPointerSizeInBits();
663     IntptrTy = Type::getIntNTy(*C, LongSize);
664     Int8PtrTy = Type::getInt8PtrTy(*C);
665     Int32Ty = Type::getInt32Ty(*C);
666     TargetTriple = Triple(M.getTargetTriple());
667 
668     Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
669 
670     assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
671   }
672 
673   uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
674     uint64_t ArraySize = 1;
675     if (AI.isArrayAllocation()) {
676       const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
677       assert(CI && "non-constant array size");
678       ArraySize = CI->getZExtValue();
679     }
680     Type *Ty = AI.getAllocatedType();
681     uint64_t SizeInBytes =
682         AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
683     return SizeInBytes * ArraySize;
684   }
685 
686   /// Check if we want (and can) handle this alloca.
687   bool isInterestingAlloca(const AllocaInst &AI);
688 
689   bool ignoreAccess(Value *Ptr);
690   void getInterestingMemoryOperands(
691       Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
692 
693   void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
694                      InterestingMemoryOperand &O, bool UseCalls,
695                      const DataLayout &DL);
696   void instrumentPointerComparisonOrSubtraction(Instruction *I);
697   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
698                          Value *Addr, uint32_t TypeSize, bool IsWrite,
699                          Value *SizeArgument, bool UseCalls, uint32_t Exp);
700   Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
701                                        Instruction *InsertBefore, Value *Addr,
702                                        uint32_t TypeSize, bool IsWrite,
703                                        Value *SizeArgument);
704   void instrumentUnusualSizeOrAlignment(Instruction *I,
705                                         Instruction *InsertBefore, Value *Addr,
706                                         uint32_t TypeSize, bool IsWrite,
707                                         Value *SizeArgument, bool UseCalls,
708                                         uint32_t Exp);
709   Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
710                            Value *ShadowValue, uint32_t TypeSize);
711   Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
712                                  bool IsWrite, size_t AccessSizeIndex,
713                                  Value *SizeArgument, uint32_t Exp);
714   void instrumentMemIntrinsic(MemIntrinsic *MI);
715   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
716   bool suppressInstrumentationSiteForDebug(int &Instrumented);
717   bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
718   bool maybeInsertAsanInitAtFunctionEntry(Function &F);
719   bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
720   void markEscapedLocalAllocas(Function &F);
721 
722 private:
723   friend struct FunctionStackPoisoner;
724 
725   void initializeCallbacks(Module &M);
726 
727   bool LooksLikeCodeInBug11395(Instruction *I);
728   bool GlobalIsLinkerInitialized(GlobalVariable *G);
729   bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
730                     uint64_t TypeSize) const;
731 
732   /// Helper to cleanup per-function state.
733   struct FunctionStateRAII {
734     AddressSanitizer *Pass;
735 
736     FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
737       assert(Pass->ProcessedAllocas.empty() &&
738              "last pass forgot to clear cache");
739       assert(!Pass->LocalDynamicShadow);
740     }
741 
742     ~FunctionStateRAII() {
743       Pass->LocalDynamicShadow = nullptr;
744       Pass->ProcessedAllocas.clear();
745     }
746   };
747 
748   LLVMContext *C;
749   Triple TargetTriple;
750   int LongSize;
751   bool CompileKernel;
752   bool Recover;
753   bool UseAfterScope;
754   AsanDetectStackUseAfterReturnMode UseAfterReturn;
755   Type *IntptrTy;
756   Type *Int8PtrTy;
757   Type *Int32Ty;
758   ShadowMapping Mapping;
759   FunctionCallee AsanHandleNoReturnFunc;
760   FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
761   Constant *AsanShadowGlobal;
762 
763   // These arrays is indexed by AccessIsWrite, Experiment and log2(AccessSize).
764   FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
765   FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
766 
767   // These arrays is indexed by AccessIsWrite and Experiment.
768   FunctionCallee AsanErrorCallbackSized[2][2];
769   FunctionCallee AsanMemoryAccessCallbackSized[2][2];
770 
771   FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
772   Value *LocalDynamicShadow = nullptr;
773   const GlobalsMetadata &GlobalsMD;
774   DenseMap<const AllocaInst *, bool> ProcessedAllocas;
775 
776   FunctionCallee AMDGPUAddressShared;
777   FunctionCallee AMDGPUAddressPrivate;
778 };
779 
780 class AddressSanitizerLegacyPass : public FunctionPass {
781 public:
782   static char ID;
783 
784   explicit AddressSanitizerLegacyPass(
785       bool CompileKernel = false, bool Recover = false,
786       bool UseAfterScope = false,
787       AsanDetectStackUseAfterReturnMode UseAfterReturn =
788           AsanDetectStackUseAfterReturnMode::Runtime)
789       : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
790         UseAfterScope(UseAfterScope), UseAfterReturn(UseAfterReturn) {
791     initializeAddressSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
792   }
793 
794   StringRef getPassName() const override {
795     return "AddressSanitizerFunctionPass";
796   }
797 
798   void getAnalysisUsage(AnalysisUsage &AU) const override {
799     AU.addRequired<ASanGlobalsMetadataWrapperPass>();
800     AU.addRequired<TargetLibraryInfoWrapperPass>();
801   }
802 
803   bool runOnFunction(Function &F) override {
804     GlobalsMetadata &GlobalsMD =
805         getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
806     const TargetLibraryInfo *TLI =
807         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
808     AddressSanitizer ASan(*F.getParent(), &GlobalsMD, CompileKernel, Recover,
809                           UseAfterScope, UseAfterReturn);
810     return ASan.instrumentFunction(F, TLI);
811   }
812 
813 private:
814   bool CompileKernel;
815   bool Recover;
816   bool UseAfterScope;
817   AsanDetectStackUseAfterReturnMode UseAfterReturn;
818 };
819 
820 class ModuleAddressSanitizer {
821 public:
822   ModuleAddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
823                          bool CompileKernel = false, bool Recover = false,
824                          bool UseGlobalsGC = true, bool UseOdrIndicator = false,
825                          AsanDtorKind DestructorKind = AsanDtorKind::Global)
826       : GlobalsMD(*GlobalsMD),
827         CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
828                                                             : CompileKernel),
829         Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
830         UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
831         // Enable aliases as they should have no downside with ODR indicators.
832         UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias),
833         UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator),
834         // Not a typo: ClWithComdat is almost completely pointless without
835         // ClUseGlobalsGC (because then it only works on modules without
836         // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
837         // and both suffer from gold PR19002 for which UseGlobalsGC constructor
838         // argument is designed as workaround. Therefore, disable both
839         // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
840         // do globals-gc.
841         UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
842         DestructorKind(DestructorKind) {
843     C = &(M.getContext());
844     int LongSize = M.getDataLayout().getPointerSizeInBits();
845     IntptrTy = Type::getIntNTy(*C, LongSize);
846     TargetTriple = Triple(M.getTargetTriple());
847     Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
848 
849     if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
850       this->DestructorKind = ClOverrideDestructorKind;
851     assert(this->DestructorKind != AsanDtorKind::Invalid);
852   }
853 
854   bool instrumentModule(Module &);
855 
856 private:
857   void initializeCallbacks(Module &M);
858 
859   bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
860   void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
861                              ArrayRef<GlobalVariable *> ExtendedGlobals,
862                              ArrayRef<Constant *> MetadataInitializers);
863   void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
864                             ArrayRef<GlobalVariable *> ExtendedGlobals,
865                             ArrayRef<Constant *> MetadataInitializers,
866                             const std::string &UniqueModuleId);
867   void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
868                               ArrayRef<GlobalVariable *> ExtendedGlobals,
869                               ArrayRef<Constant *> MetadataInitializers);
870   void
871   InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
872                                      ArrayRef<GlobalVariable *> ExtendedGlobals,
873                                      ArrayRef<Constant *> MetadataInitializers);
874 
875   GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
876                                        StringRef OriginalName);
877   void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
878                                   StringRef InternalSuffix);
879   Instruction *CreateAsanModuleDtor(Module &M);
880 
881   const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
882   bool shouldInstrumentGlobal(GlobalVariable *G) const;
883   bool ShouldUseMachOGlobalsSection() const;
884   StringRef getGlobalMetadataSection() const;
885   void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
886   void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
887   uint64_t getMinRedzoneSizeForGlobal() const {
888     return getRedzoneSizeForScale(Mapping.Scale);
889   }
890   uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
891   int GetAsanVersion(const Module &M) const;
892 
893   const GlobalsMetadata &GlobalsMD;
894   bool CompileKernel;
895   bool Recover;
896   bool UseGlobalsGC;
897   bool UsePrivateAlias;
898   bool UseOdrIndicator;
899   bool UseCtorComdat;
900   AsanDtorKind DestructorKind;
901   Type *IntptrTy;
902   LLVMContext *C;
903   Triple TargetTriple;
904   ShadowMapping Mapping;
905   FunctionCallee AsanPoisonGlobals;
906   FunctionCallee AsanUnpoisonGlobals;
907   FunctionCallee AsanRegisterGlobals;
908   FunctionCallee AsanUnregisterGlobals;
909   FunctionCallee AsanRegisterImageGlobals;
910   FunctionCallee AsanUnregisterImageGlobals;
911   FunctionCallee AsanRegisterElfGlobals;
912   FunctionCallee AsanUnregisterElfGlobals;
913 
914   Function *AsanCtorFunction = nullptr;
915   Function *AsanDtorFunction = nullptr;
916 };
917 
918 class ModuleAddressSanitizerLegacyPass : public ModulePass {
919 public:
920   static char ID;
921 
922   explicit ModuleAddressSanitizerLegacyPass(
923       bool CompileKernel = false, bool Recover = false, bool UseGlobalGC = true,
924       bool UseOdrIndicator = false,
925       AsanDtorKind DestructorKind = AsanDtorKind::Global)
926       : ModulePass(ID), CompileKernel(CompileKernel), Recover(Recover),
927         UseGlobalGC(UseGlobalGC), UseOdrIndicator(UseOdrIndicator),
928         DestructorKind(DestructorKind) {
929     initializeModuleAddressSanitizerLegacyPassPass(
930         *PassRegistry::getPassRegistry());
931   }
932 
933   StringRef getPassName() const override { return "ModuleAddressSanitizer"; }
934 
935   void getAnalysisUsage(AnalysisUsage &AU) const override {
936     AU.addRequired<ASanGlobalsMetadataWrapperPass>();
937   }
938 
939   bool runOnModule(Module &M) override {
940     GlobalsMetadata &GlobalsMD =
941         getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
942     ModuleAddressSanitizer ASanModule(M, &GlobalsMD, CompileKernel, Recover,
943                                       UseGlobalGC, UseOdrIndicator,
944                                       DestructorKind);
945     return ASanModule.instrumentModule(M);
946   }
947 
948 private:
949   bool CompileKernel;
950   bool Recover;
951   bool UseGlobalGC;
952   bool UseOdrIndicator;
953   AsanDtorKind DestructorKind;
954 };
955 
956 // Stack poisoning does not play well with exception handling.
957 // When an exception is thrown, we essentially bypass the code
958 // that unpoisones the stack. This is why the run-time library has
959 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
960 // stack in the interceptor. This however does not work inside the
961 // actual function which catches the exception. Most likely because the
962 // compiler hoists the load of the shadow value somewhere too high.
963 // This causes asan to report a non-existing bug on 453.povray.
964 // It sounds like an LLVM bug.
965 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
966   Function &F;
967   AddressSanitizer &ASan;
968   DIBuilder DIB;
969   LLVMContext *C;
970   Type *IntptrTy;
971   Type *IntptrPtrTy;
972   ShadowMapping Mapping;
973 
974   SmallVector<AllocaInst *, 16> AllocaVec;
975   SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
976   SmallVector<Instruction *, 8> RetVec;
977 
978   FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
979       AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
980   FunctionCallee AsanSetShadowFunc[0x100] = {};
981   FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
982   FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
983 
984   // Stores a place and arguments of poisoning/unpoisoning call for alloca.
985   struct AllocaPoisonCall {
986     IntrinsicInst *InsBefore;
987     AllocaInst *AI;
988     uint64_t Size;
989     bool DoPoison;
990   };
991   SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
992   SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
993   bool HasUntracedLifetimeIntrinsic = false;
994 
995   SmallVector<AllocaInst *, 1> DynamicAllocaVec;
996   SmallVector<IntrinsicInst *, 1> StackRestoreVec;
997   AllocaInst *DynamicAllocaLayout = nullptr;
998   IntrinsicInst *LocalEscapeCall = nullptr;
999 
1000   bool HasInlineAsm = false;
1001   bool HasReturnsTwiceCall = false;
1002   bool PoisonStack;
1003 
1004   FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
1005       : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
1006         C(ASan.C), IntptrTy(ASan.IntptrTy),
1007         IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
1008         PoisonStack(ClStack &&
1009                     !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}
1010 
1011   bool runOnFunction() {
1012     if (!PoisonStack)
1013       return false;
1014 
1015     if (ClRedzoneByvalArgs)
1016       copyArgsPassedByValToAllocas();
1017 
1018     // Collect alloca, ret, lifetime instructions etc.
1019     for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
1020 
1021     if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
1022 
1023     initializeCallbacks(*F.getParent());
1024 
1025     if (HasUntracedLifetimeIntrinsic) {
1026       // If there are lifetime intrinsics which couldn't be traced back to an
1027       // alloca, we may not know exactly when a variable enters scope, and
1028       // therefore should "fail safe" by not poisoning them.
1029       StaticAllocaPoisonCallVec.clear();
1030       DynamicAllocaPoisonCallVec.clear();
1031     }
1032 
1033     processDynamicAllocas();
1034     processStaticAllocas();
1035 
1036     if (ClDebugStack) {
1037       LLVM_DEBUG(dbgs() << F);
1038     }
1039     return true;
1040   }
1041 
1042   // Arguments marked with the "byval" attribute are implicitly copied without
1043   // using an alloca instruction.  To produce redzones for those arguments, we
1044   // copy them a second time into memory allocated with an alloca instruction.
1045   void copyArgsPassedByValToAllocas();
1046 
1047   // Finds all Alloca instructions and puts
1048   // poisoned red zones around all of them.
1049   // Then unpoison everything back before the function returns.
1050   void processStaticAllocas();
1051   void processDynamicAllocas();
1052 
1053   void createDynamicAllocasInitStorage();
1054 
1055   // ----------------------- Visitors.
1056   /// Collect all Ret instructions, or the musttail call instruction if it
1057   /// precedes the return instruction.
1058   void visitReturnInst(ReturnInst &RI) {
1059     if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
1060       RetVec.push_back(CI);
1061     else
1062       RetVec.push_back(&RI);
1063   }
1064 
1065   /// Collect all Resume instructions.
1066   void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1067 
1068   /// Collect all CatchReturnInst instructions.
1069   void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1070 
1071   void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1072                                         Value *SavedStack) {
1073     IRBuilder<> IRB(InstBefore);
1074     Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
1075     // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
1076     // need to adjust extracted SP to compute the address of the most recent
1077     // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
1078     // this purpose.
1079     if (!isa<ReturnInst>(InstBefore)) {
1080       Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
1081           InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
1082           {IntptrTy});
1083 
1084       Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});
1085 
1086       DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
1087                                      DynamicAreaOffset);
1088     }
1089 
1090     IRB.CreateCall(
1091         AsanAllocasUnpoisonFunc,
1092         {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1093   }
1094 
1095   // Unpoison dynamic allocas redzones.
1096   void unpoisonDynamicAllocas() {
1097     for (Instruction *Ret : RetVec)
1098       unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1099 
1100     for (Instruction *StackRestoreInst : StackRestoreVec)
1101       unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1102                                        StackRestoreInst->getOperand(0));
1103   }
1104 
1105   // Deploy and poison redzones around dynamic alloca call. To do this, we
1106   // should replace this call with another one with changed parameters and
1107   // replace all its uses with new address, so
1108   //   addr = alloca type, old_size, align
1109   // is replaced by
1110   //   new_size = (old_size + additional_size) * sizeof(type)
1111   //   tmp = alloca i8, new_size, max(align, 32)
1112   //   addr = tmp + 32 (first 32 bytes are for the left redzone).
1113   // Additional_size is added to make new memory allocation contain not only
1114   // requested memory, but also left, partial and right redzones.
1115   void handleDynamicAllocaCall(AllocaInst *AI);
1116 
1117   /// Collect Alloca instructions we want (and can) handle.
1118   void visitAllocaInst(AllocaInst &AI) {
1119     if (!ASan.isInterestingAlloca(AI)) {
1120       if (AI.isStaticAlloca()) {
1121         // Skip over allocas that are present *before* the first instrumented
1122         // alloca, we don't want to move those around.
1123         if (AllocaVec.empty())
1124           return;
1125 
1126         StaticAllocasToMoveUp.push_back(&AI);
1127       }
1128       return;
1129     }
1130 
1131     if (!AI.isStaticAlloca())
1132       DynamicAllocaVec.push_back(&AI);
1133     else
1134       AllocaVec.push_back(&AI);
1135   }
1136 
1137   /// Collect lifetime intrinsic calls to check for use-after-scope
1138   /// errors.
1139   void visitIntrinsicInst(IntrinsicInst &II) {
1140     Intrinsic::ID ID = II.getIntrinsicID();
1141     if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
1142     if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
1143     if (!ASan.UseAfterScope)
1144       return;
1145     if (!II.isLifetimeStartOrEnd())
1146       return;
1147     // Found lifetime intrinsic, add ASan instrumentation if necessary.
1148     auto *Size = cast<ConstantInt>(II.getArgOperand(0));
1149     // If size argument is undefined, don't do anything.
1150     if (Size->isMinusOne()) return;
1151     // Check that size doesn't saturate uint64_t and can
1152     // be stored in IntptrTy.
1153     const uint64_t SizeValue = Size->getValue().getLimitedValue();
1154     if (SizeValue == ~0ULL ||
1155         !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
1156       return;
1157     // Find alloca instruction that corresponds to llvm.lifetime argument.
1158     // Currently we can only handle lifetime markers pointing to the
1159     // beginning of the alloca.
1160     AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
1161     if (!AI) {
1162       HasUntracedLifetimeIntrinsic = true;
1163       return;
1164     }
1165     // We're interested only in allocas we can handle.
1166     if (!ASan.isInterestingAlloca(*AI))
1167       return;
1168     bool DoPoison = (ID == Intrinsic::lifetime_end);
1169     AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
1170     if (AI->isStaticAlloca())
1171       StaticAllocaPoisonCallVec.push_back(APC);
1172     else if (ClInstrumentDynamicAllocas)
1173       DynamicAllocaPoisonCallVec.push_back(APC);
1174   }
1175 
1176   void visitCallBase(CallBase &CB) {
1177     if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
1178       HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1179       HasReturnsTwiceCall |= CI->canReturnTwice();
1180     }
1181   }
1182 
1183   // ---------------------- Helpers.
1184   void initializeCallbacks(Module &M);
1185 
1186   // Copies bytes from ShadowBytes into shadow memory for indexes where
1187   // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
1188   // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
1189   void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1190                     IRBuilder<> &IRB, Value *ShadowBase);
1191   void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1192                     size_t Begin, size_t End, IRBuilder<> &IRB,
1193                     Value *ShadowBase);
1194   void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1195                           ArrayRef<uint8_t> ShadowBytes, size_t Begin,
1196                           size_t End, IRBuilder<> &IRB, Value *ShadowBase);
1197 
1198   void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
1199 
1200   Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
1201                                bool Dynamic);
1202   PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
1203                      Instruction *ThenTerm, Value *ValueIfFalse);
1204 };
1205 
1206 } // end anonymous namespace
1207 
1208 void LocationMetadata::parse(MDNode *MDN) {
1209   assert(MDN->getNumOperands() == 3);
1210   MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
1211   Filename = DIFilename->getString();
1212   LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
1213   ColumnNo =
1214       mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
1215 }
1216 
1217 // FIXME: It would be cleaner to instead attach relevant metadata to the globals
1218 // we want to sanitize instead and reading this metadata on each pass over a
1219 // function instead of reading module level metadata at first.
1220 GlobalsMetadata::GlobalsMetadata(Module &M) {
1221   NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
1222   if (!Globals)
1223     return;
1224   for (auto MDN : Globals->operands()) {
1225     // Metadata node contains the global and the fields of "Entry".
1226     assert(MDN->getNumOperands() == 5);
1227     auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
1228     // The optimizer may optimize away a global entirely.
1229     if (!V)
1230       continue;
1231     auto *StrippedV = V->stripPointerCasts();
1232     auto *GV = dyn_cast<GlobalVariable>(StrippedV);
1233     if (!GV)
1234       continue;
1235     // We can already have an entry for GV if it was merged with another
1236     // global.
1237     Entry &E = Entries[GV];
1238     if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
1239       E.SourceLoc.parse(Loc);
1240     if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
1241       E.Name = Name->getString();
1242     ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3));
1243     E.IsDynInit |= IsDynInit->isOne();
1244     ConstantInt *IsExcluded =
1245         mdconst::extract<ConstantInt>(MDN->getOperand(4));
1246     E.IsExcluded |= IsExcluded->isOne();
1247   }
1248 }
1249 
1250 AnalysisKey ASanGlobalsMetadataAnalysis::Key;
1251 
1252 GlobalsMetadata ASanGlobalsMetadataAnalysis::run(Module &M,
1253                                                  ModuleAnalysisManager &AM) {
1254   return GlobalsMetadata(M);
1255 }
1256 
1257 PreservedAnalyses AddressSanitizerPass::run(Function &F,
1258                                             AnalysisManager<Function> &AM) {
1259   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
1260   Module &M = *F.getParent();
1261   if (auto *R = MAMProxy.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) {
1262     const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
1263     AddressSanitizer Sanitizer(M, R, Options.CompileKernel, Options.Recover,
1264                                Options.UseAfterScope, Options.UseAfterReturn);
1265     if (Sanitizer.instrumentFunction(F, TLI))
1266       return PreservedAnalyses::none();
1267     return PreservedAnalyses::all();
1268   }
1269 
1270   report_fatal_error(
1271       "The ASanGlobalsMetadataAnalysis is required to run before "
1272       "AddressSanitizer can run");
1273   return PreservedAnalyses::all();
1274 }
1275 
1276 ModuleAddressSanitizerPass::ModuleAddressSanitizerPass(
1277     bool CompileKernel, bool Recover, bool UseGlobalGC, bool UseOdrIndicator,
1278     AsanDtorKind DestructorKind)
1279     : CompileKernel(CompileKernel), Recover(Recover), UseGlobalGC(UseGlobalGC),
1280       UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind) {}
1281 
1282 PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M,
1283                                                   AnalysisManager<Module> &AM) {
1284   GlobalsMetadata &GlobalsMD = AM.getResult<ASanGlobalsMetadataAnalysis>(M);
1285   ModuleAddressSanitizer Sanitizer(M, &GlobalsMD, CompileKernel, Recover,
1286                                    UseGlobalGC, UseOdrIndicator,
1287                                    DestructorKind);
1288   if (Sanitizer.instrumentModule(M))
1289     return PreservedAnalyses::none();
1290   return PreservedAnalyses::all();
1291 }
1292 
1293 INITIALIZE_PASS(ASanGlobalsMetadataWrapperPass, "asan-globals-md",
1294                 "Read metadata to mark which globals should be instrumented "
1295                 "when running ASan.",
1296                 false, true)
1297 
1298 char AddressSanitizerLegacyPass::ID = 0;
1299 
1300 INITIALIZE_PASS_BEGIN(
1301     AddressSanitizerLegacyPass, "asan",
1302     "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
1303     false)
1304 INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass)
1305 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1306 INITIALIZE_PASS_END(
1307     AddressSanitizerLegacyPass, "asan",
1308     "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
1309     false)
1310 
1311 FunctionPass *llvm::createAddressSanitizerFunctionPass(
1312     bool CompileKernel, bool Recover, bool UseAfterScope,
1313     AsanDetectStackUseAfterReturnMode UseAfterReturn) {
1314   assert(!CompileKernel || Recover);
1315   return new AddressSanitizerLegacyPass(CompileKernel, Recover, UseAfterScope,
1316                                         UseAfterReturn);
1317 }
1318 
1319 char ModuleAddressSanitizerLegacyPass::ID = 0;
1320 
1321 INITIALIZE_PASS(
1322     ModuleAddressSanitizerLegacyPass, "asan-module",
1323     "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
1324     "ModulePass",
1325     false, false)
1326 
1327 ModulePass *llvm::createModuleAddressSanitizerLegacyPassPass(
1328     bool CompileKernel, bool Recover, bool UseGlobalsGC, bool UseOdrIndicator,
1329     AsanDtorKind Destructor) {
1330   assert(!CompileKernel || Recover);
1331   return new ModuleAddressSanitizerLegacyPass(
1332       CompileKernel, Recover, UseGlobalsGC, UseOdrIndicator, Destructor);
1333 }
1334 
1335 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
1336   size_t Res = countTrailingZeros(TypeSize / 8);
1337   assert(Res < kNumberOfAccessSizes);
1338   return Res;
1339 }
1340 
1341 /// Create a global describing a source location.
1342 static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
1343                                                        LocationMetadata MD) {
1344   Constant *LocData[] = {
1345       createPrivateGlobalForString(M, MD.Filename, true, kAsanGenPrefix),
1346       ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
1347       ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
1348   };
1349   auto LocStruct = ConstantStruct::getAnon(LocData);
1350   auto GV = new GlobalVariable(M, LocStruct->getType(), true,
1351                                GlobalValue::PrivateLinkage, LocStruct,
1352                                kAsanGenPrefix);
1353   GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1354   return GV;
1355 }
1356 
1357 /// Check if \p G has been created by a trusted compiler pass.
1358 static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
1359   // Do not instrument @llvm.global_ctors, @llvm.used, etc.
1360   if (G->getName().startswith("llvm."))
1361     return true;
1362 
1363   // Do not instrument asan globals.
1364   if (G->getName().startswith(kAsanGenPrefix) ||
1365       G->getName().startswith(kSanCovGenPrefix) ||
1366       G->getName().startswith(kODRGenPrefix))
1367     return true;
1368 
1369   // Do not instrument gcov counter arrays.
1370   if (G->getName() == "__llvm_gcov_ctr")
1371     return true;
1372 
1373   return false;
1374 }
1375 
1376 static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
1377   Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1378   unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
1379   if (AddrSpace == 3 || AddrSpace == 5)
1380     return true;
1381   return false;
1382 }
1383 
1384 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
1385   // Shadow >> scale
1386   Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
1387   if (Mapping.Offset == 0) return Shadow;
1388   // (Shadow >> scale) | offset
1389   Value *ShadowBase;
1390   if (LocalDynamicShadow)
1391     ShadowBase = LocalDynamicShadow;
1392   else
1393     ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1394   if (Mapping.OrShadowOffset)
1395     return IRB.CreateOr(Shadow, ShadowBase);
1396   else
1397     return IRB.CreateAdd(Shadow, ShadowBase);
1398 }
1399 
1400 // Instrument memset/memmove/memcpy
1401 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1402   IRBuilder<> IRB(MI);
1403   if (isa<MemTransferInst>(MI)) {
1404     IRB.CreateCall(
1405         isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
1406         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1407          IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
1408          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1409   } else if (isa<MemSetInst>(MI)) {
1410     IRB.CreateCall(
1411         AsanMemset,
1412         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1413          IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1414          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1415   }
1416   MI->eraseFromParent();
1417 }
1418 
1419 /// Check if we want (and can) handle this alloca.
1420 bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1421   auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
1422 
1423   if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
1424     return PreviouslySeenAllocaInfo->getSecond();
1425 
1426   bool IsInteresting =
1427       (AI.getAllocatedType()->isSized() &&
1428        // alloca() may be called with 0 size, ignore it.
1429        ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
1430        // We are only interested in allocas not promotable to registers.
1431        // Promotable allocas are common under -O0.
1432        (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
1433        // inalloca allocas are not treated as static, and we don't want
1434        // dynamic alloca instrumentation for them as well.
1435        !AI.isUsedWithInAlloca() &&
1436        // swifterror allocas are register promoted by ISel
1437        !AI.isSwiftError());
1438 
1439   ProcessedAllocas[&AI] = IsInteresting;
1440   return IsInteresting;
1441 }
1442 
1443 bool AddressSanitizer::ignoreAccess(Value *Ptr) {
1444   // Instrument acesses from different address spaces only for AMDGPU.
1445   Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1446   if (PtrTy->getPointerAddressSpace() != 0 &&
1447       !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
1448     return true;
1449 
1450   // Ignore swifterror addresses.
1451   // swifterror memory addresses are mem2reg promoted by instruction
1452   // selection. As such they cannot have regular uses like an instrumentation
1453   // function and it makes no sense to track them as memory.
1454   if (Ptr->isSwiftError())
1455     return true;
1456 
1457   // Treat memory accesses to promotable allocas as non-interesting since they
1458   // will not cause memory violations. This greatly speeds up the instrumented
1459   // executable at -O0.
1460   if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
1461     if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
1462       return true;
1463 
1464   return false;
1465 }
1466 
1467 void AddressSanitizer::getInterestingMemoryOperands(
1468     Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
1469   // Skip memory accesses inserted by another instrumentation.
1470   if (I->hasMetadata("nosanitize"))
1471     return;
1472 
1473   // Do not instrument the load fetching the dynamic shadow address.
1474   if (LocalDynamicShadow == I)
1475     return;
1476 
1477   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1478     if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
1479       return;
1480     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
1481                              LI->getType(), LI->getAlign());
1482   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1483     if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
1484       return;
1485     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
1486                              SI->getValueOperand()->getType(), SI->getAlign());
1487   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
1488     if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
1489       return;
1490     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
1491                              RMW->getValOperand()->getType(), None);
1492   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
1493     if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
1494       return;
1495     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
1496                              XCHG->getCompareOperand()->getType(), None);
1497   } else if (auto CI = dyn_cast<CallInst>(I)) {
1498     auto *F = CI->getCalledFunction();
1499     if (F && (F->getName().startswith("llvm.masked.load.") ||
1500               F->getName().startswith("llvm.masked.store."))) {
1501       bool IsWrite = F->getName().startswith("llvm.masked.store.");
1502       // Masked store has an initial operand for the value.
1503       unsigned OpOffset = IsWrite ? 1 : 0;
1504       if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1505         return;
1506 
1507       auto BasePtr = CI->getOperand(OpOffset);
1508       if (ignoreAccess(BasePtr))
1509         return;
1510       auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
1511       MaybeAlign Alignment = Align(1);
1512       // Otherwise no alignment guarantees. We probably got Undef.
1513       if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
1514         Alignment = Op->getMaybeAlignValue();
1515       Value *Mask = CI->getOperand(2 + OpOffset);
1516       Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
1517     } else {
1518       for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
1519         if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
1520             ignoreAccess(CI->getArgOperand(ArgNo)))
1521           continue;
1522         Type *Ty = CI->getParamByValType(ArgNo);
1523         Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
1524       }
1525     }
1526   }
1527 }
1528 
1529 static bool isPointerOperand(Value *V) {
1530   return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1531 }
1532 
1533 // This is a rough heuristic; it may cause both false positives and
1534 // false negatives. The proper implementation requires cooperation with
1535 // the frontend.
1536 static bool isInterestingPointerComparison(Instruction *I) {
1537   if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1538     if (!Cmp->isRelational())
1539       return false;
1540   } else {
1541     return false;
1542   }
1543   return isPointerOperand(I->getOperand(0)) &&
1544          isPointerOperand(I->getOperand(1));
1545 }
1546 
1547 // This is a rough heuristic; it may cause both false positives and
1548 // false negatives. The proper implementation requires cooperation with
1549 // the frontend.
1550 static bool isInterestingPointerSubtraction(Instruction *I) {
1551   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1552     if (BO->getOpcode() != Instruction::Sub)
1553       return false;
1554   } else {
1555     return false;
1556   }
1557   return isPointerOperand(I->getOperand(0)) &&
1558          isPointerOperand(I->getOperand(1));
1559 }
1560 
1561 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1562   // If a global variable does not have dynamic initialization we don't
1563   // have to instrument it.  However, if a global does not have initializer
1564   // at all, we assume it has dynamic initializer (in other TU).
1565   //
1566   // FIXME: Metadata should be attched directly to the global directly instead
1567   // of being added to llvm.asan.globals.
1568   return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
1569 }
1570 
1571 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1572     Instruction *I) {
1573   IRBuilder<> IRB(I);
1574   FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1575   Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1576   for (Value *&i : Param) {
1577     if (i->getType()->isPointerTy())
1578       i = IRB.CreatePointerCast(i, IntptrTy);
1579   }
1580   IRB.CreateCall(F, Param);
1581 }
1582 
1583 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1584                                 Instruction *InsertBefore, Value *Addr,
1585                                 MaybeAlign Alignment, unsigned Granularity,
1586                                 uint32_t TypeSize, bool IsWrite,
1587                                 Value *SizeArgument, bool UseCalls,
1588                                 uint32_t Exp) {
1589   // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
1590   // if the data is properly aligned.
1591   if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
1592        TypeSize == 128) &&
1593       (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
1594     return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
1595                                    nullptr, UseCalls, Exp);
1596   Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
1597                                          IsWrite, nullptr, UseCalls, Exp);
1598 }
1599 
1600 static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
1601                                         const DataLayout &DL, Type *IntptrTy,
1602                                         Value *Mask, Instruction *I,
1603                                         Value *Addr, MaybeAlign Alignment,
1604                                         unsigned Granularity, uint32_t TypeSize,
1605                                         bool IsWrite, Value *SizeArgument,
1606                                         bool UseCalls, uint32_t Exp) {
1607   auto *VTy = cast<FixedVectorType>(
1608       cast<PointerType>(Addr->getType())->getElementType());
1609   uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1610   unsigned Num = VTy->getNumElements();
1611   auto Zero = ConstantInt::get(IntptrTy, 0);
1612   for (unsigned Idx = 0; Idx < Num; ++Idx) {
1613     Value *InstrumentedAddress = nullptr;
1614     Instruction *InsertBefore = I;
1615     if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
1616       // dyn_cast as we might get UndefValue
1617       if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
1618         if (Masked->isZero())
1619           // Mask is constant false, so no instrumentation needed.
1620           continue;
1621         // If we have a true or undef value, fall through to doInstrumentAddress
1622         // with InsertBefore == I
1623       }
1624     } else {
1625       IRBuilder<> IRB(I);
1626       Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
1627       Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
1628       InsertBefore = ThenTerm;
1629     }
1630 
1631     IRBuilder<> IRB(InsertBefore);
1632     InstrumentedAddress =
1633         IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
1634     doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
1635                         Granularity, ElemTypeSize, IsWrite, SizeArgument,
1636                         UseCalls, Exp);
1637   }
1638 }
1639 
1640 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1641                                      InterestingMemoryOperand &O, bool UseCalls,
1642                                      const DataLayout &DL) {
1643   Value *Addr = O.getPtr();
1644 
1645   // Optimization experiments.
1646   // The experiments can be used to evaluate potential optimizations that remove
1647   // instrumentation (assess false negatives). Instead of completely removing
1648   // some instrumentation, you set Exp to a non-zero value (mask of optimization
1649   // experiments that want to remove instrumentation of this instruction).
1650   // If Exp is non-zero, this pass will emit special calls into runtime
1651   // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1652   // make runtime terminate the program in a special way (with a different
1653   // exit status). Then you run the new compiler on a buggy corpus, collect
1654   // the special terminations (ideally, you don't see them at all -- no false
1655   // negatives) and make the decision on the optimization.
1656   uint32_t Exp = ClForceExperiment;
1657 
1658   if (ClOpt && ClOptGlobals) {
1659     // If initialization order checking is disabled, a simple access to a
1660     // dynamically initialized global is always valid.
1661     GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1662     if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1663         isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
1664       NumOptimizedAccessesToGlobalVar++;
1665       return;
1666     }
1667   }
1668 
1669   if (ClOpt && ClOptStack) {
1670     // A direct inbounds access to a stack variable is always valid.
1671     if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1672         isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
1673       NumOptimizedAccessesToStackVar++;
1674       return;
1675     }
1676   }
1677 
1678   if (O.IsWrite)
1679     NumInstrumentedWrites++;
1680   else
1681     NumInstrumentedReads++;
1682 
1683   unsigned Granularity = 1 << Mapping.Scale;
1684   if (O.MaybeMask) {
1685     instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(),
1686                                 Addr, O.Alignment, Granularity, O.TypeSize,
1687                                 O.IsWrite, nullptr, UseCalls, Exp);
1688   } else {
1689     doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1690                         Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls,
1691                         Exp);
1692   }
1693 }
1694 
1695 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1696                                                  Value *Addr, bool IsWrite,
1697                                                  size_t AccessSizeIndex,
1698                                                  Value *SizeArgument,
1699                                                  uint32_t Exp) {
1700   IRBuilder<> IRB(InsertBefore);
1701   Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1702   CallInst *Call = nullptr;
1703   if (SizeArgument) {
1704     if (Exp == 0)
1705       Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
1706                             {Addr, SizeArgument});
1707     else
1708       Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
1709                             {Addr, SizeArgument, ExpVal});
1710   } else {
1711     if (Exp == 0)
1712       Call =
1713           IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1714     else
1715       Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
1716                             {Addr, ExpVal});
1717   }
1718 
1719   Call->setCannotMerge();
1720   return Call;
1721 }
1722 
1723 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1724                                            Value *ShadowValue,
1725                                            uint32_t TypeSize) {
1726   size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1727   // Addr & (Granularity - 1)
1728   Value *LastAccessedByte =
1729       IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1730   // (Addr & (Granularity - 1)) + size - 1
1731   if (TypeSize / 8 > 1)
1732     LastAccessedByte = IRB.CreateAdd(
1733         LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
1734   // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1735   LastAccessedByte =
1736       IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1737   // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1738   return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1739 }
1740 
1741 Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1742     Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1743     uint32_t TypeSize, bool IsWrite, Value *SizeArgument) {
1744   // Do not instrument unsupported addrspaces.
1745   if (isUnsupportedAMDGPUAddrspace(Addr))
1746     return nullptr;
1747   Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1748   // Follow host instrumentation for global and constant addresses.
1749   if (PtrTy->getPointerAddressSpace() != 0)
1750     return InsertBefore;
1751   // Instrument generic addresses in supported addressspaces.
1752   IRBuilder<> IRB(InsertBefore);
1753   Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy());
1754   Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong});
1755   Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong});
1756   Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1757   Value *Cmp = IRB.CreateICmpNE(IRB.getTrue(), IsSharedOrPrivate);
1758   Value *AddrSpaceZeroLanding =
1759       SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1760   InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1761   return InsertBefore;
1762 }
1763 
1764 void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1765                                          Instruction *InsertBefore, Value *Addr,
1766                                          uint32_t TypeSize, bool IsWrite,
1767                                          Value *SizeArgument, bool UseCalls,
1768                                          uint32_t Exp) {
1769   if (TargetTriple.isAMDGPU()) {
1770     InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1771                                            TypeSize, IsWrite, SizeArgument);
1772     if (!InsertBefore)
1773       return;
1774   }
1775 
1776   IRBuilder<> IRB(InsertBefore);
1777   size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
1778   const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1779 
1780   if (UseCalls && ClOptimizeCallbacks) {
1781     const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1782     Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1783     IRB.CreateCall(
1784         Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
1785         {IRB.CreatePointerCast(Addr, Int8PtrTy),
1786          ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1787     return;
1788   }
1789 
1790   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1791   if (UseCalls) {
1792     if (Exp == 0)
1793       IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
1794                      AddrLong);
1795     else
1796       IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1797                      {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1798     return;
1799   }
1800 
1801   Type *ShadowTy =
1802       IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
1803   Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
1804   Value *ShadowPtr = memToShadow(AddrLong, IRB);
1805   Value *CmpVal = Constant::getNullValue(ShadowTy);
1806   Value *ShadowValue =
1807       IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
1808 
1809   Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
1810   size_t Granularity = 1ULL << Mapping.Scale;
1811   Instruction *CrashTerm = nullptr;
1812 
1813   if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
1814     // We use branch weights for the slow path check, to indicate that the slow
1815     // path is rarely taken. This seems to be the case for SPEC benchmarks.
1816     Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1817         Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
1818     assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1819     BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1820     IRB.SetInsertPoint(CheckTerm);
1821     Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
1822     if (Recover) {
1823       CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1824     } else {
1825       BasicBlock *CrashBlock =
1826         BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1827       CrashTerm = new UnreachableInst(*C, CrashBlock);
1828       BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1829       ReplaceInstWithInst(CheckTerm, NewTerm);
1830     }
1831   } else {
1832     CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1833   }
1834 
1835   Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
1836                                          AccessSizeIndex, SizeArgument, Exp);
1837   Crash->setDebugLoc(OrigIns->getDebugLoc());
1838 }
1839 
1840 // Instrument unusual size or unusual alignment.
1841 // We can not do it with a single check, so we do 1-byte check for the first
1842 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1843 // to report the actual access size.
1844 void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1845     Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
1846     bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
1847   IRBuilder<> IRB(InsertBefore);
1848   Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
1849   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1850   if (UseCalls) {
1851     if (Exp == 0)
1852       IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
1853                      {AddrLong, Size});
1854     else
1855       IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
1856                      {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1857   } else {
1858     Value *LastByte = IRB.CreateIntToPtr(
1859         IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
1860         Addr->getType());
1861     instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
1862     instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
1863   }
1864 }
1865 
1866 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
1867                                                   GlobalValue *ModuleName) {
1868   // Set up the arguments to our poison/unpoison functions.
1869   IRBuilder<> IRB(&GlobalInit.front(),
1870                   GlobalInit.front().getFirstInsertionPt());
1871 
1872   // Add a call to poison all external globals before the given function starts.
1873   Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
1874   IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1875 
1876   // Add calls to unpoison all globals before each return instruction.
1877   for (auto &BB : GlobalInit.getBasicBlockList())
1878     if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1879       CallInst::Create(AsanUnpoisonGlobals, "", RI);
1880 }
1881 
1882 void ModuleAddressSanitizer::createInitializerPoisonCalls(
1883     Module &M, GlobalValue *ModuleName) {
1884   GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1885   if (!GV)
1886     return;
1887 
1888   ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1889   if (!CA)
1890     return;
1891 
1892   for (Use &OP : CA->operands()) {
1893     if (isa<ConstantAggregateZero>(OP)) continue;
1894     ConstantStruct *CS = cast<ConstantStruct>(OP);
1895 
1896     // Must have a function or null ptr.
1897     if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
1898       if (F->getName() == kAsanModuleCtorName) continue;
1899       auto *Priority = cast<ConstantInt>(CS->getOperand(0));
1900       // Don't instrument CTORs that will run before asan.module_ctor.
1901       if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
1902         continue;
1903       poisonOneInitializer(*F, ModuleName);
1904     }
1905   }
1906 }
1907 
1908 const GlobalVariable *
1909 ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
1910   // In case this function should be expanded to include rules that do not just
1911   // apply when CompileKernel is true, either guard all existing rules with an
1912   // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
1913   // should also apply to user space.
1914   assert(CompileKernel && "Only expecting to be called when compiling kernel");
1915 
1916   const Constant *C = GA.getAliasee();
1917 
1918   // When compiling the kernel, globals that are aliased by symbols prefixed
1919   // by "__" are special and cannot be padded with a redzone.
1920   if (GA.getName().startswith("__"))
1921     return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
1922 
1923   return nullptr;
1924 }
1925 
1926 bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
1927   Type *Ty = G->getValueType();
1928   LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
1929 
1930   // FIXME: Metadata should be attched directly to the global directly instead
1931   // of being added to llvm.asan.globals.
1932   if (GlobalsMD.get(G).IsExcluded) return false;
1933   if (!Ty->isSized()) return false;
1934   if (!G->hasInitializer()) return false;
1935   // Globals in address space 1 and 4 are supported for AMDGPU.
1936   if (G->getAddressSpace() &&
1937       !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
1938     return false;
1939   if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
1940   // Two problems with thread-locals:
1941   //   - The address of the main thread's copy can't be computed at link-time.
1942   //   - Need to poison all copies, not just the main thread's one.
1943   if (G->isThreadLocal()) return false;
1944   // For now, just ignore this Global if the alignment is large.
1945   if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false;
1946 
1947   // For non-COFF targets, only instrument globals known to be defined by this
1948   // TU.
1949   // FIXME: We can instrument comdat globals on ELF if we are using the
1950   // GC-friendly metadata scheme.
1951   if (!TargetTriple.isOSBinFormatCOFF()) {
1952     if (!G->hasExactDefinition() || G->hasComdat())
1953       return false;
1954   } else {
1955     // On COFF, don't instrument non-ODR linkages.
1956     if (G->isInterposable())
1957       return false;
1958   }
1959 
1960   // If a comdat is present, it must have a selection kind that implies ODR
1961   // semantics: no duplicates, any, or exact match.
1962   if (Comdat *C = G->getComdat()) {
1963     switch (C->getSelectionKind()) {
1964     case Comdat::Any:
1965     case Comdat::ExactMatch:
1966     case Comdat::NoDeduplicate:
1967       break;
1968     case Comdat::Largest:
1969     case Comdat::SameSize:
1970       return false;
1971     }
1972   }
1973 
1974   if (G->hasSection()) {
1975     // The kernel uses explicit sections for mostly special global variables
1976     // that we should not instrument. E.g. the kernel may rely on their layout
1977     // without redzones, or remove them at link time ("discard.*"), etc.
1978     if (CompileKernel)
1979       return false;
1980 
1981     StringRef Section = G->getSection();
1982 
1983     // Globals from llvm.metadata aren't emitted, do not instrument them.
1984     if (Section == "llvm.metadata") return false;
1985     // Do not instrument globals from special LLVM sections.
1986     if (Section.find("__llvm") != StringRef::npos || Section.find("__LLVM") != StringRef::npos) return false;
1987 
1988     // Do not instrument function pointers to initialization and termination
1989     // routines: dynamic linker will not properly handle redzones.
1990     if (Section.startswith(".preinit_array") ||
1991         Section.startswith(".init_array") ||
1992         Section.startswith(".fini_array")) {
1993       return false;
1994     }
1995 
1996     // Do not instrument user-defined sections (with names resembling
1997     // valid C identifiers)
1998     if (TargetTriple.isOSBinFormatELF()) {
1999       if (llvm::all_of(Section,
2000                        [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2001         return false;
2002     }
2003 
2004     // On COFF, if the section name contains '$', it is highly likely that the
2005     // user is using section sorting to create an array of globals similar to
2006     // the way initialization callbacks are registered in .init_array and
2007     // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2008     // to such globals is counterproductive, because the intent is that they
2009     // will form an array, and out-of-bounds accesses are expected.
2010     // See https://github.com/google/sanitizers/issues/305
2011     // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2012     if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2013       LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2014                         << *G << "\n");
2015       return false;
2016     }
2017 
2018     if (TargetTriple.isOSBinFormatMachO()) {
2019       StringRef ParsedSegment, ParsedSection;
2020       unsigned TAA = 0, StubSize = 0;
2021       bool TAAParsed;
2022       cantFail(MCSectionMachO::ParseSectionSpecifier(
2023           Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2024 
2025       // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2026       // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2027       // them.
2028       if (ParsedSegment == "__OBJC" ||
2029           (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
2030         LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2031         return false;
2032       }
2033       // See https://github.com/google/sanitizers/issues/32
2034       // Constant CFString instances are compiled in the following way:
2035       //  -- the string buffer is emitted into
2036       //     __TEXT,__cstring,cstring_literals
2037       //  -- the constant NSConstantString structure referencing that buffer
2038       //     is placed into __DATA,__cfstring
2039       // Therefore there's no point in placing redzones into __DATA,__cfstring.
2040       // Moreover, it causes the linker to crash on OS X 10.7
2041       if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2042         LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2043         return false;
2044       }
2045       // The linker merges the contents of cstring_literals and removes the
2046       // trailing zeroes.
2047       if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2048         LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2049         return false;
2050       }
2051     }
2052   }
2053 
2054   if (CompileKernel) {
2055     // Globals that prefixed by "__" are special and cannot be padded with a
2056     // redzone.
2057     if (G->getName().startswith("__"))
2058       return false;
2059   }
2060 
2061   return true;
2062 }
2063 
2064 // On Mach-O platforms, we emit global metadata in a separate section of the
2065 // binary in order to allow the linker to properly dead strip. This is only
2066 // supported on recent versions of ld64.
2067 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2068   if (!TargetTriple.isOSBinFormatMachO())
2069     return false;
2070 
2071   if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2072     return true;
2073   if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2074     return true;
2075   if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2076     return true;
2077 
2078   return false;
2079 }
2080 
2081 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2082   switch (TargetTriple.getObjectFormat()) {
2083   case Triple::COFF:  return ".ASAN$GL";
2084   case Triple::ELF:   return "asan_globals";
2085   case Triple::MachO: return "__DATA,__asan_globals,regular";
2086   case Triple::Wasm:
2087   case Triple::GOFF:
2088   case Triple::XCOFF:
2089     report_fatal_error(
2090         "ModuleAddressSanitizer not implemented for object file format");
2091   case Triple::UnknownObjectFormat:
2092     break;
2093   }
2094   llvm_unreachable("unsupported object format");
2095 }
2096 
2097 void ModuleAddressSanitizer::initializeCallbacks(Module &M) {
2098   IRBuilder<> IRB(*C);
2099 
2100   // Declare our poisoning and unpoisoning functions.
2101   AsanPoisonGlobals =
2102       M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2103   AsanUnpoisonGlobals =
2104       M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2105 
2106   // Declare functions that register/unregister globals.
2107   AsanRegisterGlobals = M.getOrInsertFunction(
2108       kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2109   AsanUnregisterGlobals = M.getOrInsertFunction(
2110       kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2111 
2112   // Declare the functions that find globals in a shared object and then invoke
2113   // the (un)register function on them.
2114   AsanRegisterImageGlobals = M.getOrInsertFunction(
2115       kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2116   AsanUnregisterImageGlobals = M.getOrInsertFunction(
2117       kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2118 
2119   AsanRegisterElfGlobals =
2120       M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2121                             IntptrTy, IntptrTy, IntptrTy);
2122   AsanUnregisterElfGlobals =
2123       M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2124                             IntptrTy, IntptrTy, IntptrTy);
2125 }
2126 
2127 // Put the metadata and the instrumented global in the same group. This ensures
2128 // that the metadata is discarded if the instrumented global is discarded.
2129 void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2130     GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2131   Module &M = *G->getParent();
2132   Comdat *C = G->getComdat();
2133   if (!C) {
2134     if (!G->hasName()) {
2135       // If G is unnamed, it must be internal. Give it an artificial name
2136       // so we can put it in a comdat.
2137       assert(G->hasLocalLinkage());
2138       G->setName(Twine(kAsanGenPrefix) + "_anon_global");
2139     }
2140 
2141     if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2142       std::string Name = std::string(G->getName());
2143       Name += InternalSuffix;
2144       C = M.getOrInsertComdat(Name);
2145     } else {
2146       C = M.getOrInsertComdat(G->getName());
2147     }
2148 
2149     // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2150     // linkage to internal linkage so that a symbol table entry is emitted. This
2151     // is necessary in order to create the comdat group.
2152     if (TargetTriple.isOSBinFormatCOFF()) {
2153       C->setSelectionKind(Comdat::NoDeduplicate);
2154       if (G->hasPrivateLinkage())
2155         G->setLinkage(GlobalValue::InternalLinkage);
2156     }
2157     G->setComdat(C);
2158   }
2159 
2160   assert(G->hasComdat());
2161   Metadata->setComdat(G->getComdat());
2162 }
2163 
2164 // Create a separate metadata global and put it in the appropriate ASan
2165 // global registration section.
2166 GlobalVariable *
2167 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
2168                                              StringRef OriginalName) {
2169   auto Linkage = TargetTriple.isOSBinFormatMachO()
2170                      ? GlobalVariable::InternalLinkage
2171                      : GlobalVariable::PrivateLinkage;
2172   GlobalVariable *Metadata = new GlobalVariable(
2173       M, Initializer->getType(), false, Linkage, Initializer,
2174       Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2175   Metadata->setSection(getGlobalMetadataSection());
2176   return Metadata;
2177 }
2178 
2179 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
2180   AsanDtorFunction = Function::createWithDefaultAttr(
2181       FunctionType::get(Type::getVoidTy(*C), false),
2182       GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2183   AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2184   // Ensure Dtor cannot be discarded, even if in a comdat.
2185   appendToUsed(M, {AsanDtorFunction});
2186   BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2187 
2188   return ReturnInst::Create(*C, AsanDtorBB);
2189 }
2190 
2191 void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2192     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2193     ArrayRef<Constant *> MetadataInitializers) {
2194   assert(ExtendedGlobals.size() == MetadataInitializers.size());
2195   auto &DL = M.getDataLayout();
2196 
2197   SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2198   for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2199     Constant *Initializer = MetadataInitializers[i];
2200     GlobalVariable *G = ExtendedGlobals[i];
2201     GlobalVariable *Metadata =
2202         CreateMetadataGlobal(M, Initializer, G->getName());
2203     MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2204     Metadata->setMetadata(LLVMContext::MD_associated, MD);
2205     MetadataGlobals[i] = Metadata;
2206 
2207     // The MSVC linker always inserts padding when linking incrementally. We
2208     // cope with that by aligning each struct to its size, which must be a power
2209     // of two.
2210     unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2211     assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2212            "global metadata will not be padded appropriately");
2213     Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2214 
2215     SetComdatForGlobalMetadata(G, Metadata, "");
2216   }
2217 
2218   // Update llvm.compiler.used, adding the new metadata globals. This is
2219   // needed so that during LTO these variables stay alive.
2220   if (!MetadataGlobals.empty())
2221     appendToCompilerUsed(M, MetadataGlobals);
2222 }
2223 
2224 void ModuleAddressSanitizer::InstrumentGlobalsELF(
2225     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2226     ArrayRef<Constant *> MetadataInitializers,
2227     const std::string &UniqueModuleId) {
2228   assert(ExtendedGlobals.size() == MetadataInitializers.size());
2229 
2230   // Putting globals in a comdat changes the semantic and potentially cause
2231   // false negative odr violations at link time. If odr indicators are used, we
2232   // keep the comdat sections, as link time odr violations will be dectected on
2233   // the odr indicator symbols.
2234   bool UseComdatForGlobalsGC = UseOdrIndicator;
2235 
2236   SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2237   for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2238     GlobalVariable *G = ExtendedGlobals[i];
2239     GlobalVariable *Metadata =
2240         CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
2241     MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2242     Metadata->setMetadata(LLVMContext::MD_associated, MD);
2243     MetadataGlobals[i] = Metadata;
2244 
2245     if (UseComdatForGlobalsGC)
2246       SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2247   }
2248 
2249   // Update llvm.compiler.used, adding the new metadata globals. This is
2250   // needed so that during LTO these variables stay alive.
2251   if (!MetadataGlobals.empty())
2252     appendToCompilerUsed(M, MetadataGlobals);
2253 
2254   // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2255   // to look up the loaded image that contains it. Second, we can store in it
2256   // whether registration has already occurred, to prevent duplicate
2257   // registration.
2258   //
2259   // Common linkage ensures that there is only one global per shared library.
2260   GlobalVariable *RegisteredFlag = new GlobalVariable(
2261       M, IntptrTy, false, GlobalVariable::CommonLinkage,
2262       ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2263   RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2264 
2265   // Create start and stop symbols.
2266   GlobalVariable *StartELFMetadata = new GlobalVariable(
2267       M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2268       "__start_" + getGlobalMetadataSection());
2269   StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2270   GlobalVariable *StopELFMetadata = new GlobalVariable(
2271       M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2272       "__stop_" + getGlobalMetadataSection());
2273   StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2274 
2275   // Create a call to register the globals with the runtime.
2276   IRB.CreateCall(AsanRegisterElfGlobals,
2277                  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2278                   IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2279                   IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2280 
2281   // We also need to unregister globals at the end, e.g., when a shared library
2282   // gets closed.
2283   if (DestructorKind != AsanDtorKind::None) {
2284     IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2285     IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2286                        {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2287                         IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2288                         IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2289   }
2290 }
2291 
2292 void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2293     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2294     ArrayRef<Constant *> MetadataInitializers) {
2295   assert(ExtendedGlobals.size() == MetadataInitializers.size());
2296 
2297   // On recent Mach-O platforms, use a structure which binds the liveness of
2298   // the global variable to the metadata struct. Keep the list of "Liveness" GV
2299   // created to be added to llvm.compiler.used
2300   StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2301   SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2302 
2303   for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2304     Constant *Initializer = MetadataInitializers[i];
2305     GlobalVariable *G = ExtendedGlobals[i];
2306     GlobalVariable *Metadata =
2307         CreateMetadataGlobal(M, Initializer, G->getName());
2308 
2309     // On recent Mach-O platforms, we emit the global metadata in a way that
2310     // allows the linker to properly strip dead globals.
2311     auto LivenessBinder =
2312         ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2313                             ConstantExpr::getPointerCast(Metadata, IntptrTy));
2314     GlobalVariable *Liveness = new GlobalVariable(
2315         M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2316         Twine("__asan_binder_") + G->getName());
2317     Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2318     LivenessGlobals[i] = Liveness;
2319   }
2320 
2321   // Update llvm.compiler.used, adding the new liveness globals. This is
2322   // needed so that during LTO these variables stay alive. The alternative
2323   // would be to have the linker handling the LTO symbols, but libLTO
2324   // current API does not expose access to the section for each symbol.
2325   if (!LivenessGlobals.empty())
2326     appendToCompilerUsed(M, LivenessGlobals);
2327 
2328   // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2329   // to look up the loaded image that contains it. Second, we can store in it
2330   // whether registration has already occurred, to prevent duplicate
2331   // registration.
2332   //
2333   // common linkage ensures that there is only one global per shared library.
2334   GlobalVariable *RegisteredFlag = new GlobalVariable(
2335       M, IntptrTy, false, GlobalVariable::CommonLinkage,
2336       ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2337   RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2338 
2339   IRB.CreateCall(AsanRegisterImageGlobals,
2340                  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2341 
2342   // We also need to unregister globals at the end, e.g., when a shared library
2343   // gets closed.
2344   if (DestructorKind != AsanDtorKind::None) {
2345     IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2346     IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2347                        {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2348   }
2349 }
2350 
2351 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2352     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2353     ArrayRef<Constant *> MetadataInitializers) {
2354   assert(ExtendedGlobals.size() == MetadataInitializers.size());
2355   unsigned N = ExtendedGlobals.size();
2356   assert(N > 0);
2357 
2358   // On platforms that don't have a custom metadata section, we emit an array
2359   // of global metadata structures.
2360   ArrayType *ArrayOfGlobalStructTy =
2361       ArrayType::get(MetadataInitializers[0]->getType(), N);
2362   auto AllGlobals = new GlobalVariable(
2363       M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2364       ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2365   if (Mapping.Scale > 3)
2366     AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2367 
2368   IRB.CreateCall(AsanRegisterGlobals,
2369                  {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2370                   ConstantInt::get(IntptrTy, N)});
2371 
2372   // We also need to unregister globals at the end, e.g., when a shared library
2373   // gets closed.
2374   if (DestructorKind != AsanDtorKind::None) {
2375     IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2376     IrbDtor.CreateCall(AsanUnregisterGlobals,
2377                        {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2378                         ConstantInt::get(IntptrTy, N)});
2379   }
2380 }
2381 
2382 // This function replaces all global variables with new variables that have
2383 // trailing redzones. It also creates a function that poisons
2384 // redzones and inserts this function into llvm.global_ctors.
2385 // Sets *CtorComdat to true if the global registration code emitted into the
2386 // asan constructor is comdat-compatible.
2387 bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
2388                                                bool *CtorComdat) {
2389   *CtorComdat = false;
2390 
2391   // Build set of globals that are aliased by some GA, where
2392   // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2393   SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2394   if (CompileKernel) {
2395     for (auto &GA : M.aliases()) {
2396       if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2397         AliasedGlobalExclusions.insert(GV);
2398     }
2399   }
2400 
2401   SmallVector<GlobalVariable *, 16> GlobalsToChange;
2402   for (auto &G : M.globals()) {
2403     if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2404       GlobalsToChange.push_back(&G);
2405   }
2406 
2407   size_t n = GlobalsToChange.size();
2408   if (n == 0) {
2409     *CtorComdat = true;
2410     return false;
2411   }
2412 
2413   auto &DL = M.getDataLayout();
2414 
2415   // A global is described by a structure
2416   //   size_t beg;
2417   //   size_t size;
2418   //   size_t size_with_redzone;
2419   //   const char *name;
2420   //   const char *module_name;
2421   //   size_t has_dynamic_init;
2422   //   void *source_location;
2423   //   size_t odr_indicator;
2424   // We initialize an array of such structures and pass it to a run-time call.
2425   StructType *GlobalStructTy =
2426       StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2427                       IntptrTy, IntptrTy, IntptrTy);
2428   SmallVector<GlobalVariable *, 16> NewGlobals(n);
2429   SmallVector<Constant *, 16> Initializers(n);
2430 
2431   bool HasDynamicallyInitializedGlobals = false;
2432 
2433   // We shouldn't merge same module names, as this string serves as unique
2434   // module ID in runtime.
2435   GlobalVariable *ModuleName = createPrivateGlobalForString(
2436       M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);
2437 
2438   for (size_t i = 0; i < n; i++) {
2439     GlobalVariable *G = GlobalsToChange[i];
2440 
2441     // FIXME: Metadata should be attched directly to the global directly instead
2442     // of being added to llvm.asan.globals.
2443     auto MD = GlobalsMD.get(G);
2444     StringRef NameForGlobal = G->getName();
2445     // Create string holding the global name (use global name from metadata
2446     // if it's available, otherwise just write the name of global variable).
2447     GlobalVariable *Name = createPrivateGlobalForString(
2448         M, MD.Name.empty() ? NameForGlobal : MD.Name,
2449         /*AllowMerging*/ true, kAsanGenPrefix);
2450 
2451     Type *Ty = G->getValueType();
2452     const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2453     const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2454     Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2455 
2456     StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2457     Constant *NewInitializer = ConstantStruct::get(
2458         NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2459 
2460     // Create a new global variable with enough space for a redzone.
2461     GlobalValue::LinkageTypes Linkage = G->getLinkage();
2462     if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2463       Linkage = GlobalValue::InternalLinkage;
2464     GlobalVariable *NewGlobal = new GlobalVariable(
2465         M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2466         G->getThreadLocalMode(), G->getAddressSpace());
2467     NewGlobal->copyAttributesFrom(G);
2468     NewGlobal->setComdat(G->getComdat());
2469     NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal()));
2470     // Don't fold globals with redzones. ODR violation detector and redzone
2471     // poisoning implicitly creates a dependence on the global's address, so it
2472     // is no longer valid for it to be marked unnamed_addr.
2473     NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2474 
2475     // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2476     if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2477         G->isConstant()) {
2478       auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2479       if (Seq && Seq->isCString())
2480         NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2481     }
2482 
2483     // Transfer the debug info and type metadata.  The payload starts at offset
2484     // zero so we can copy the metadata over as is.
2485     NewGlobal->copyMetadata(G, 0);
2486 
2487     Value *Indices2[2];
2488     Indices2[0] = IRB.getInt32(0);
2489     Indices2[1] = IRB.getInt32(0);
2490 
2491     G->replaceAllUsesWith(
2492         ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2493     NewGlobal->takeName(G);
2494     G->eraseFromParent();
2495     NewGlobals[i] = NewGlobal;
2496 
2497     Constant *SourceLoc;
2498     if (!MD.SourceLoc.empty()) {
2499       auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
2500       SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
2501     } else {
2502       SourceLoc = ConstantInt::get(IntptrTy, 0);
2503     }
2504 
2505     Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
2506     GlobalValue *InstrumentedGlobal = NewGlobal;
2507 
2508     bool CanUsePrivateAliases =
2509         TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2510         TargetTriple.isOSBinFormatWasm();
2511     if (CanUsePrivateAliases && UsePrivateAlias) {
2512       // Create local alias for NewGlobal to avoid crash on ODR between
2513       // instrumented and non-instrumented libraries.
2514       InstrumentedGlobal =
2515           GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2516     }
2517 
2518     // ODR should not happen for local linkage.
2519     if (NewGlobal->hasLocalLinkage()) {
2520       ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
2521                                                IRB.getInt8PtrTy());
2522     } else if (UseOdrIndicator) {
2523       // With local aliases, we need to provide another externally visible
2524       // symbol __odr_asan_XXX to detect ODR violation.
2525       auto *ODRIndicatorSym =
2526           new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2527                              Constant::getNullValue(IRB.getInt8Ty()),
2528                              kODRGenPrefix + NameForGlobal, nullptr,
2529                              NewGlobal->getThreadLocalMode());
2530 
2531       // Set meaningful attributes for indicator symbol.
2532       ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2533       ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2534       ODRIndicatorSym->setAlignment(Align(1));
2535       ODRIndicator = ODRIndicatorSym;
2536     }
2537 
2538     Constant *Initializer = ConstantStruct::get(
2539         GlobalStructTy,
2540         ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2541         ConstantInt::get(IntptrTy, SizeInBytes),
2542         ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2543         ConstantExpr::getPointerCast(Name, IntptrTy),
2544         ConstantExpr::getPointerCast(ModuleName, IntptrTy),
2545         ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc,
2546         ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2547 
2548     if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
2549 
2550     LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2551 
2552     Initializers[i] = Initializer;
2553   }
2554 
2555   // Add instrumented globals to llvm.compiler.used list to avoid LTO from
2556   // ConstantMerge'ing them.
2557   SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2558   for (size_t i = 0; i < n; i++) {
2559     GlobalVariable *G = NewGlobals[i];
2560     if (G->getName().empty()) continue;
2561     GlobalsToAddToUsedList.push_back(G);
2562   }
2563   appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2564 
2565   std::string ELFUniqueModuleId =
2566       (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M)
2567                                                         : "";
2568 
2569   if (!ELFUniqueModuleId.empty()) {
2570     InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
2571     *CtorComdat = true;
2572   } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2573     InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
2574   } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2575     InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
2576   } else {
2577     InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
2578   }
2579 
2580   // Create calls for poisoning before initializers run and unpoisoning after.
2581   if (HasDynamicallyInitializedGlobals)
2582     createInitializerPoisonCalls(M, ModuleName);
2583 
2584   LLVM_DEBUG(dbgs() << M);
2585   return true;
2586 }
2587 
2588 uint64_t
2589 ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2590   constexpr uint64_t kMaxRZ = 1 << 18;
2591   const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2592 
2593   uint64_t RZ = 0;
2594   if (SizeInBytes <= MinRZ / 2) {
2595     // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2596     // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2597     // half of MinRZ.
2598     RZ = MinRZ - SizeInBytes;
2599   } else {
2600     // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2601     RZ = std::max(MinRZ, std::min(kMaxRZ, (SizeInBytes / MinRZ / 4) * MinRZ));
2602 
2603     // Round up to multiple of MinRZ.
2604     if (SizeInBytes % MinRZ)
2605       RZ += MinRZ - (SizeInBytes % MinRZ);
2606   }
2607 
2608   assert((RZ + SizeInBytes) % MinRZ == 0);
2609 
2610   return RZ;
2611 }
2612 
2613 int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
2614   int LongSize = M.getDataLayout().getPointerSizeInBits();
2615   bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
2616   int Version = 8;
2617   // 32-bit Android is one version ahead because of the switch to dynamic
2618   // shadow.
2619   Version += (LongSize == 32 && isAndroid);
2620   return Version;
2621 }
2622 
2623 bool ModuleAddressSanitizer::instrumentModule(Module &M) {
2624   initializeCallbacks(M);
2625 
2626   // Create a module constructor. A destructor is created lazily because not all
2627   // platforms, and not all modules need it.
2628   if (CompileKernel) {
2629     // The kernel always builds with its own runtime, and therefore does not
2630     // need the init and version check calls.
2631     AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2632   } else {
2633     std::string AsanVersion = std::to_string(GetAsanVersion(M));
2634     std::string VersionCheckName =
2635         ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2636     std::tie(AsanCtorFunction, std::ignore) =
2637         createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName,
2638                                             kAsanInitName, /*InitArgTypes=*/{},
2639                                             /*InitArgs=*/{}, VersionCheckName);
2640   }
2641 
2642   bool CtorComdat = true;
2643   if (ClGlobals) {
2644     IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2645     InstrumentGlobals(IRB, M, &CtorComdat);
2646   }
2647 
2648   const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2649 
2650   // Put the constructor and destructor in comdat if both
2651   // (1) global instrumentation is not TU-specific
2652   // (2) target is ELF.
2653   if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2654     AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2655     appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2656     if (AsanDtorFunction) {
2657       AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2658       appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2659     }
2660   } else {
2661     appendToGlobalCtors(M, AsanCtorFunction, Priority);
2662     if (AsanDtorFunction)
2663       appendToGlobalDtors(M, AsanDtorFunction, Priority);
2664   }
2665 
2666   return true;
2667 }
2668 
2669 void AddressSanitizer::initializeCallbacks(Module &M) {
2670   IRBuilder<> IRB(*C);
2671   // Create __asan_report* callbacks.
2672   // IsWrite, TypeSize and Exp are encoded in the function name.
2673   for (int Exp = 0; Exp < 2; Exp++) {
2674     for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2675       const std::string TypeStr = AccessIsWrite ? "store" : "load";
2676       const std::string ExpStr = Exp ? "exp_" : "";
2677       const std::string EndingStr = Recover ? "_noabort" : "";
2678 
2679       SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2680       SmallVector<Type *, 2> Args1{1, IntptrTy};
2681       if (Exp) {
2682         Type *ExpType = Type::getInt32Ty(*C);
2683         Args2.push_back(ExpType);
2684         Args1.push_back(ExpType);
2685       }
2686       AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2687           kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2688           FunctionType::get(IRB.getVoidTy(), Args2, false));
2689 
2690       AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2691           ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2692           FunctionType::get(IRB.getVoidTy(), Args2, false));
2693 
2694       for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2695            AccessSizeIndex++) {
2696         const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2697         AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2698             M.getOrInsertFunction(
2699                 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2700                 FunctionType::get(IRB.getVoidTy(), Args1, false));
2701 
2702         AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2703             M.getOrInsertFunction(
2704                 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2705                 FunctionType::get(IRB.getVoidTy(), Args1, false));
2706       }
2707     }
2708   }
2709 
2710   const std::string MemIntrinCallbackPrefix =
2711       CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
2712   AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2713                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
2714                                       IRB.getInt8PtrTy(), IntptrTy);
2715   AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
2716                                      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
2717                                      IRB.getInt8PtrTy(), IntptrTy);
2718   AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2719                                      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
2720                                      IRB.getInt32Ty(), IntptrTy);
2721 
2722   AsanHandleNoReturnFunc =
2723       M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2724 
2725   AsanPtrCmpFunction =
2726       M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2727   AsanPtrSubFunction =
2728       M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2729   if (Mapping.InGlobal)
2730     AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2731                                            ArrayType::get(IRB.getInt8Ty(), 0));
2732 
2733   AMDGPUAddressShared = M.getOrInsertFunction(
2734       kAMDGPUAddressSharedName, IRB.getInt1Ty(), IRB.getInt8PtrTy());
2735   AMDGPUAddressPrivate = M.getOrInsertFunction(
2736       kAMDGPUAddressPrivateName, IRB.getInt1Ty(), IRB.getInt8PtrTy());
2737 }
2738 
2739 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2740   // For each NSObject descendant having a +load method, this method is invoked
2741   // by the ObjC runtime before any of the static constructors is called.
2742   // Therefore we need to instrument such methods with a call to __asan_init
2743   // at the beginning in order to initialize our runtime before any access to
2744   // the shadow memory.
2745   // We cannot just ignore these methods, because they may call other
2746   // instrumented functions.
2747   if (F.getName().find(" load]") != std::string::npos) {
2748     FunctionCallee AsanInitFunction =
2749         declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2750     IRBuilder<> IRB(&F.front(), F.front().begin());
2751     IRB.CreateCall(AsanInitFunction, {});
2752     return true;
2753   }
2754   return false;
2755 }
2756 
2757 bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2758   // Generate code only when dynamic addressing is needed.
2759   if (Mapping.Offset != kDynamicShadowSentinel)
2760     return false;
2761 
2762   IRBuilder<> IRB(&F.front().front());
2763   if (Mapping.InGlobal) {
2764     if (ClWithIfuncSuppressRemat) {
2765       // An empty inline asm with input reg == output reg.
2766       // An opaque pointer-to-int cast, basically.
2767       InlineAsm *Asm = InlineAsm::get(
2768           FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2769           StringRef(""), StringRef("=r,0"),
2770           /*hasSideEffects=*/false);
2771       LocalDynamicShadow =
2772           IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2773     } else {
2774       LocalDynamicShadow =
2775           IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2776     }
2777   } else {
2778     Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2779         kAsanShadowMemoryDynamicAddress, IntptrTy);
2780     LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2781   }
2782   return true;
2783 }
2784 
2785 void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2786   // Find the one possible call to llvm.localescape and pre-mark allocas passed
2787   // to it as uninteresting. This assumes we haven't started processing allocas
2788   // yet. This check is done up front because iterating the use list in
2789   // isInterestingAlloca would be algorithmically slower.
2790   assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2791 
2792   // Try to get the declaration of llvm.localescape. If it's not in the module,
2793   // we can exit early.
2794   if (!F.getParent()->getFunction("llvm.localescape")) return;
2795 
2796   // Look for a call to llvm.localescape call in the entry block. It can't be in
2797   // any other block.
2798   for (Instruction &I : F.getEntryBlock()) {
2799     IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2800     if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2801       // We found a call. Mark all the allocas passed in as uninteresting.
2802       for (Value *Arg : II->arg_operands()) {
2803         AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2804         assert(AI && AI->isStaticAlloca() &&
2805                "non-static alloca arg to localescape");
2806         ProcessedAllocas[AI] = false;
2807       }
2808       break;
2809     }
2810   }
2811 }
2812 
2813 bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
2814   bool ShouldInstrument =
2815       ClDebugMin < 0 || ClDebugMax < 0 ||
2816       (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
2817   Instrumented++;
2818   return !ShouldInstrument;
2819 }
2820 
2821 bool AddressSanitizer::instrumentFunction(Function &F,
2822                                           const TargetLibraryInfo *TLI) {
2823   if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
2824   if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
2825   if (F.getName().startswith("__asan_")) return false;
2826 
2827   bool FunctionModified = false;
2828 
2829   // If needed, insert __asan_init before checking for SanitizeAddress attr.
2830   // This function needs to be called even if the function body is not
2831   // instrumented.
2832   if (maybeInsertAsanInitAtFunctionEntry(F))
2833     FunctionModified = true;
2834 
2835   // Leave if the function doesn't need instrumentation.
2836   if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
2837 
2838   LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
2839 
2840   initializeCallbacks(*F.getParent());
2841 
2842   FunctionStateRAII CleanupObj(this);
2843 
2844   FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
2845 
2846   // We can't instrument allocas used with llvm.localescape. Only static allocas
2847   // can be passed to that intrinsic.
2848   markEscapedLocalAllocas(F);
2849 
2850   // We want to instrument every address only once per basic block (unless there
2851   // are calls between uses).
2852   SmallPtrSet<Value *, 16> TempsToInstrument;
2853   SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
2854   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
2855   SmallVector<Instruction *, 8> NoReturnCalls;
2856   SmallVector<BasicBlock *, 16> AllBlocks;
2857   SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
2858   int NumAllocas = 0;
2859 
2860   // Fill the set of memory operations to instrument.
2861   for (auto &BB : F) {
2862     AllBlocks.push_back(&BB);
2863     TempsToInstrument.clear();
2864     int NumInsnsPerBB = 0;
2865     for (auto &Inst : BB) {
2866       if (LooksLikeCodeInBug11395(&Inst)) return false;
2867       SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
2868       getInterestingMemoryOperands(&Inst, InterestingOperands);
2869 
2870       if (!InterestingOperands.empty()) {
2871         for (auto &Operand : InterestingOperands) {
2872           if (ClOpt && ClOptSameTemp) {
2873             Value *Ptr = Operand.getPtr();
2874             // If we have a mask, skip instrumentation if we've already
2875             // instrumented the full object. But don't add to TempsToInstrument
2876             // because we might get another load/store with a different mask.
2877             if (Operand.MaybeMask) {
2878               if (TempsToInstrument.count(Ptr))
2879                 continue; // We've seen this (whole) temp in the current BB.
2880             } else {
2881               if (!TempsToInstrument.insert(Ptr).second)
2882                 continue; // We've seen this temp in the current BB.
2883             }
2884           }
2885           OperandsToInstrument.push_back(Operand);
2886           NumInsnsPerBB++;
2887         }
2888       } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
2889                   isInterestingPointerComparison(&Inst)) ||
2890                  ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
2891                   isInterestingPointerSubtraction(&Inst))) {
2892         PointerComparisonsOrSubtracts.push_back(&Inst);
2893       } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
2894         // ok, take it.
2895         IntrinToInstrument.push_back(MI);
2896         NumInsnsPerBB++;
2897       } else {
2898         if (isa<AllocaInst>(Inst)) NumAllocas++;
2899         if (auto *CB = dyn_cast<CallBase>(&Inst)) {
2900           // A call inside BB.
2901           TempsToInstrument.clear();
2902           if (CB->doesNotReturn() && !CB->hasMetadata("nosanitize"))
2903             NoReturnCalls.push_back(CB);
2904         }
2905         if (CallInst *CI = dyn_cast<CallInst>(&Inst))
2906           maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
2907       }
2908       if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
2909     }
2910   }
2911 
2912   bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 &&
2913                    OperandsToInstrument.size() + IntrinToInstrument.size() >
2914                        (unsigned)ClInstrumentationWithCallsThreshold);
2915   const DataLayout &DL = F.getParent()->getDataLayout();
2916   ObjectSizeOpts ObjSizeOpts;
2917   ObjSizeOpts.RoundToAlign = true;
2918   ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);
2919 
2920   // Instrument.
2921   int NumInstrumented = 0;
2922   for (auto &Operand : OperandsToInstrument) {
2923     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
2924       instrumentMop(ObjSizeVis, Operand, UseCalls,
2925                     F.getParent()->getDataLayout());
2926     FunctionModified = true;
2927   }
2928   for (auto Inst : IntrinToInstrument) {
2929     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
2930       instrumentMemIntrinsic(Inst);
2931     FunctionModified = true;
2932   }
2933 
2934   FunctionStackPoisoner FSP(F, *this);
2935   bool ChangedStack = FSP.runOnFunction();
2936 
2937   // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
2938   // See e.g. https://github.com/google/sanitizers/issues/37
2939   for (auto CI : NoReturnCalls) {
2940     IRBuilder<> IRB(CI);
2941     IRB.CreateCall(AsanHandleNoReturnFunc, {});
2942   }
2943 
2944   for (auto Inst : PointerComparisonsOrSubtracts) {
2945     instrumentPointerComparisonOrSubtraction(Inst);
2946     FunctionModified = true;
2947   }
2948 
2949   if (ChangedStack || !NoReturnCalls.empty())
2950     FunctionModified = true;
2951 
2952   LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
2953                     << F << "\n");
2954 
2955   return FunctionModified;
2956 }
2957 
2958 // Workaround for bug 11395: we don't want to instrument stack in functions
2959 // with large assembly blobs (32-bit only), otherwise reg alloc may crash.
2960 // FIXME: remove once the bug 11395 is fixed.
2961 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
2962   if (LongSize != 32) return false;
2963   CallInst *CI = dyn_cast<CallInst>(I);
2964   if (!CI || !CI->isInlineAsm()) return false;
2965   if (CI->getNumArgOperands() <= 5) return false;
2966   // We have inline assembly with quite a few arguments.
2967   return true;
2968 }
2969 
2970 void FunctionStackPoisoner::initializeCallbacks(Module &M) {
2971   IRBuilder<> IRB(*C);
2972   if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
2973       ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
2974     const char *MallocNameTemplate =
2975         ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
2976             ? kAsanStackMallocAlwaysNameTemplate
2977             : kAsanStackMallocNameTemplate;
2978     for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
2979       std::string Suffix = itostr(Index);
2980       AsanStackMallocFunc[Index] = M.getOrInsertFunction(
2981           MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
2982       AsanStackFreeFunc[Index] =
2983           M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
2984                                 IRB.getVoidTy(), IntptrTy, IntptrTy);
2985     }
2986   }
2987   if (ASan.UseAfterScope) {
2988     AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
2989         kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2990     AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
2991         kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2992   }
2993 
2994   for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
2995     std::ostringstream Name;
2996     Name << kAsanSetShadowPrefix;
2997     Name << std::setw(2) << std::setfill('0') << std::hex << Val;
2998     AsanSetShadowFunc[Val] =
2999         M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3000   }
3001 
3002   AsanAllocaPoisonFunc = M.getOrInsertFunction(
3003       kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3004   AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3005       kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3006 }
3007 
3008 void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3009                                                ArrayRef<uint8_t> ShadowBytes,
3010                                                size_t Begin, size_t End,
3011                                                IRBuilder<> &IRB,
3012                                                Value *ShadowBase) {
3013   if (Begin >= End)
3014     return;
3015 
3016   const size_t LargestStoreSizeInBytes =
3017       std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3018 
3019   const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();
3020 
3021   // Poison given range in shadow using larges store size with out leading and
3022   // trailing zeros in ShadowMask. Zeros never change, so they need neither
3023   // poisoning nor up-poisoning. Still we don't mind if some of them get into a
3024   // middle of a store.
3025   for (size_t i = Begin; i < End;) {
3026     if (!ShadowMask[i]) {
3027       assert(!ShadowBytes[i]);
3028       ++i;
3029       continue;
3030     }
3031 
3032     size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3033     // Fit store size into the range.
3034     while (StoreSizeInBytes > End - i)
3035       StoreSizeInBytes /= 2;
3036 
3037     // Minimize store size by trimming trailing zeros.
3038     for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3039       while (j <= StoreSizeInBytes / 2)
3040         StoreSizeInBytes /= 2;
3041     }
3042 
3043     uint64_t Val = 0;
3044     for (size_t j = 0; j < StoreSizeInBytes; j++) {
3045       if (IsLittleEndian)
3046         Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3047       else
3048         Val = (Val << 8) | ShadowBytes[i + j];
3049     }
3050 
3051     Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3052     Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3053     IRB.CreateAlignedStore(
3054         Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
3055         Align(1));
3056 
3057     i += StoreSizeInBytes;
3058   }
3059 }
3060 
3061 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3062                                          ArrayRef<uint8_t> ShadowBytes,
3063                                          IRBuilder<> &IRB, Value *ShadowBase) {
3064   copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3065 }
3066 
3067 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3068                                          ArrayRef<uint8_t> ShadowBytes,
3069                                          size_t Begin, size_t End,
3070                                          IRBuilder<> &IRB, Value *ShadowBase) {
3071   assert(ShadowMask.size() == ShadowBytes.size());
3072   size_t Done = Begin;
3073   for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3074     if (!ShadowMask[i]) {
3075       assert(!ShadowBytes[i]);
3076       continue;
3077     }
3078     uint8_t Val = ShadowBytes[i];
3079     if (!AsanSetShadowFunc[Val])
3080       continue;
3081 
3082     // Skip same values.
3083     for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3084     }
3085 
3086     if (j - i >= ClMaxInlinePoisoningSize) {
3087       copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3088       IRB.CreateCall(AsanSetShadowFunc[Val],
3089                      {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3090                       ConstantInt::get(IntptrTy, j - i)});
3091       Done = j;
3092     }
3093   }
3094 
3095   copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3096 }
3097 
3098 // Fake stack allocator (asan_fake_stack.h) has 11 size classes
3099 // for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
3100 static int StackMallocSizeClass(uint64_t LocalStackSize) {
3101   assert(LocalStackSize <= kMaxStackMallocSize);
3102   uint64_t MaxSize = kMinStackMallocSize;
3103   for (int i = 0;; i++, MaxSize *= 2)
3104     if (LocalStackSize <= MaxSize) return i;
3105   llvm_unreachable("impossible LocalStackSize");
3106 }
3107 
3108 void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3109   Instruction *CopyInsertPoint = &F.front().front();
3110   if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3111     // Insert after the dynamic shadow location is determined
3112     CopyInsertPoint = CopyInsertPoint->getNextNode();
3113     assert(CopyInsertPoint);
3114   }
3115   IRBuilder<> IRB(CopyInsertPoint);
3116   const DataLayout &DL = F.getParent()->getDataLayout();
3117   for (Argument &Arg : F.args()) {
3118     if (Arg.hasByValAttr()) {
3119       Type *Ty = Arg.getParamByValType();
3120       const Align Alignment =
3121           DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3122 
3123       AllocaInst *AI = IRB.CreateAlloca(
3124           Ty, nullptr,
3125           (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3126               ".byval");
3127       AI->setAlignment(Alignment);
3128       Arg.replaceAllUsesWith(AI);
3129 
3130       uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3131       IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3132     }
3133   }
3134 }
3135 
3136 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3137                                           Value *ValueIfTrue,
3138                                           Instruction *ThenTerm,
3139                                           Value *ValueIfFalse) {
3140   PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3141   BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3142   PHI->addIncoming(ValueIfFalse, CondBlock);
3143   BasicBlock *ThenBlock = ThenTerm->getParent();
3144   PHI->addIncoming(ValueIfTrue, ThenBlock);
3145   return PHI;
3146 }
3147 
3148 Value *FunctionStackPoisoner::createAllocaForLayout(
3149     IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3150   AllocaInst *Alloca;
3151   if (Dynamic) {
3152     Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3153                               ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3154                               "MyAlloca");
3155   } else {
3156     Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3157                               nullptr, "MyAlloca");
3158     assert(Alloca->isStaticAlloca());
3159   }
3160   assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3161   size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
3162   Alloca->setAlignment(Align(FrameAlignment));
3163   return IRB.CreatePointerCast(Alloca, IntptrTy);
3164 }
3165 
3166 void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3167   BasicBlock &FirstBB = *F.begin();
3168   IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3169   DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3170   IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3171   DynamicAllocaLayout->setAlignment(Align(32));
3172 }
3173 
3174 void FunctionStackPoisoner::processDynamicAllocas() {
3175   if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3176     assert(DynamicAllocaPoisonCallVec.empty());
3177     return;
3178   }
3179 
3180   // Insert poison calls for lifetime intrinsics for dynamic allocas.
3181   for (const auto &APC : DynamicAllocaPoisonCallVec) {
3182     assert(APC.InsBefore);
3183     assert(APC.AI);
3184     assert(ASan.isInterestingAlloca(*APC.AI));
3185     assert(!APC.AI->isStaticAlloca());
3186 
3187     IRBuilder<> IRB(APC.InsBefore);
3188     poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3189     // Dynamic allocas will be unpoisoned unconditionally below in
3190     // unpoisonDynamicAllocas.
3191     // Flag that we need unpoison static allocas.
3192   }
3193 
3194   // Handle dynamic allocas.
3195   createDynamicAllocasInitStorage();
3196   for (auto &AI : DynamicAllocaVec)
3197     handleDynamicAllocaCall(AI);
3198   unpoisonDynamicAllocas();
3199 }
3200 
3201 /// Collect instructions in the entry block after \p InsBefore which initialize
3202 /// permanent storage for a function argument. These instructions must remain in
3203 /// the entry block so that uninitialized values do not appear in backtraces. An
3204 /// added benefit is that this conserves spill slots. This does not move stores
3205 /// before instrumented / "interesting" allocas.
3206 static void findStoresToUninstrumentedArgAllocas(
3207     AddressSanitizer &ASan, Instruction &InsBefore,
3208     SmallVectorImpl<Instruction *> &InitInsts) {
3209   Instruction *Start = InsBefore.getNextNonDebugInstruction();
3210   for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
3211     // Argument initialization looks like:
3212     // 1) store <Argument>, <Alloca> OR
3213     // 2) <CastArgument> = cast <Argument> to ...
3214     //    store <CastArgument> to <Alloca>
3215     // Do not consider any other kind of instruction.
3216     //
3217     // Note: This covers all known cases, but may not be exhaustive. An
3218     // alternative to pattern-matching stores is to DFS over all Argument uses:
3219     // this might be more general, but is probably much more complicated.
3220     if (isa<AllocaInst>(It) || isa<CastInst>(It))
3221       continue;
3222     if (auto *Store = dyn_cast<StoreInst>(It)) {
3223       // The store destination must be an alloca that isn't interesting for
3224       // ASan to instrument. These are moved up before InsBefore, and they're
3225       // not interesting because allocas for arguments can be mem2reg'd.
3226       auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3227       if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3228         continue;
3229 
3230       Value *Val = Store->getValueOperand();
3231       bool IsDirectArgInit = isa<Argument>(Val);
3232       bool IsArgInitViaCast =
3233           isa<CastInst>(Val) &&
3234           isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3235           // Check that the cast appears directly before the store. Otherwise
3236           // moving the cast before InsBefore may break the IR.
3237           Val == It->getPrevNonDebugInstruction();
3238       bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3239       if (!IsArgInit)
3240         continue;
3241 
3242       if (IsArgInitViaCast)
3243         InitInsts.push_back(cast<Instruction>(Val));
3244       InitInsts.push_back(Store);
3245       continue;
3246     }
3247 
3248     // Do not reorder past unknown instructions: argument initialization should
3249     // only involve casts and stores.
3250     return;
3251   }
3252 }
3253 
3254 void FunctionStackPoisoner::processStaticAllocas() {
3255   if (AllocaVec.empty()) {
3256     assert(StaticAllocaPoisonCallVec.empty());
3257     return;
3258   }
3259 
3260   int StackMallocIdx = -1;
3261   DebugLoc EntryDebugLocation;
3262   if (auto SP = F.getSubprogram())
3263     EntryDebugLocation =
3264         DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3265 
3266   Instruction *InsBefore = AllocaVec[0];
3267   IRBuilder<> IRB(InsBefore);
3268 
3269   // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3270   // debug info is broken, because only entry-block allocas are treated as
3271   // regular stack slots.
3272   auto InsBeforeB = InsBefore->getParent();
3273   assert(InsBeforeB == &F.getEntryBlock());
3274   for (auto *AI : StaticAllocasToMoveUp)
3275     if (AI->getParent() == InsBeforeB)
3276       AI->moveBefore(InsBefore);
3277 
3278   // Move stores of arguments into entry-block allocas as well. This prevents
3279   // extra stack slots from being generated (to house the argument values until
3280   // they can be stored into the allocas). This also prevents uninitialized
3281   // values from being shown in backtraces.
3282   SmallVector<Instruction *, 8> ArgInitInsts;
3283   findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3284   for (Instruction *ArgInitInst : ArgInitInsts)
3285     ArgInitInst->moveBefore(InsBefore);
3286 
3287   // If we have a call to llvm.localescape, keep it in the entry block.
3288   if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
3289 
3290   SmallVector<ASanStackVariableDescription, 16> SVD;
3291   SVD.reserve(AllocaVec.size());
3292   for (AllocaInst *AI : AllocaVec) {
3293     ASanStackVariableDescription D = {AI->getName().data(),
3294                                       ASan.getAllocaSizeInBytes(*AI),
3295                                       0,
3296                                       AI->getAlignment(),
3297                                       AI,
3298                                       0,
3299                                       0};
3300     SVD.push_back(D);
3301   }
3302 
3303   // Minimal header size (left redzone) is 4 pointers,
3304   // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms.
3305   size_t Granularity = 1ULL << Mapping.Scale;
3306   size_t MinHeaderSize = std::max((size_t)ASan.LongSize / 2, Granularity);
3307   const ASanStackFrameLayout &L =
3308       ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3309 
3310   // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3311   DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3312   for (auto &Desc : SVD)
3313     AllocaToSVDMap[Desc.AI] = &Desc;
3314 
3315   // Update SVD with information from lifetime intrinsics.
3316   for (const auto &APC : StaticAllocaPoisonCallVec) {
3317     assert(APC.InsBefore);
3318     assert(APC.AI);
3319     assert(ASan.isInterestingAlloca(*APC.AI));
3320     assert(APC.AI->isStaticAlloca());
3321 
3322     ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3323     Desc.LifetimeSize = Desc.Size;
3324     if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3325       if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3326         if (LifetimeLoc->getFile() == FnLoc->getFile())
3327           if (unsigned Line = LifetimeLoc->getLine())
3328             Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3329       }
3330     }
3331   }
3332 
3333   auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3334   LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3335   uint64_t LocalStackSize = L.FrameSize;
3336   bool DoStackMalloc =
3337       ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3338       !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3339   bool DoDynamicAlloca = ClDynamicAllocaStack;
3340   // Don't do dynamic alloca or stack malloc if:
3341   // 1) There is inline asm: too often it makes assumptions on which registers
3342   //    are available.
3343   // 2) There is a returns_twice call (typically setjmp), which is
3344   //    optimization-hostile, and doesn't play well with introduced indirect
3345   //    register-relative calculation of local variable addresses.
3346   DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3347   DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3348 
3349   Value *StaticAlloca =
3350       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3351 
3352   Value *FakeStack;
3353   Value *LocalStackBase;
3354   Value *LocalStackBaseAlloca;
3355   uint8_t DIExprFlags = DIExpression::ApplyOffset;
3356 
3357   if (DoStackMalloc) {
3358     LocalStackBaseAlloca =
3359         IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3360     if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3361       // void *FakeStack = __asan_option_detect_stack_use_after_return
3362       //     ? __asan_stack_malloc_N(LocalStackSize)
3363       //     : nullptr;
3364       // void *LocalStackBase = (FakeStack) ? FakeStack :
3365       //                        alloca(LocalStackSize);
3366       Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3367           kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3368       Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3369           IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3370           Constant::getNullValue(IRB.getInt32Ty()));
3371       Instruction *Term =
3372           SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3373       IRBuilder<> IRBIf(Term);
3374       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3375       assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3376       Value *FakeStackValue =
3377           IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3378                            ConstantInt::get(IntptrTy, LocalStackSize));
3379       IRB.SetInsertPoint(InsBefore);
3380       FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3381                             ConstantInt::get(IntptrTy, 0));
3382     } else {
3383       // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode:Always)
3384       // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3385       // void *LocalStackBase = (FakeStack) ? FakeStack :
3386       //                        alloca(LocalStackSize);
3387       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3388       FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3389                                  ConstantInt::get(IntptrTy, LocalStackSize));
3390     }
3391     Value *NoFakeStack =
3392         IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3393     Instruction *Term =
3394         SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3395     IRBuilder<> IRBIf(Term);
3396     Value *AllocaValue =
3397         DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3398 
3399     IRB.SetInsertPoint(InsBefore);
3400     LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3401     IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3402     DIExprFlags |= DIExpression::DerefBefore;
3403   } else {
3404     // void *FakeStack = nullptr;
3405     // void *LocalStackBase = alloca(LocalStackSize);
3406     FakeStack = ConstantInt::get(IntptrTy, 0);
3407     LocalStackBase =
3408         DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3409     LocalStackBaseAlloca = LocalStackBase;
3410   }
3411 
3412   // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3413   // dbg.declare address opereand, but passing a `ptrtoint` seems to confuse
3414   // later passes and can result in dropped variable coverage in debug info.
3415   Value *LocalStackBaseAllocaPtr =
3416       isa<PtrToIntInst>(LocalStackBaseAlloca)
3417           ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3418           : LocalStackBaseAlloca;
3419   assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3420          "Variable descriptions relative to ASan stack base will be dropped");
3421 
3422   // Replace Alloca instructions with base+offset.
3423   for (const auto &Desc : SVD) {
3424     AllocaInst *AI = Desc.AI;
3425     replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3426                       Desc.Offset);
3427     Value *NewAllocaPtr = IRB.CreateIntToPtr(
3428         IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3429         AI->getType());
3430     AI->replaceAllUsesWith(NewAllocaPtr);
3431   }
3432 
3433   // The left-most redzone has enough space for at least 4 pointers.
3434   // Write the Magic value to redzone[0].
3435   Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3436   IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3437                   BasePlus0);
3438   // Write the frame description constant to redzone[1].
3439   Value *BasePlus1 = IRB.CreateIntToPtr(
3440       IRB.CreateAdd(LocalStackBase,
3441                     ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3442       IntptrPtrTy);
3443   GlobalVariable *StackDescriptionGlobal =
3444       createPrivateGlobalForString(*F.getParent(), DescriptionString,
3445                                    /*AllowMerging*/ true, kAsanGenPrefix);
3446   Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3447   IRB.CreateStore(Description, BasePlus1);
3448   // Write the PC to redzone[2].
3449   Value *BasePlus2 = IRB.CreateIntToPtr(
3450       IRB.CreateAdd(LocalStackBase,
3451                     ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3452       IntptrPtrTy);
3453   IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3454 
3455   const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3456 
3457   // Poison the stack red zones at the entry.
3458   Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3459   // As mask we must use most poisoned case: red zones and after scope.
3460   // As bytes we can use either the same or just red zones only.
3461   copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3462 
3463   if (!StaticAllocaPoisonCallVec.empty()) {
3464     const auto &ShadowInScope = GetShadowBytes(SVD, L);
3465 
3466     // Poison static allocas near lifetime intrinsics.
3467     for (const auto &APC : StaticAllocaPoisonCallVec) {
3468       const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3469       assert(Desc.Offset % L.Granularity == 0);
3470       size_t Begin = Desc.Offset / L.Granularity;
3471       size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3472 
3473       IRBuilder<> IRB(APC.InsBefore);
3474       copyToShadow(ShadowAfterScope,
3475                    APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3476                    IRB, ShadowBase);
3477     }
3478   }
3479 
3480   SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3481   SmallVector<uint8_t, 64> ShadowAfterReturn;
3482 
3483   // (Un)poison the stack before all ret instructions.
3484   for (Instruction *Ret : RetVec) {
3485     IRBuilder<> IRBRet(Ret);
3486     // Mark the current frame as retired.
3487     IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3488                        BasePlus0);
3489     if (DoStackMalloc) {
3490       assert(StackMallocIdx >= 0);
3491       // if FakeStack != 0  // LocalStackBase == FakeStack
3492       //     // In use-after-return mode, poison the whole stack frame.
3493       //     if StackMallocIdx <= 4
3494       //         // For small sizes inline the whole thing:
3495       //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3496       //         **SavedFlagPtr(FakeStack) = 0
3497       //     else
3498       //         __asan_stack_free_N(FakeStack, LocalStackSize)
3499       // else
3500       //     <This is not a fake stack; unpoison the redzones>
3501       Value *Cmp =
3502           IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3503       Instruction *ThenTerm, *ElseTerm;
3504       SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3505 
3506       IRBuilder<> IRBPoison(ThenTerm);
3507       if (StackMallocIdx <= 4) {
3508         int ClassSize = kMinStackMallocSize << StackMallocIdx;
3509         ShadowAfterReturn.resize(ClassSize / L.Granularity,
3510                                  kAsanStackUseAfterReturnMagic);
3511         copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3512                      ShadowBase);
3513         Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3514             FakeStack,
3515             ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3516         Value *SavedFlagPtr = IRBPoison.CreateLoad(
3517             IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3518         IRBPoison.CreateStore(
3519             Constant::getNullValue(IRBPoison.getInt8Ty()),
3520             IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
3521       } else {
3522         // For larger frames call __asan_stack_free_*.
3523         IRBPoison.CreateCall(
3524             AsanStackFreeFunc[StackMallocIdx],
3525             {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3526       }
3527 
3528       IRBuilder<> IRBElse(ElseTerm);
3529       copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3530     } else {
3531       copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3532     }
3533   }
3534 
3535   // We are done. Remove the old unused alloca instructions.
3536   for (auto AI : AllocaVec) AI->eraseFromParent();
3537 }
3538 
3539 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3540                                          IRBuilder<> &IRB, bool DoPoison) {
3541   // For now just insert the call to ASan runtime.
3542   Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3543   Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3544   IRB.CreateCall(
3545       DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3546       {AddrArg, SizeArg});
3547 }
3548 
3549 // Handling llvm.lifetime intrinsics for a given %alloca:
3550 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3551 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3552 //     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3553 //     could be poisoned by previous llvm.lifetime.end instruction, as the
3554 //     variable may go in and out of scope several times, e.g. in loops).
3555 // (3) if we poisoned at least one %alloca in a function,
3556 //     unpoison the whole stack frame at function exit.
3557 void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3558   IRBuilder<> IRB(AI);
3559 
3560   const unsigned Alignment = std::max(kAllocaRzSize, AI->getAlignment());
3561   const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3562 
3563   Value *Zero = Constant::getNullValue(IntptrTy);
3564   Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3565   Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3566 
3567   // Since we need to extend alloca with additional memory to locate
3568   // redzones, and OldSize is number of allocated blocks with
3569   // ElementSize size, get allocated memory size in bytes by
3570   // OldSize * ElementSize.
3571   const unsigned ElementSize =
3572       F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3573   Value *OldSize =
3574       IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3575                     ConstantInt::get(IntptrTy, ElementSize));
3576 
3577   // PartialSize = OldSize % 32
3578   Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3579 
3580   // Misalign = kAllocaRzSize - PartialSize;
3581   Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3582 
3583   // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3584   Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3585   Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3586 
3587   // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3588   // Alignment is added to locate left redzone, PartialPadding for possible
3589   // partial redzone and kAllocaRzSize for right redzone respectively.
3590   Value *AdditionalChunkSize = IRB.CreateAdd(
3591       ConstantInt::get(IntptrTy, Alignment + kAllocaRzSize), PartialPadding);
3592 
3593   Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3594 
3595   // Insert new alloca with new NewSize and Alignment params.
3596   AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3597   NewAlloca->setAlignment(Align(Alignment));
3598 
3599   // NewAddress = Address + Alignment
3600   Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3601                                     ConstantInt::get(IntptrTy, Alignment));
3602 
3603   // Insert __asan_alloca_poison call for new created alloca.
3604   IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
3605 
3606   // Store the last alloca's address to DynamicAllocaLayout. We'll need this
3607   // for unpoisoning stuff.
3608   IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3609 
3610   Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3611 
3612   // Replace all uses of AddessReturnedByAlloca with NewAddressPtr.
3613   AI->replaceAllUsesWith(NewAddressPtr);
3614 
3615   // We are done. Erase old alloca from parent.
3616   AI->eraseFromParent();
3617 }
3618 
3619 // isSafeAccess returns true if Addr is always inbounds with respect to its
3620 // base object. For example, it is a field access or an array access with
3621 // constant inbounds index.
3622 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3623                                     Value *Addr, uint64_t TypeSize) const {
3624   SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
3625   if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
3626   uint64_t Size = SizeOffset.first.getZExtValue();
3627   int64_t Offset = SizeOffset.second.getSExtValue();
3628   // Three checks are required to ensure safety:
3629   // . Offset >= 0  (since the offset is given from the base ptr)
3630   // . Size >= Offset  (unsigned)
3631   // . Size - Offset >= NeededSize  (unsigned)
3632   return Offset >= 0 && Size >= uint64_t(Offset) &&
3633          Size - uint64_t(Offset) >= TypeSize / 8;
3634 }
3635