//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//  https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;  // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
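
// Illustrative note (an assumption inferred from the constants above and
// kMaxAsanStackMallocSizeClass below, not stated elsewhere in this file):
// the fake-stack runtime keeps one size class per power of two, where class
// K serves frames of up to kMinStackMallocSize << K bytes. Class 0 would
// then cover 64-byte frames and class 10 would cover
// kMinStackMallocSize << 10 == kMaxStackMallocSize == 64K; larger frames
// stay on the real stack.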

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const unsigned kAllocaRzSize = 32;

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."),
    cl::Hidden, cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
    "asan-use-after-return",
    cl::desc("Sets the mode of detection for stack-use-after-return."),
    cl::values(
        clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
                   "Never detect stack use after return."),
        clEnumValN(
            AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
            "Detect stack use after return if "
            "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
        clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
                   "Always detect stack use after return.")),
    cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));
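
// Usage note (illustrative; the environment variable is read by the ASan
// runtime, not by this pass): with the default "runtime" mode the
// instrumented binary only checks for stack-use-after-return when run as
//   ASAN_OPTIONS=detect_stack_use_after_return=1 ./a.out
// which flips __asan_option_detect_stack_use_after_return declared above.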

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"), cl::Hidden,
                                        cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerCmp(
    "asan-detect-invalid-pointer-cmp",
    cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerSub(
    "asan-detect-invalid-pointer-sub",
    cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));

static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented contains more than "
        "this number of memory accesses, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));

static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

// These flags allow changing the shadow mapping. The shadow mapping looks
// like
//    Shadow = (Mem >> scale) + offset

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));
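
// Worked example (illustrative only; the concrete constants come from
// getShadowMapping below): on Linux/x86_64 the defaults are scale = 3 and
// offset = 0x7fff8000, so an access to Mem is checked via the shadow byte at
//   Shadow = (Mem >> 3) + 0x7fff8000
// Passing -asan-mapping-scale=4 -asan-mapping-offset=0x40000000 instead
// yields Shadow = (Mem >> 4) + 0x40000000, i.e. 16-byte shadow granularity,
// provided the runtime is configured with a matching mapping.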

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
    "asan-destructor-kind",
    cl::desc("Sets the ASan destructor kind. The default is to use the value "
             "provided to the pass constructor"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Use global destructors")),
    cl::init(AsanDtorKind::Invalid), cl::Hidden);

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS4CPU = TargetTriple.isPS4CPU();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsEmscripten = TargetTriple.isOSEmscripten();
  bool IsAMDGPU = TargetTriple.isAMDGPU();

  // Asan support for AMDGPU assumes X86 as the host right now.
  if (IsAMDGPU)
    IsX86_64 = true;

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsEmscripten)
      Mapping.Offset = kEmscriptenShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS4CPU)
      Mapping.Offset = kPS4CPU_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) when the
  // offset is a power of two, but on ppc64 we have to use add since the
  // shadow offset is not necessarily 1/8th of the address space. On SystemZ,
  // we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
                           !IsRISCV64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
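
// Worked example (illustrative only): with the default MappingScale of 3 the
// formula above yields max(32, 1 << 3) = 32-byte redzones; scale 6 gives
// max(32, 64) = 64 bytes and scale 7 gives 128 bytes, matching the comment
// in getRedzoneSizeForScale.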

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

namespace {

/// Module analysis for getting various metadata about the module.
class ASanGlobalsMetadataWrapperPass : public ModulePass {
public:
  static char ID;

  ASanGlobalsMetadataWrapperPass() : ModulePass(ID) {
    initializeASanGlobalsMetadataWrapperPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    GlobalsMD = GlobalsMetadata(M);
    return false;
  }

  StringRef getPassName() const override {
    return "ASanGlobalsMetadataWrapperPass";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  GlobalsMetadata &getGlobalsMD() { return GlobalsMD; }

private:
  GlobalsMetadata GlobalsMD;
};

char ASanGlobalsMetadataWrapperPass::ID = 0;

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
                   bool CompileKernel = false, bool Recover = false,
                   bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        GlobalsMD(*GlobalsMD) {
    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    TargetTriple = Triple(M.getTargetTriple());

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
    uint64_t ArraySize = 1;
    if (AI.isArrayAllocation()) {
      const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
      assert(CI && "non-constant array size");
      ArraySize = CI->getZExtValue();
    }
    Type *Ty = AI.getAllocatedType();
    uint64_t SizeInBytes =
        AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
    return SizeInBytes * ArraySize;
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeSize, bool IsWrite,
                                       Value *SizeArgument);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and
  // log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const GlobalsMetadata &GlobalsMD;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
};

class AddressSanitizerLegacyPass : public FunctionPass {
public:
  static char ID;

  explicit AddressSanitizerLegacyPass(
      bool CompileKernel = false, bool Recover = false,
      bool UseAfterScope = false,
      AsanDetectStackUseAfterReturnMode UseAfterReturn =
          AsanDetectStackUseAfterReturnMode::Runtime)
      : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
        UseAfterScope(UseAfterScope), UseAfterReturn(UseAfterReturn) {
    initializeAddressSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ASanGlobalsMetadataWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    GlobalsMetadata &GlobalsMD =
        getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    AddressSanitizer ASan(*F.getParent(), &GlobalsMD, CompileKernel, Recover,
                          UseAfterScope, UseAfterReturn);
    return ASan.instrumentFunction(F, TLI);
  }

private:
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = false,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global)
      : GlobalsMD(*GlobalsMD),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias),
        UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore,
        // disable both ClWithComdat and ClUseGlobalsGC unless the frontend
        // says it's ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    TargetTriple = Triple(M.getTargetTriple());
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule(Module &);

private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor(Module &M);

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion(const Module &M) const;

  const GlobalsMetadata &GlobalsMD;
  bool CompileKernel;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  Type *IntptrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};

class ModuleAddressSanitizerLegacyPass : public ModulePass {
public:
  static char ID;

  explicit ModuleAddressSanitizerLegacyPass(
      bool CompileKernel = false, bool Recover = false, bool UseGlobalGC = true,
      bool UseOdrIndicator = false,
      AsanDtorKind DestructorKind = AsanDtorKind::Global)
      : ModulePass(ID), CompileKernel(CompileKernel), Recover(Recover),
        UseGlobalGC(UseGlobalGC), UseOdrIndicator(UseOdrIndicator),
        DestructorKind(DestructorKind) {
    initializeModuleAddressSanitizerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "ModuleAddressSanitizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ASanGlobalsMetadataWrapperPass>();
  }

  bool runOnModule(Module &M) override {
    GlobalsMetadata &GlobalsMD =
        getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
    ModuleAddressSanitizer ASanModule(M, &GlobalsMD, CompileKernel, Recover,
                                      UseGlobalGC, UseOdrIndicator,
                                      DestructorKind);
    return ASanModule.instrumentModule(M);
  }

private:
  bool CompileKernel;
  bool Recover;
  bool UseGlobalGC;
  bool UseOdrIndicator;
  AsanDtorKind DestructorKind;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;
  unsigned StackAlignment;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
  bool HasUntracedLifetimeIntrinsic = false;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
        C(ASan.C), IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale),
        PoisonStack(ClStack &&
                    !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty())
      return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    RetVec.push_back(&CRI);
  }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust extracted SP to compute the address of the most recent
    // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
    // this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    IRB.CreateCall(
        AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }
  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // should replace the call with another one with changed parameters and
  // replace all its uses with the new address, so
  //     addr = alloca type, old_size, align
  // is replaced by
  //     new_size = (old_size + additional_size) * sizeof(type)
  //     tmp = alloca i8, new_size, max(align, 32)
  //     addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!ASan.isInterestingAlloca(AI)) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    auto *Size = cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    // Currently we can only handle lifetime markers pointing to the
    // beginning of the alloca.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
    if (!AI) {
      HasUntracedLifetimeIntrinsic = true;
      return;
    }
    // We're interested only in allocas we can handle.
    if (!ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);
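
  // Illustrative example (values assumed for exposition, not taken from this
  // file): for two 8-byte shadow granules where the first is to be left
  // untouched and the second poisoned as a redzone, the caller might pass
  //   ShadowMask  = {0, 1}
  //   ShadowBytes = {0x00, 0xf1}
  // and copyToShadow would emit a single store of 0xf1 to ShadowBase + 1,
  // skipping index 0 because its mask entry is zero.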

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void LocationMetadata::parse(MDNode *MDN) {
  assert(MDN->getNumOperands() == 3);
  MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
  Filename = DIFilename->getString();
  LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
  ColumnNo =
      mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
}
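
// For reference (shape inferred from the asserts in LocationMetadata::parse
// above and GlobalsMetadata below; it is not spelled out elsewhere in this
// file): each entry of !llvm.asan.globals is a 5-operand node, e.g.
//   !{ptr @g, !{!"file.cpp", i32 12, i32 3}, !"g", i1 false, i1 false}
// holding the global, its source location, its name, IsDynInit and
// IsExcluded.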

// FIXME: It would be cleaner to attach relevant metadata to the globals we
// want to sanitize and read it on each pass over a function, instead of
// reading module-level metadata up front.
GlobalsMetadata::GlobalsMetadata(Module &M) {
  NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
  if (!Globals)
    return;
  for (auto MDN : Globals->operands()) {
    // Metadata node contains the global and the fields of "Entry".
    assert(MDN->getNumOperands() == 5);
    auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
    // The optimizer may optimize away a global entirely.
    if (!V)
      continue;
    auto *StrippedV = V->stripPointerCasts();
    auto *GV = dyn_cast<GlobalVariable>(StrippedV);
    if (!GV)
      continue;
    // We can already have an entry for GV if it was merged with another
    // global.
    Entry &E = Entries[GV];
    if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
      E.SourceLoc.parse(Loc);
    if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
      E.Name = Name->getString();
    ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3));
    E.IsDynInit |= IsDynInit->isOne();
    ConstantInt *IsExcluded =
        mdconst::extract<ConstantInt>(MDN->getOperand(4));
    E.IsExcluded |= IsExcluded->isOne();
  }
}

AnalysisKey ASanGlobalsMetadataAnalysis::Key;

GlobalsMetadata ASanGlobalsMetadataAnalysis::run(Module &M,
                                                 ModuleAnalysisManager &AM) {
  return GlobalsMetadata(M);
}

AddressSanitizerPass::AddressSanitizerPass(
    bool CompileKernel, bool Recover, bool UseAfterScope,
    AsanDetectStackUseAfterReturnMode UseAfterReturn)
    : CompileKernel(CompileKernel), Recover(Recover),
      UseAfterScope(UseAfterScope), UseAfterReturn(UseAfterReturn) {}

PreservedAnalyses AddressSanitizerPass::run(Function &F,
                                            AnalysisManager<Function> &AM) {
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  Module &M = *F.getParent();
  if (auto *R = MAMProxy.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) {
    const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
    AddressSanitizer Sanitizer(M, R, CompileKernel, Recover, UseAfterScope,
                               UseAfterReturn);
    if (Sanitizer.instrumentFunction(F, TLI))
      return PreservedAnalyses::none();
    return PreservedAnalyses::all();
  }

  report_fatal_error(
      "The ASanGlobalsMetadataAnalysis is required to run before "
      "AddressSanitizer can run");
  return PreservedAnalyses::all();
}

ModuleAddressSanitizerPass::ModuleAddressSanitizerPass(
    bool CompileKernel, bool Recover, bool UseGlobalGC, bool UseOdrIndicator,
    AsanDtorKind DestructorKind)
    : CompileKernel(CompileKernel), Recover(Recover), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind) {}

PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M,
                                                  AnalysisManager<Module> &AM) {
  GlobalsMetadata &GlobalsMD = AM.getResult<ASanGlobalsMetadataAnalysis>(M);
  ModuleAddressSanitizer Sanitizer(M, &GlobalsMD, CompileKernel, Recover,
                                   UseGlobalGC, UseOdrIndicator,
                                   DestructorKind);
  if (Sanitizer.instrumentModule(M))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

INITIALIZE_PASS(ASanGlobalsMetadataWrapperPass, "asan-globals-md",
                "Read metadata to mark which globals should be instrumented "
                "when running ASan.",
                false, true)

char AddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(
    AddressSanitizerLegacyPass, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    AddressSanitizerLegacyPass, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)

FunctionPass *llvm::createAddressSanitizerFunctionPass(
    bool CompileKernel, bool Recover, bool UseAfterScope,
    AsanDetectStackUseAfterReturnMode UseAfterReturn) {
  assert(!CompileKernel || Recover);
  return new AddressSanitizerLegacyPass(CompileKernel, Recover, UseAfterScope,
                                        UseAfterReturn);
}

char ModuleAddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS(
    ModuleAddressSanitizerLegacyPass, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
    "ModulePass",
    false, false)

ModulePass *llvm::createModuleAddressSanitizerLegacyPassPass(
    bool CompileKernel, bool Recover, bool UseGlobalsGC, bool UseOdrIndicator,
    AsanDtorKind Destructor) {
  assert(!CompileKernel || Recover);
  return new ModuleAddressSanitizerLegacyPass(
      CompileKernel, Recover, UseGlobalsGC, UseOdrIndicator, Destructor);
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
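
// Worked example (illustrative only): a 32-bit access has TypeSize / 8 == 4,
// and countTrailingZeros(4) == 2, so it uses callback slot 2; bit sizes
// 8, 16, 32, 64 and 128 map to indices 0 through 4, matching
// kNumberOfAccessSizes above.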

/// Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
                                                       LocationMetadata MD) {
  Constant *LocData[] = {
      createPrivateGlobalForString(M, MD.Filename, true, kAsanGenPrefix),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
  };
  auto LocStruct = ConstantStruct::getAnon(LocData);
  auto GV = new GlobalVariable(M, LocStruct->getType(), true,
                               GlobalValue::PrivateLinkage, LocStruct,
                               kAsanGenPrefix);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  return GV;
}

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().startswith("llvm."))
    return true;

  // Do not instrument asan globals.
  if (G->getName().startswith(kAsanGenPrefix) ||
      G->getName().startswith(kSanCovGenPrefix) ||
      G->getName().startswith(kODRGenPrefix))
    return true;

  // Do not instrument gcov counter arrays.
  if (G->getName() == "__llvm_gcov_ctr")
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
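
// Worked example (illustrative only; 0x7fff8000 is the Linux/x86_64 offset
// computed in getShadowMapping): for an access to address 0x602000000010,
//   memToShadow = (0x602000000010 >> 3) + 0x7fff8000 = 0xc047fff8002
// and the single shadow byte at that address describes all 8 application
// bytes of the granule containing the access.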

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        AsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
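
// In effect (illustrative IR sketch, not emitted verbatim by this function):
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
// is replaced by a call to the checking runtime wrapper,
//   call @__asan_memcpy(ptr %dst, ptr %src, i64 %n)
// and the original intrinsic is erased.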

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);

  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError());

  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->hasMetadata("nosanitize"))
    return;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), None);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(), None);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    auto *F = CI->getCalledFunction();
    if (F && (F->getName().startswith("llvm.masked.load.") ||
              F->getName().startswith("llvm.masked.store."))) {
      bool IsWrite = F->getName().startswith("llvm.masked.store.");
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(BasePtr))
        return;
      auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
      MaybeAlign Alignment = Align(1);
      // Otherwise no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
    } else {
      for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}

static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparison(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational())
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
1518 static bool isInterestingPointerSubtraction(Instruction *I) {
1519 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1520 if (BO->getOpcode() != Instruction::Sub)
1521 return false;
1522 } else {
1523 return false;
1524 }
1525 return isPointerOperand(I->getOperand(0)) &&
1526 isPointerOperand(I->getOperand(1));
1527 }
1528
1529 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1530 // If a global variable does not have dynamic initialization we don't
1531 // have to instrument it. However, if a global does not have an initializer
1532 // at all, we assume it has a dynamic initializer (in another TU).
1533 //
1534 // FIXME: Metadata should be attached directly to the global instead
1535 // of being added to llvm.asan.globals.
1536 return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
1537 }
1538
1539 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1540 Instruction *I) {
1541 IRBuilder<> IRB(I);
1542 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1543 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1544 for (Value *&i : Param) {
1545 if (i->getType()->isPointerTy())
1546 i = IRB.CreatePointerCast(i, IntptrTy);
1547 }
1548 IRB.CreateCall(F, Param);
1549 }
1550
1551 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1552 Instruction *InsertBefore, Value *Addr,
1553 MaybeAlign Alignment, unsigned Granularity,
1554 uint32_t TypeSize, bool IsWrite,
1555 Value *SizeArgument, bool UseCalls,
1556 uint32_t Exp) {
1557 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1558 // if the data is properly aligned.
1559 if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
1560 TypeSize == 128) &&
1561 (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
1562 return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
1563 nullptr, UseCalls, Exp);
1564 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
1565 IsWrite, nullptr, UseCalls, Exp);
1566 }
1567
1568 static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
1569 const DataLayout &DL, Type *IntptrTy,
1570 Value *Mask, Instruction *I,
1571 Value *Addr, MaybeAlign Alignment,
1572 unsigned Granularity, uint32_t TypeSize,
1573 bool IsWrite, Value *SizeArgument,
1574 bool UseCalls, uint32_t Exp) {
1575 auto *VTy = cast<FixedVectorType>(
1576 cast<PointerType>(Addr->getType())->getElementType());
1577 uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1578 unsigned Num = VTy->getNumElements();
1579 auto Zero = ConstantInt::get(IntptrTy, 0);
1580 for (unsigned Idx = 0; Idx < Num; ++Idx) {
1581 Value *InstrumentedAddress = nullptr;
1582 Instruction *InsertBefore = I;
1583 if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
1584 // dyn_cast, as we might get an UndefValue.
1585 if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
1586 if (Masked->isZero())
1587 // The mask is constant false, so no instrumentation is needed.
1588 continue; 1589 // If we have a true or undef value, fall through to doInstrumentAddress 1590 // with InsertBefore == I 1591 } 1592 } else { 1593 IRBuilder<> IRB(I); 1594 Value *MaskElem = IRB.CreateExtractElement(Mask, Idx); 1595 Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false); 1596 InsertBefore = ThenTerm; 1597 } 1598 1599 IRBuilder<> IRB(InsertBefore); 1600 InstrumentedAddress = 1601 IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)}); 1602 doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment, 1603 Granularity, ElemTypeSize, IsWrite, SizeArgument, 1604 UseCalls, Exp); 1605 } 1606 } 1607 1608 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, 1609 InterestingMemoryOperand &O, bool UseCalls, 1610 const DataLayout &DL) { 1611 Value *Addr = O.getPtr(); 1612 1613 // Optimization experiments. 1614 // The experiments can be used to evaluate potential optimizations that remove 1615 // instrumentation (assess false negatives). Instead of completely removing 1616 // some instrumentation, you set Exp to a non-zero value (mask of optimization 1617 // experiments that want to remove instrumentation of this instruction). 1618 // If Exp is non-zero, this pass will emit special calls into runtime 1619 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls 1620 // make runtime terminate the program in a special way (with a different 1621 // exit status). Then you run the new compiler on a buggy corpus, collect 1622 // the special terminations (ideally, you don't see them at all -- no false 1623 // negatives) and make the decision on the optimization. 1624 uint32_t Exp = ClForceExperiment; 1625 1626 if (ClOpt && ClOptGlobals) { 1627 // If initialization order checking is disabled, a simple access to a 1628 // dynamically initialized global is always valid. 1629 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr)); 1630 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) && 1631 isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { 1632 NumOptimizedAccessesToGlobalVar++; 1633 return; 1634 } 1635 } 1636 1637 if (ClOpt && ClOptStack) { 1638 // A direct inbounds access to a stack variable is always valid. 1639 if (isa<AllocaInst>(getUnderlyingObject(Addr)) && 1640 isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { 1641 NumOptimizedAccessesToStackVar++; 1642 return; 1643 } 1644 } 1645 1646 if (O.IsWrite) 1647 NumInstrumentedWrites++; 1648 else 1649 NumInstrumentedReads++; 1650 1651 unsigned Granularity = 1 << Mapping.Scale; 1652 if (O.MaybeMask) { 1653 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(), 1654 Addr, O.Alignment, Granularity, O.TypeSize, 1655 O.IsWrite, nullptr, UseCalls, Exp); 1656 } else { 1657 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment, 1658 Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls, 1659 Exp); 1660 } 1661 } 1662 1663 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore, 1664 Value *Addr, bool IsWrite, 1665 size_t AccessSizeIndex, 1666 Value *SizeArgument, 1667 uint32_t Exp) { 1668 IRBuilder<> IRB(InsertBefore); 1669 Value *ExpVal = Exp == 0 ? 
nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1670 CallInst *Call = nullptr;
1671 if (SizeArgument) {
1672 if (Exp == 0)
1673 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
1674 {Addr, SizeArgument});
1675 else
1676 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
1677 {Addr, SizeArgument, ExpVal});
1678 } else {
1679 if (Exp == 0)
1680 Call =
1681 IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1682 else
1683 Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
1684 {Addr, ExpVal});
1685 }
1686
1687 Call->setCannotMerge();
1688 return Call;
1689 }
1690
1691 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1692 Value *ShadowValue,
1693 uint32_t TypeSize) {
1694 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1695 // Addr & (Granularity - 1)
1696 Value *LastAccessedByte =
1697 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1698 // (Addr & (Granularity - 1)) + size - 1
1699 if (TypeSize / 8 > 1)
1700 LastAccessedByte = IRB.CreateAdd(
1701 LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
1702 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1703 LastAccessedByte =
1704 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1705 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1706 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1707 }
1708
1709 Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1710 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1711 uint32_t TypeSize, bool IsWrite, Value *SizeArgument) {
1712 // Do not instrument unsupported addrspaces.
1713 if (isUnsupportedAMDGPUAddrspace(Addr))
1714 return nullptr;
1715 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1716 // Follow host instrumentation for global and constant addresses.
1717 if (PtrTy->getPointerAddressSpace() != 0)
1718 return InsertBefore;
1719 // Instrument generic addresses in supported address spaces.
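// Sketch of the guard the code below emits (pseudo-IR; value names are
// illustrative):
//
//   %shared  = call i1 @is.shared(i8* %addr)   ; AMDGPUAddressShared
//   %private = call i1 @is.private(i8* %addr)  ; AMDGPUAddressPrivate
//   %either  = or i1 %shared, %private
//   %global  = icmp ne i1 true, %either
//   br i1 %global, label %do.shadow.check, label %cont
//
// Only pointers that resolve to global memory fall through to the usual
// shadow check; shared/private lanes skip it.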
1720 IRBuilder<> IRB(InsertBefore); 1721 Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()); 1722 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong}); 1723 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong}); 1724 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate); 1725 Value *Cmp = IRB.CreateICmpNE(IRB.getTrue(), IsSharedOrPrivate); 1726 Value *AddrSpaceZeroLanding = 1727 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false); 1728 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding); 1729 return InsertBefore; 1730 } 1731 1732 void AddressSanitizer::instrumentAddress(Instruction *OrigIns, 1733 Instruction *InsertBefore, Value *Addr, 1734 uint32_t TypeSize, bool IsWrite, 1735 Value *SizeArgument, bool UseCalls, 1736 uint32_t Exp) { 1737 if (TargetTriple.isAMDGPU()) { 1738 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr, 1739 TypeSize, IsWrite, SizeArgument); 1740 if (!InsertBefore) 1741 return; 1742 } 1743 1744 IRBuilder<> IRB(InsertBefore); 1745 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1746 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); 1747 1748 if (UseCalls) { 1749 if (Exp == 0) 1750 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], 1751 AddrLong); 1752 else 1753 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], 1754 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1755 return; 1756 } 1757 1758 Type *ShadowTy = 1759 IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale)); 1760 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); 1761 Value *ShadowPtr = memToShadow(AddrLong, IRB); 1762 Value *CmpVal = Constant::getNullValue(ShadowTy); 1763 Value *ShadowValue = 1764 IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); 1765 1766 Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); 1767 size_t Granularity = 1ULL << Mapping.Scale; 1768 Instruction *CrashTerm = nullptr; 1769 1770 if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { 1771 // We use branch weights for the slow path check, to indicate that the slow 1772 // path is rarely taken. This seems to be the case for SPEC benchmarks. 1773 Instruction *CheckTerm = SplitBlockAndInsertIfThen( 1774 Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000)); 1775 assert(cast<BranchInst>(CheckTerm)->isUnconditional()); 1776 BasicBlock *NextBB = CheckTerm->getSuccessor(0); 1777 IRB.SetInsertPoint(CheckTerm); 1778 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); 1779 if (Recover) { 1780 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false); 1781 } else { 1782 BasicBlock *CrashBlock = 1783 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); 1784 CrashTerm = new UnreachableInst(*C, CrashBlock); 1785 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); 1786 ReplaceInstWithInst(CheckTerm, NewTerm); 1787 } 1788 } else { 1789 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover); 1790 } 1791 1792 Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite, 1793 AccessSizeIndex, SizeArgument, Exp); 1794 Crash->setDebugLoc(OrigIns->getDebugLoc()); 1795 } 1796 1797 // Instrument unusual size or unusual alignment. 1798 // We can not do it with a single check, so we do 1-byte check for the first 1799 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able 1800 // to report the actual access size. 
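// For instance (an illustrative case), a 10-byte access at %a becomes two
// 1-byte shadow checks plus the sized report path:
//
//   check shadow of %a       ; first byte
//   check shadow of %a + 9   ; last byte
//
// so a partial overflow at either end is caught, and the runtime still
// learns the real access size (10) through the size argument.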
1801 void AddressSanitizer::instrumentUnusualSizeOrAlignment( 1802 Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, 1803 bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) { 1804 IRBuilder<> IRB(InsertBefore); 1805 Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); 1806 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1807 if (UseCalls) { 1808 if (Exp == 0) 1809 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0], 1810 {AddrLong, Size}); 1811 else 1812 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1], 1813 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1814 } else { 1815 Value *LastByte = IRB.CreateIntToPtr( 1816 IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), 1817 Addr->getType()); 1818 instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp); 1819 instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp); 1820 } 1821 } 1822 1823 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit, 1824 GlobalValue *ModuleName) { 1825 // Set up the arguments to our poison/unpoison functions. 1826 IRBuilder<> IRB(&GlobalInit.front(), 1827 GlobalInit.front().getFirstInsertionPt()); 1828 1829 // Add a call to poison all external globals before the given function starts. 1830 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); 1831 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); 1832 1833 // Add calls to unpoison all globals before each return instruction. 1834 for (auto &BB : GlobalInit.getBasicBlockList()) 1835 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) 1836 CallInst::Create(AsanUnpoisonGlobals, "", RI); 1837 } 1838 1839 void ModuleAddressSanitizer::createInitializerPoisonCalls( 1840 Module &M, GlobalValue *ModuleName) { 1841 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1842 if (!GV) 1843 return; 1844 1845 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer()); 1846 if (!CA) 1847 return; 1848 1849 for (Use &OP : CA->operands()) { 1850 if (isa<ConstantAggregateZero>(OP)) continue; 1851 ConstantStruct *CS = cast<ConstantStruct>(OP); 1852 1853 // Must have a function or null ptr. 1854 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) { 1855 if (F->getName() == kAsanModuleCtorName) continue; 1856 auto *Priority = cast<ConstantInt>(CS->getOperand(0)); 1857 // Don't instrument CTORs that will run before asan.module_ctor. 1858 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple)) 1859 continue; 1860 poisonOneInitializer(*F, ModuleName); 1861 } 1862 } 1863 } 1864 1865 const GlobalVariable * 1866 ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const { 1867 // In case this function should be expanded to include rules that do not just 1868 // apply when CompileKernel is true, either guard all existing rules with an 1869 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules 1870 // should also apply to user space. 1871 assert(CompileKernel && "Only expecting to be called when compiling kernel"); 1872 1873 const Constant *C = GA.getAliasee(); 1874 1875 // When compiling the kernel, globals that are aliased by symbols prefixed 1876 // by "__" are special and cannot be padded with a redzone. 
1877 if (GA.getName().startswith("__")) 1878 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases()); 1879 1880 return nullptr; 1881 } 1882 1883 bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const { 1884 Type *Ty = G->getValueType(); 1885 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); 1886 1887 // FIXME: Metadata should be attched directly to the global directly instead 1888 // of being added to llvm.asan.globals. 1889 if (GlobalsMD.get(G).IsExcluded) return false; 1890 if (!Ty->isSized()) return false; 1891 if (!G->hasInitializer()) return false; 1892 // Globals in address space 1 and 4 are supported for AMDGPU. 1893 if (G->getAddressSpace() && 1894 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G))) 1895 return false; 1896 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals. 1897 // Two problems with thread-locals: 1898 // - The address of the main thread's copy can't be computed at link-time. 1899 // - Need to poison all copies, not just the main thread's one. 1900 if (G->isThreadLocal()) return false; 1901 // For now, just ignore this Global if the alignment is large. 1902 if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false; 1903 1904 // For non-COFF targets, only instrument globals known to be defined by this 1905 // TU. 1906 // FIXME: We can instrument comdat globals on ELF if we are using the 1907 // GC-friendly metadata scheme. 1908 if (!TargetTriple.isOSBinFormatCOFF()) { 1909 if (!G->hasExactDefinition() || G->hasComdat()) 1910 return false; 1911 } else { 1912 // On COFF, don't instrument non-ODR linkages. 1913 if (G->isInterposable()) 1914 return false; 1915 } 1916 1917 // If a comdat is present, it must have a selection kind that implies ODR 1918 // semantics: no duplicates, any, or exact match. 1919 if (Comdat *C = G->getComdat()) { 1920 switch (C->getSelectionKind()) { 1921 case Comdat::Any: 1922 case Comdat::ExactMatch: 1923 case Comdat::NoDuplicates: 1924 break; 1925 case Comdat::Largest: 1926 case Comdat::SameSize: 1927 return false; 1928 } 1929 } 1930 1931 if (G->hasSection()) { 1932 // The kernel uses explicit sections for mostly special global variables 1933 // that we should not instrument. E.g. the kernel may rely on their layout 1934 // without redzones, or remove them at link time ("discard.*"), etc. 1935 if (CompileKernel) 1936 return false; 1937 1938 StringRef Section = G->getSection(); 1939 1940 // Globals from llvm.metadata aren't emitted, do not instrument them. 1941 if (Section == "llvm.metadata") return false; 1942 // Do not instrument globals from special LLVM sections. 1943 if (Section.find("__llvm") != StringRef::npos || Section.find("__LLVM") != StringRef::npos) return false; 1944 1945 // Do not instrument function pointers to initialization and termination 1946 // routines: dynamic linker will not properly handle redzones. 
1947 if (Section.startswith(".preinit_array") || 1948 Section.startswith(".init_array") || 1949 Section.startswith(".fini_array")) { 1950 return false; 1951 } 1952 1953 // Do not instrument user-defined sections (with names resembling 1954 // valid C identifiers) 1955 if (TargetTriple.isOSBinFormatELF()) { 1956 if (llvm::all_of(Section, 1957 [](char c) { return llvm::isAlnum(c) || c == '_'; })) 1958 return false; 1959 } 1960 1961 // On COFF, if the section name contains '$', it is highly likely that the 1962 // user is using section sorting to create an array of globals similar to 1963 // the way initialization callbacks are registered in .init_array and 1964 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones 1965 // to such globals is counterproductive, because the intent is that they 1966 // will form an array, and out-of-bounds accesses are expected. 1967 // See https://github.com/google/sanitizers/issues/305 1968 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx 1969 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) { 1970 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): " 1971 << *G << "\n"); 1972 return false; 1973 } 1974 1975 if (TargetTriple.isOSBinFormatMachO()) { 1976 StringRef ParsedSegment, ParsedSection; 1977 unsigned TAA = 0, StubSize = 0; 1978 bool TAAParsed; 1979 cantFail(MCSectionMachO::ParseSectionSpecifier( 1980 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize)); 1981 1982 // Ignore the globals from the __OBJC section. The ObjC runtime assumes 1983 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to 1984 // them. 1985 if (ParsedSegment == "__OBJC" || 1986 (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) { 1987 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); 1988 return false; 1989 } 1990 // See https://github.com/google/sanitizers/issues/32 1991 // Constant CFString instances are compiled in the following way: 1992 // -- the string buffer is emitted into 1993 // __TEXT,__cstring,cstring_literals 1994 // -- the constant NSConstantString structure referencing that buffer 1995 // is placed into __DATA,__cfstring 1996 // Therefore there's no point in placing redzones into __DATA,__cfstring. 1997 // Moreover, it causes the linker to crash on OS X 10.7 1998 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") { 1999 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); 2000 return false; 2001 } 2002 // The linker merges the contents of cstring_literals and removes the 2003 // trailing zeroes. 2004 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) { 2005 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); 2006 return false; 2007 } 2008 } 2009 } 2010 2011 if (CompileKernel) { 2012 // Globals that prefixed by "__" are special and cannot be padded with a 2013 // redzone. 2014 if (G->getName().startswith("__")) 2015 return false; 2016 } 2017 2018 return true; 2019 } 2020 2021 // On Mach-O platforms, we emit global metadata in a separate section of the 2022 // binary in order to allow the linker to properly dead strip. This is only 2023 // supported on recent versions of ld64. 
2024 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const { 2025 if (!TargetTriple.isOSBinFormatMachO()) 2026 return false; 2027 2028 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11)) 2029 return true; 2030 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9)) 2031 return true; 2032 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2)) 2033 return true; 2034 2035 return false; 2036 } 2037 2038 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const { 2039 switch (TargetTriple.getObjectFormat()) { 2040 case Triple::COFF: return ".ASAN$GL"; 2041 case Triple::ELF: return "asan_globals"; 2042 case Triple::MachO: return "__DATA,__asan_globals,regular"; 2043 case Triple::Wasm: 2044 case Triple::GOFF: 2045 case Triple::XCOFF: 2046 report_fatal_error( 2047 "ModuleAddressSanitizer not implemented for object file format"); 2048 case Triple::UnknownObjectFormat: 2049 break; 2050 } 2051 llvm_unreachable("unsupported object format"); 2052 } 2053 2054 void ModuleAddressSanitizer::initializeCallbacks(Module &M) { 2055 IRBuilder<> IRB(*C); 2056 2057 // Declare our poisoning and unpoisoning functions. 2058 AsanPoisonGlobals = 2059 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy); 2060 AsanUnpoisonGlobals = 2061 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy()); 2062 2063 // Declare functions that register/unregister globals. 2064 AsanRegisterGlobals = M.getOrInsertFunction( 2065 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 2066 AsanUnregisterGlobals = M.getOrInsertFunction( 2067 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 2068 2069 // Declare the functions that find globals in a shared object and then invoke 2070 // the (un)register function on them. 2071 AsanRegisterImageGlobals = M.getOrInsertFunction( 2072 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 2073 AsanUnregisterImageGlobals = M.getOrInsertFunction( 2074 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 2075 2076 AsanRegisterElfGlobals = 2077 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(), 2078 IntptrTy, IntptrTy, IntptrTy); 2079 AsanUnregisterElfGlobals = 2080 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(), 2081 IntptrTy, IntptrTy, IntptrTy); 2082 } 2083 2084 // Put the metadata and the instrumented global in the same group. This ensures 2085 // that the metadata is discarded if the instrumented global is discarded. 2086 void ModuleAddressSanitizer::SetComdatForGlobalMetadata( 2087 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) { 2088 Module &M = *G->getParent(); 2089 Comdat *C = G->getComdat(); 2090 if (!C) { 2091 if (!G->hasName()) { 2092 // If G is unnamed, it must be internal. Give it an artificial name 2093 // so we can put it in a comdat. 2094 assert(G->hasLocalLinkage()); 2095 G->setName(Twine(kAsanGenPrefix) + "_anon_global"); 2096 } 2097 2098 if (!InternalSuffix.empty() && G->hasLocalLinkage()) { 2099 std::string Name = std::string(G->getName()); 2100 Name += InternalSuffix; 2101 C = M.getOrInsertComdat(Name); 2102 } else { 2103 C = M.getOrInsertComdat(G->getName()); 2104 } 2105 2106 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private 2107 // linkage to internal linkage so that a symbol table entry is emitted. This 2108 // is necessary in order to create the comdat group. 
if (TargetTriple.isOSBinFormatCOFF()) {
2110 C->setSelectionKind(Comdat::NoDuplicates);
2111 if (G->hasPrivateLinkage())
2112 G->setLinkage(GlobalValue::InternalLinkage);
2113 }
2114 G->setComdat(C);
2115 }
2116
2117 assert(G->hasComdat());
2118 Metadata->setComdat(G->getComdat());
2119 }
2120
2121 // Create a separate metadata global and put it in the appropriate ASan
2122 // global registration section.
2123 GlobalVariable *
2124 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
2125 StringRef OriginalName) {
2126 auto Linkage = TargetTriple.isOSBinFormatMachO()
2127 ? GlobalVariable::InternalLinkage
2128 : GlobalVariable::PrivateLinkage;
2129 GlobalVariable *Metadata = new GlobalVariable(
2130 M, Initializer->getType(), false, Linkage, Initializer,
2131 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2132 Metadata->setSection(getGlobalMetadataSection());
2133 return Metadata;
2134 }
2135
2136 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
2137 AsanDtorFunction = Function::createWithDefaultAttr(
2138 FunctionType::get(Type::getVoidTy(*C), false),
2139 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2140 AsanDtorFunction->addAttribute(AttributeList::FunctionIndex,
2141 Attribute::NoUnwind);
2142 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2143
2144 return ReturnInst::Create(*C, AsanDtorBB);
2145 }
2146
2147 void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2148 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2149 ArrayRef<Constant *> MetadataInitializers) {
2150 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2151 auto &DL = M.getDataLayout();
2152
2153 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2154 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2155 Constant *Initializer = MetadataInitializers[i];
2156 GlobalVariable *G = ExtendedGlobals[i];
2157 GlobalVariable *Metadata =
2158 CreateMetadataGlobal(M, Initializer, G->getName());
2159 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2160 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2161 MetadataGlobals[i] = Metadata;
2162
2163 // The MSVC linker always inserts padding when linking incrementally. We
2164 // cope with that by aligning each struct to its size, which must be a power
2165 // of two.
2166 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2167 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2168 "global metadata will not be padded appropriately");
2169 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2170
2171 SetComdatForGlobalMetadata(G, Metadata, "");
2172 }
2173
2174 // Update llvm.compiler.used, adding the new metadata globals. This is
2175 // needed so that during LTO these variables stay alive.
2176 if (!MetadataGlobals.empty())
2177 appendToCompilerUsed(M, MetadataGlobals);
2178 }
2179
2180 void ModuleAddressSanitizer::InstrumentGlobalsELF(
2181 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2182 ArrayRef<Constant *> MetadataInitializers,
2183 const std::string &UniqueModuleId) {
2184 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2185
2186 // Putting globals in a comdat changes the semantics and can potentially
2187 // cause false negative ODR violations at link time. If ODR indicators are
2188 // used, we keep the comdat sections, as link-time ODR violations will be
2189 // detected on the ODR indicator symbols.
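// Concretely: if two linked images each carry an instrumented copy of the
// same ODR-violating global, the runtime can flag the clash when the second
// copy is registered. Placing the metadata in a comdat lets the linker
// silently drop one copy first, hiding that report -- which is acceptable
// only when separate ODR indicator symbols (kODRGenPrefix + the global's
// name, emitted in InstrumentGlobals below) still expose it.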
2190 bool UseComdatForGlobalsGC = UseOdrIndicator; 2191 2192 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); 2193 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 2194 GlobalVariable *G = ExtendedGlobals[i]; 2195 GlobalVariable *Metadata = 2196 CreateMetadataGlobal(M, MetadataInitializers[i], G->getName()); 2197 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); 2198 Metadata->setMetadata(LLVMContext::MD_associated, MD); 2199 MetadataGlobals[i] = Metadata; 2200 2201 if (UseComdatForGlobalsGC) 2202 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId); 2203 } 2204 2205 // Update llvm.compiler.used, adding the new metadata globals. This is 2206 // needed so that during LTO these variables stay alive. 2207 if (!MetadataGlobals.empty()) 2208 appendToCompilerUsed(M, MetadataGlobals); 2209 2210 // RegisteredFlag serves two purposes. First, we can pass it to dladdr() 2211 // to look up the loaded image that contains it. Second, we can store in it 2212 // whether registration has already occurred, to prevent duplicate 2213 // registration. 2214 // 2215 // Common linkage ensures that there is only one global per shared library. 2216 GlobalVariable *RegisteredFlag = new GlobalVariable( 2217 M, IntptrTy, false, GlobalVariable::CommonLinkage, 2218 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName); 2219 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility); 2220 2221 // Create start and stop symbols. 2222 GlobalVariable *StartELFMetadata = new GlobalVariable( 2223 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, 2224 "__start_" + getGlobalMetadataSection()); 2225 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); 2226 GlobalVariable *StopELFMetadata = new GlobalVariable( 2227 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, 2228 "__stop_" + getGlobalMetadataSection()); 2229 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); 2230 2231 // Create a call to register the globals with the runtime. 2232 IRB.CreateCall(AsanRegisterElfGlobals, 2233 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), 2234 IRB.CreatePointerCast(StartELFMetadata, IntptrTy), 2235 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); 2236 2237 // We also need to unregister globals at the end, e.g., when a shared library 2238 // gets closed. 2239 if (DestructorKind != AsanDtorKind::None) { 2240 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2241 IrbDtor.CreateCall(AsanUnregisterElfGlobals, 2242 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), 2243 IRB.CreatePointerCast(StartELFMetadata, IntptrTy), 2244 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); 2245 } 2246 } 2247 2248 void ModuleAddressSanitizer::InstrumentGlobalsMachO( 2249 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2250 ArrayRef<Constant *> MetadataInitializers) { 2251 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2252 2253 // On recent Mach-O platforms, use a structure which binds the liveness of 2254 // the global variable to the metadata struct. 
// Keep the list of "Liveness" GVs
2255 // created so they can be added to llvm.compiler.used.
2256 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2257 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2258
2259 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2260 Constant *Initializer = MetadataInitializers[i];
2261 GlobalVariable *G = ExtendedGlobals[i];
2262 GlobalVariable *Metadata =
2263 CreateMetadataGlobal(M, Initializer, G->getName());
2264
2265 // On recent Mach-O platforms, we emit the global metadata in a way that
2266 // allows the linker to properly strip dead globals.
2267 auto LivenessBinder =
2268 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2269 ConstantExpr::getPointerCast(Metadata, IntptrTy));
2270 GlobalVariable *Liveness = new GlobalVariable(
2271 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2272 Twine("__asan_binder_") + G->getName());
2273 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2274 LivenessGlobals[i] = Liveness;
2275 }
2276
2277 // Update llvm.compiler.used, adding the new liveness globals. This is
2278 // needed so that during LTO these variables stay alive. The alternative
2279 // would be to have the linker handle the LTO symbols, but libLTO's
2280 // current API does not expose access to the section for each symbol.
2281 if (!LivenessGlobals.empty())
2282 appendToCompilerUsed(M, LivenessGlobals);
2283
2284 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2285 // to look up the loaded image that contains it. Second, we can store in it
2286 // whether registration has already occurred, to prevent duplicate
2287 // registration.
2288 //
2289 // Common linkage ensures that there is only one global per shared library.
2290 GlobalVariable *RegisteredFlag = new GlobalVariable(
2291 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2292 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2293 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2294
2295 IRB.CreateCall(AsanRegisterImageGlobals,
2296 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2297
2298 // We also need to unregister globals at the end, e.g., when a shared library
2299 // gets closed.
2300 if (DestructorKind != AsanDtorKind::None) {
2301 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2302 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2303 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2304 }
2305 }
2306
2307 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2308 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2309 ArrayRef<Constant *> MetadataInitializers) {
2310 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2311 unsigned N = ExtendedGlobals.size();
2312 assert(N > 0);
2313
2314 // On platforms that don't have a custom metadata section, we emit an array
2315 // of global metadata structures.
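// Sketch of the fallback registration emitted below, written as the runtime
// sees it (array name illustrative; assuming the usual symbols behind
// kAsanRegisterGlobalsName / kAsanUnregisterGlobalsName):
//
//   // module ctor:
//   __asan_register_globals((uptr)&MetadataArray[0], N);
//   // asan.module_dtor:
//   __asan_unregister_globals((uptr)&MetadataArray[0], N);
//
// No section magic is involved here, so nothing on this path can be
// dead-stripped.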
ArrayType *ArrayOfGlobalStructTy =
2317 ArrayType::get(MetadataInitializers[0]->getType(), N);
2318 auto AllGlobals = new GlobalVariable(
2319 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2320 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2321 if (Mapping.Scale > 3)
2322 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2323
2324 IRB.CreateCall(AsanRegisterGlobals,
2325 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2326 ConstantInt::get(IntptrTy, N)});
2327
2328 // We also need to unregister globals at the end, e.g., when a shared library
2329 // gets closed.
2330 if (DestructorKind != AsanDtorKind::None) {
2331 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2332 IrbDtor.CreateCall(AsanUnregisterGlobals,
2333 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2334 ConstantInt::get(IntptrTy, N)});
2335 }
2336 }
2337
2338 // This function replaces all global variables with new variables that have
2339 // trailing redzones. It also creates a function that poisons
2340 // redzones and inserts this function into llvm.global_ctors.
2341 // Sets *CtorComdat to true if the global registration code emitted into the
2342 // asan constructor is comdat-compatible.
2343 bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
2344 bool *CtorComdat) {
2345 *CtorComdat = false;
2346
2347 // Build the set of globals that are aliased by some GA, where
2348 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2349 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2350 if (CompileKernel) {
2351 for (auto &GA : M.aliases()) {
2352 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2353 AliasedGlobalExclusions.insert(GV);
2354 }
2355 }
2356
2357 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2358 for (auto &G : M.globals()) {
2359 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2360 GlobalsToChange.push_back(&G);
2361 }
2362
2363 size_t n = GlobalsToChange.size();
2364 if (n == 0) {
2365 *CtorComdat = true;
2366 return false;
2367 }
2368
2369 auto &DL = M.getDataLayout();
2370
2371 // A global is described by a structure:
2372 // size_t beg;
2373 // size_t size;
2374 // size_t size_with_redzone;
2375 // const char *name;
2376 // const char *module_name;
2377 // size_t has_dynamic_init;
2378 // void *source_location;
2379 // size_t odr_indicator;
2380 // We initialize an array of such structures and pass it to a run-time call.
2381 StructType *GlobalStructTy =
2382 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2383 IntptrTy, IntptrTy, IntptrTy);
2384 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2385 SmallVector<Constant *, 16> Initializers(n);
2386
2387 bool HasDynamicallyInitializedGlobals = false;
2388
2389 // We shouldn't merge same module names, as this string serves as a unique
2390 // module ID at runtime.
2391 GlobalVariable *ModuleName = createPrivateGlobalForString(
2392 M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);
2393
2394 for (size_t i = 0; i < n; i++) {
2395 GlobalVariable *G = GlobalsToChange[i];
2396
2397 // FIXME: Metadata should be attached directly to the global instead
2398 // of being added to llvm.asan.globals.
2399 auto MD = GlobalsMD.get(G);
2400 StringRef NameForGlobal = G->getName();
2401 // Create a string holding the global name (use the global name from
2402 // metadata if it's available, otherwise just use the name of the global).
2403 GlobalVariable *Name = createPrivateGlobalForString( 2404 M, MD.Name.empty() ? NameForGlobal : MD.Name, 2405 /*AllowMerging*/ true, kAsanGenPrefix); 2406 2407 Type *Ty = G->getValueType(); 2408 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty); 2409 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes); 2410 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); 2411 2412 StructType *NewTy = StructType::get(Ty, RightRedZoneTy); 2413 Constant *NewInitializer = ConstantStruct::get( 2414 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy)); 2415 2416 // Create a new global variable with enough space for a redzone. 2417 GlobalValue::LinkageTypes Linkage = G->getLinkage(); 2418 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) 2419 Linkage = GlobalValue::InternalLinkage; 2420 GlobalVariable *NewGlobal = new GlobalVariable( 2421 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G, 2422 G->getThreadLocalMode(), G->getAddressSpace()); 2423 NewGlobal->copyAttributesFrom(G); 2424 NewGlobal->setComdat(G->getComdat()); 2425 NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal())); 2426 // Don't fold globals with redzones. ODR violation detector and redzone 2427 // poisoning implicitly creates a dependence on the global's address, so it 2428 // is no longer valid for it to be marked unnamed_addr. 2429 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None); 2430 2431 // Move null-terminated C strings to "__asan_cstring" section on Darwin. 2432 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() && 2433 G->isConstant()) { 2434 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer()); 2435 if (Seq && Seq->isCString()) 2436 NewGlobal->setSection("__TEXT,__asan_cstring,regular"); 2437 } 2438 2439 // Transfer the debug info and type metadata. The payload starts at offset 2440 // zero so we can copy the metadata over as is. 2441 NewGlobal->copyMetadata(G, 0); 2442 2443 Value *Indices2[2]; 2444 Indices2[0] = IRB.getInt32(0); 2445 Indices2[1] = IRB.getInt32(0); 2446 2447 G->replaceAllUsesWith( 2448 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true)); 2449 NewGlobal->takeName(G); 2450 G->eraseFromParent(); 2451 NewGlobals[i] = NewGlobal; 2452 2453 Constant *SourceLoc; 2454 if (!MD.SourceLoc.empty()) { 2455 auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc); 2456 SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy); 2457 } else { 2458 SourceLoc = ConstantInt::get(IntptrTy, 0); 2459 } 2460 2461 Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy()); 2462 GlobalValue *InstrumentedGlobal = NewGlobal; 2463 2464 bool CanUsePrivateAliases = 2465 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() || 2466 TargetTriple.isOSBinFormatWasm(); 2467 if (CanUsePrivateAliases && UsePrivateAlias) { 2468 // Create local alias for NewGlobal to avoid crash on ODR between 2469 // instrumented and non-instrumented libraries. 2470 InstrumentedGlobal = 2471 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal); 2472 } 2473 2474 // ODR should not happen for local linkage. 2475 if (NewGlobal->hasLocalLinkage()) { 2476 ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), 2477 IRB.getInt8PtrTy()); 2478 } else if (UseOdrIndicator) { 2479 // With local aliases, we need to provide another externally visible 2480 // symbol __odr_asan_XXX to detect ODR violation. 
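// A sketch of the symbols this produces for an external global `g`
// (kODRGenPrefix is the "__odr_asan_" style prefix; its exact spelling
// lives with the other name constants):
//
//   g                  <- data + redzone, accessed via a private alias
//   <kODRGenPrefix>g   <- 1-byte external indicator, alignment 1
//
// Roughly, the runtime marks the indicator when `g` is registered; finding
// it already marked by another image is reported as an ODR violation, even
// though the private aliases keep the data symbols themselves apart.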
2481 auto *ODRIndicatorSym = 2482 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage, 2483 Constant::getNullValue(IRB.getInt8Ty()), 2484 kODRGenPrefix + NameForGlobal, nullptr, 2485 NewGlobal->getThreadLocalMode()); 2486 2487 // Set meaningful attributes for indicator symbol. 2488 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility()); 2489 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass()); 2490 ODRIndicatorSym->setAlignment(Align(1)); 2491 ODRIndicator = ODRIndicatorSym; 2492 } 2493 2494 Constant *Initializer = ConstantStruct::get( 2495 GlobalStructTy, 2496 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy), 2497 ConstantInt::get(IntptrTy, SizeInBytes), 2498 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), 2499 ConstantExpr::getPointerCast(Name, IntptrTy), 2500 ConstantExpr::getPointerCast(ModuleName, IntptrTy), 2501 ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, 2502 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy)); 2503 2504 if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true; 2505 2506 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); 2507 2508 Initializers[i] = Initializer; 2509 } 2510 2511 // Add instrumented globals to llvm.compiler.used list to avoid LTO from 2512 // ConstantMerge'ing them. 2513 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList; 2514 for (size_t i = 0; i < n; i++) { 2515 GlobalVariable *G = NewGlobals[i]; 2516 if (G->getName().empty()) continue; 2517 GlobalsToAddToUsedList.push_back(G); 2518 } 2519 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList)); 2520 2521 std::string ELFUniqueModuleId = 2522 (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M) 2523 : ""; 2524 2525 if (!ELFUniqueModuleId.empty()) { 2526 InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId); 2527 *CtorComdat = true; 2528 } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) { 2529 InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers); 2530 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) { 2531 InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers); 2532 } else { 2533 InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers); 2534 } 2535 2536 // Create calls for poisoning before initializers run and unpoisoning after. 2537 if (HasDynamicallyInitializedGlobals) 2538 createInitializerPoisonCalls(M, ModuleName); 2539 2540 LLVM_DEBUG(dbgs() << M); 2541 return true; 2542 } 2543 2544 uint64_t 2545 ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const { 2546 constexpr uint64_t kMaxRZ = 1 << 18; 2547 const uint64_t MinRZ = getMinRedzoneSizeForGlobal(); 2548 2549 uint64_t RZ = 0; 2550 if (SizeInBytes <= MinRZ / 2) { 2551 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is 2552 // at least 32 bytes, optimize when SizeInBytes is less than or equal to 2553 // half of MinRZ. 2554 RZ = MinRZ - SizeInBytes; 2555 } else { 2556 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes. 2557 RZ = std::max(MinRZ, std::min(kMaxRZ, (SizeInBytes / MinRZ / 4) * MinRZ)); 2558 2559 // Round up to multiple of MinRZ. 
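// Worked example, assuming MinRZ = 32: SizeInBytes = 100 gives
// RZ = max(32, min(1 << 18, (100 / 32 / 4) * 32)) = max(32, 0) = 32, and
// the rounding below adds 32 - (100 % 32) = 28, so RZ = 60 and
// (100 + 60) % 32 == 0, as the assert below requires.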
2560 if (SizeInBytes % MinRZ) 2561 RZ += MinRZ - (SizeInBytes % MinRZ); 2562 } 2563 2564 assert((RZ + SizeInBytes) % MinRZ == 0); 2565 2566 return RZ; 2567 } 2568 2569 int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const { 2570 int LongSize = M.getDataLayout().getPointerSizeInBits(); 2571 bool isAndroid = Triple(M.getTargetTriple()).isAndroid(); 2572 int Version = 8; 2573 // 32-bit Android is one version ahead because of the switch to dynamic 2574 // shadow. 2575 Version += (LongSize == 32 && isAndroid); 2576 return Version; 2577 } 2578 2579 bool ModuleAddressSanitizer::instrumentModule(Module &M) { 2580 initializeCallbacks(M); 2581 2582 // Create a module constructor. A destructor is created lazily because not all 2583 // platforms, and not all modules need it. 2584 if (CompileKernel) { 2585 // The kernel always builds with its own runtime, and therefore does not 2586 // need the init and version check calls. 2587 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName); 2588 } else { 2589 std::string AsanVersion = std::to_string(GetAsanVersion(M)); 2590 std::string VersionCheckName = 2591 ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : ""; 2592 std::tie(AsanCtorFunction, std::ignore) = 2593 createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, 2594 kAsanInitName, /*InitArgTypes=*/{}, 2595 /*InitArgs=*/{}, VersionCheckName); 2596 } 2597 2598 bool CtorComdat = true; 2599 if (ClGlobals) { 2600 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator()); 2601 InstrumentGlobals(IRB, M, &CtorComdat); 2602 } 2603 2604 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple); 2605 2606 // Put the constructor and destructor in comdat if both 2607 // (1) global instrumentation is not TU-specific 2608 // (2) target is ELF. 2609 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) { 2610 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName)); 2611 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction); 2612 if (AsanDtorFunction) { 2613 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName)); 2614 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction); 2615 } 2616 } else { 2617 appendToGlobalCtors(M, AsanCtorFunction, Priority); 2618 if (AsanDtorFunction) 2619 appendToGlobalDtors(M, AsanDtorFunction, Priority); 2620 } 2621 2622 return true; 2623 } 2624 2625 void AddressSanitizer::initializeCallbacks(Module &M) { 2626 IRBuilder<> IRB(*C); 2627 // Create __asan_report* callbacks. 2628 // IsWrite, TypeSize and Exp are encoded in the function name. 2629 for (int Exp = 0; Exp < 2; Exp++) { 2630 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 2631 const std::string TypeStr = AccessIsWrite ? "store" : "load"; 2632 const std::string ExpStr = Exp ? "exp_" : ""; 2633 const std::string EndingStr = Recover ? 
"_noabort" : ""; 2634 2635 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy}; 2636 SmallVector<Type *, 2> Args1{1, IntptrTy}; 2637 if (Exp) { 2638 Type *ExpType = Type::getInt32Ty(*C); 2639 Args2.push_back(ExpType); 2640 Args1.push_back(ExpType); 2641 } 2642 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2643 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr, 2644 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2645 2646 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2647 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, 2648 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2649 2650 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 2651 AccessSizeIndex++) { 2652 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex); 2653 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2654 M.getOrInsertFunction( 2655 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr, 2656 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2657 2658 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2659 M.getOrInsertFunction( 2660 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, 2661 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2662 } 2663 } 2664 } 2665 2666 const std::string MemIntrinCallbackPrefix = 2667 CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix; 2668 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove", 2669 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2670 IRB.getInt8PtrTy(), IntptrTy); 2671 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", 2672 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2673 IRB.getInt8PtrTy(), IntptrTy); 2674 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset", 2675 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2676 IRB.getInt32Ty(), IntptrTy); 2677 2678 AsanHandleNoReturnFunc = 2679 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy()); 2680 2681 AsanPtrCmpFunction = 2682 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy); 2683 AsanPtrSubFunction = 2684 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy); 2685 if (Mapping.InGlobal) 2686 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow", 2687 ArrayType::get(IRB.getInt8Ty(), 0)); 2688 2689 AMDGPUAddressShared = M.getOrInsertFunction( 2690 kAMDGPUAddressSharedName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2691 AMDGPUAddressPrivate = M.getOrInsertFunction( 2692 kAMDGPUAddressPrivateName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2693 } 2694 2695 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { 2696 // For each NSObject descendant having a +load method, this method is invoked 2697 // by the ObjC runtime before any of the static constructors is called. 2698 // Therefore we need to instrument such methods with a call to __asan_init 2699 // at the beginning in order to initialize our runtime before any access to 2700 // the shadow memory. 2701 // We cannot just ignore these methods, because they may call other 2702 // instrumented functions. 
2703 if (F.getName().find(" load]") != std::string::npos) { 2704 FunctionCallee AsanInitFunction = 2705 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {}); 2706 IRBuilder<> IRB(&F.front(), F.front().begin()); 2707 IRB.CreateCall(AsanInitFunction, {}); 2708 return true; 2709 } 2710 return false; 2711 } 2712 2713 bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) { 2714 // Generate code only when dynamic addressing is needed. 2715 if (Mapping.Offset != kDynamicShadowSentinel) 2716 return false; 2717 2718 IRBuilder<> IRB(&F.front().front()); 2719 if (Mapping.InGlobal) { 2720 if (ClWithIfuncSuppressRemat) { 2721 // An empty inline asm with input reg == output reg. 2722 // An opaque pointer-to-int cast, basically. 2723 InlineAsm *Asm = InlineAsm::get( 2724 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false), 2725 StringRef(""), StringRef("=r,0"), 2726 /*hasSideEffects=*/false); 2727 LocalDynamicShadow = 2728 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow"); 2729 } else { 2730 LocalDynamicShadow = 2731 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow"); 2732 } 2733 } else { 2734 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( 2735 kAsanShadowMemoryDynamicAddress, IntptrTy); 2736 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress); 2737 } 2738 return true; 2739 } 2740 2741 void AddressSanitizer::markEscapedLocalAllocas(Function &F) { 2742 // Find the one possible call to llvm.localescape and pre-mark allocas passed 2743 // to it as uninteresting. This assumes we haven't started processing allocas 2744 // yet. This check is done up front because iterating the use list in 2745 // isInterestingAlloca would be algorithmically slower. 2746 assert(ProcessedAllocas.empty() && "must process localescape before allocas"); 2747 2748 // Try to get the declaration of llvm.localescape. If it's not in the module, 2749 // we can exit early. 2750 if (!F.getParent()->getFunction("llvm.localescape")) return; 2751 2752 // Look for a call to llvm.localescape call in the entry block. It can't be in 2753 // any other block. 2754 for (Instruction &I : F.getEntryBlock()) { 2755 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I); 2756 if (II && II->getIntrinsicID() == Intrinsic::localescape) { 2757 // We found a call. Mark all the allocas passed in as uninteresting. 2758 for (Value *Arg : II->arg_operands()) { 2759 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); 2760 assert(AI && AI->isStaticAlloca() && 2761 "non-static alloca arg to localescape"); 2762 ProcessedAllocas[AI] = false; 2763 } 2764 break; 2765 } 2766 } 2767 } 2768 2769 bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) { 2770 bool ShouldInstrument = 2771 ClDebugMin < 0 || ClDebugMax < 0 || 2772 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax); 2773 Instrumented++; 2774 return !ShouldInstrument; 2775 } 2776 2777 bool AddressSanitizer::instrumentFunction(Function &F, 2778 const TargetLibraryInfo *TLI) { 2779 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; 2780 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false; 2781 if (F.getName().startswith("__asan_")) return false; 2782 2783 bool FunctionModified = false; 2784 2785 // If needed, insert __asan_init before checking for SanitizeAddress attr. 2786 // This function needs to be called even if the function body is not 2787 // instrumented. 
2788 if (maybeInsertAsanInitAtFunctionEntry(F)) 2789 FunctionModified = true; 2790 2791 // Leave if the function doesn't need instrumentation. 2792 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified; 2793 2794 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); 2795 2796 initializeCallbacks(*F.getParent()); 2797 2798 FunctionStateRAII CleanupObj(this); 2799 2800 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F); 2801 2802 // We can't instrument allocas used with llvm.localescape. Only static allocas 2803 // can be passed to that intrinsic. 2804 markEscapedLocalAllocas(F); 2805 2806 // We want to instrument every address only once per basic block (unless there 2807 // are calls between uses). 2808 SmallPtrSet<Value *, 16> TempsToInstrument; 2809 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument; 2810 SmallVector<MemIntrinsic *, 16> IntrinToInstrument; 2811 SmallVector<Instruction *, 8> NoReturnCalls; 2812 SmallVector<BasicBlock *, 16> AllBlocks; 2813 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts; 2814 int NumAllocas = 0; 2815 2816 // Fill the set of memory operations to instrument. 2817 for (auto &BB : F) { 2818 AllBlocks.push_back(&BB); 2819 TempsToInstrument.clear(); 2820 int NumInsnsPerBB = 0; 2821 for (auto &Inst : BB) { 2822 if (LooksLikeCodeInBug11395(&Inst)) return false; 2823 SmallVector<InterestingMemoryOperand, 1> InterestingOperands; 2824 getInterestingMemoryOperands(&Inst, InterestingOperands); 2825 2826 if (!InterestingOperands.empty()) { 2827 for (auto &Operand : InterestingOperands) { 2828 if (ClOpt && ClOptSameTemp) { 2829 Value *Ptr = Operand.getPtr(); 2830 // If we have a mask, skip instrumentation if we've already 2831 // instrumented the full object. But don't add to TempsToInstrument 2832 // because we might get another load/store with a different mask. 2833 if (Operand.MaybeMask) { 2834 if (TempsToInstrument.count(Ptr)) 2835 continue; // We've seen this (whole) temp in the current BB. 2836 } else { 2837 if (!TempsToInstrument.insert(Ptr).second) 2838 continue; // We've seen this temp in the current BB. 2839 } 2840 } 2841 OperandsToInstrument.push_back(Operand); 2842 NumInsnsPerBB++; 2843 } 2844 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) && 2845 isInterestingPointerComparison(&Inst)) || 2846 ((ClInvalidPointerPairs || ClInvalidPointerSub) && 2847 isInterestingPointerSubtraction(&Inst))) { 2848 PointerComparisonsOrSubtracts.push_back(&Inst); 2849 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) { 2850 // ok, take it. 2851 IntrinToInstrument.push_back(MI); 2852 NumInsnsPerBB++; 2853 } else { 2854 if (isa<AllocaInst>(Inst)) NumAllocas++; 2855 if (auto *CB = dyn_cast<CallBase>(&Inst)) { 2856 // A call inside BB. 2857 TempsToInstrument.clear(); 2858 if (CB->doesNotReturn() && !CB->hasMetadata("nosanitize")) 2859 NoReturnCalls.push_back(CB); 2860 } 2861 if (CallInst *CI = dyn_cast<CallInst>(&Inst)) 2862 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); 2863 } 2864 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; 2865 } 2866 } 2867 2868 bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 && 2869 OperandsToInstrument.size() + IntrinToInstrument.size() > 2870 (unsigned)ClInstrumentationWithCallsThreshold); 2871 const DataLayout &DL = F.getParent()->getDataLayout(); 2872 ObjectSizeOpts ObjSizeOpts; 2873 ObjSizeOpts.RoundToAlign = true; 2874 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts); 2875 2876 // Instrument. 
  int NumInstrumented = 0;
  for (auto &Operand : OperandsToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMop(ObjSizeVis, Operand, UseCalls,
                    F.getParent()->getDataLayout());
    FunctionModified = true;
  }
  for (auto Inst : IntrinToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMemIntrinsic(Inst);
    FunctionModified = true;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
  // See e.g. https://github.com/google/sanitizers/issues/37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc, {});
  }

  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    FunctionModified = true;
  }

  if (ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}

// Workaround for bug 11395: we don't want to instrument the stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
      ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
    const char *MallocNameTemplate =
        ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
            ? kAsanStackMallocAlwaysNameTemplate
            : kAsanStackMallocNameTemplate;
    for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
      std::string Suffix = itostr(Index);
      AsanStackMallocFunc[Index] = M.getOrInsertFunction(
          MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
      AsanStackFreeFunc[Index] =
          M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                                IRB.getVoidTy(), IntptrTy, IntptrTy);
    }
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << kAsanSetShadowPrefix;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}

void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size without
  // leading and trailing zeros in ShadowMask. Zeros never change, so they need
  // neither poisoning nor up-poisoning. Still, we don't mind if some of them
  // get into the middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
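    // (For example, if only ShadowMask[i] is nonzero within an 8-byte window,
    // the loop below shrinks the store from 8 bytes down to a single byte.)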
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
        Align(1));

    i += StoreSizeInBytes;
  }
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip over a run of the same value.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ClMaxInlinePoisoningSize) {
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      IRB.CreateCall(AsanSetShadowFunc[Val],
                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
                      ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}

// The fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
// every power of 2 from kMinStackMallocSize up to
// kMaxAsanStackMallocSizeClass.
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
  llvm_unreachable("impossible LocalStackSize");
}

void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
  Instruction *CopyInsertPoint = &F.front().front();
  if (CopyInsertPoint == ASan.LocalDynamicShadow) {
    // Insert after the dynamic shadow location is determined.
    CopyInsertPoint = CopyInsertPoint->getNextNode();
    assert(CopyInsertPoint);
  }
  IRBuilder<> IRB(CopyInsertPoint);
  const DataLayout &DL = F.getParent()->getDataLayout();
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr()) {
      Type *Ty = Arg.getParamByValType();
      const Align Alignment =
          DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);

      AllocaInst *AI = IRB.CreateAlloca(
          Ty, nullptr,
          (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
              ".byval");
      AI->setAlignment(Alignment);
      Arg.replaceAllUsesWith(AI);

      uint64_t AllocSize = DL.getTypeAllocSize(Ty);
      IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
    }
  }
}

PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  Alloca->setAlignment(Align(FrameAlignment));
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}

void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
  BasicBlock &FirstBB = *F.begin();
  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
  DynamicAllocaLayout->setAlignment(Align(32));
}

void FunctionStackPoisoner::processDynamicAllocas() {
  if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
    assert(DynamicAllocaPoisonCallVec.empty());
    return;
  }

  // Insert poison calls for lifetime intrinsics for dynamic allocas.
  for (const auto &APC : DynamicAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(!APC.AI->isStaticAlloca());

    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    // Dynamic allocas will be unpoisoned unconditionally below in
    // unpoisonDynamicAllocas.
  }

  // Handle dynamic allocas.
  createDynamicAllocasInitStorage();
  for (auto &AI : DynamicAllocaVec)
    handleDynamicAllocaCall(AI);
  unpoisonDynamicAllocas();
}

/// Collect instructions in the entry block after \p InsBefore which initialize
/// permanent storage for a function argument. These instructions must remain
/// in the entry block so that uninitialized values do not appear in
/// backtraces. An added benefit is that this conserves spill slots. This does
/// not move stores before instrumented / "interesting" allocas.
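///
/// For illustration (hypothetical IR matching patterns (1) and (2) described
/// inside the function below):
///   %x.addr = alloca i64
///   %ext = zext i32 %x to i64      ; cast of an Argument
///   store i64 %ext, i64* %x.addr   ; collected together with the cast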
static void findStoresToUninstrumentedArgAllocas(
    AddressSanitizer &ASan, Instruction &InsBefore,
    SmallVectorImpl<Instruction *> &InitInsts) {
  Instruction *Start = InsBefore.getNextNonDebugInstruction();
  for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
    // Argument initialization looks like:
    // 1) store <Argument>, <Alloca> OR
    // 2) <CastArgument> = cast <Argument> to ...
    //    store <CastArgument> to <Alloca>
    // Do not consider any other kind of instruction.
    //
    // Note: This covers all known cases, but may not be exhaustive. An
    // alternative to pattern-matching stores is to DFS over all Argument uses:
    // this might be more general, but is probably much more complicated.
    if (isa<AllocaInst>(It) || isa<CastInst>(It))
      continue;
    if (auto *Store = dyn_cast<StoreInst>(It)) {
      // The store destination must be an alloca that isn't interesting for
      // ASan to instrument. These are moved up before InsBefore, and they're
      // not interesting because allocas for arguments can be mem2reg'd.
      auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
      if (!Alloca || ASan.isInterestingAlloca(*Alloca))
        continue;

      Value *Val = Store->getValueOperand();
      bool IsDirectArgInit = isa<Argument>(Val);
      bool IsArgInitViaCast =
          isa<CastInst>(Val) &&
          isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
          // Check that the cast appears directly before the store. Otherwise
          // moving the cast before InsBefore may break the IR.
          Val == It->getPrevNonDebugInstruction();
      bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
      if (!IsArgInit)
        continue;

      if (IsArgInitViaCast)
        InitInsts.push_back(cast<Instruction>(Val));
      InitInsts.push_back(Store);
      continue;
    }

    // Do not reorder past unknown instructions: argument initialization should
    // only involve casts and stores.
    return;
  }
}

void FunctionStackPoisoner::processStaticAllocas() {
  if (AllocaVec.empty()) {
    assert(StaticAllocaPoisonCallVec.empty());
    return;
  }

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation;
  if (auto SP = F.getSubprogram())
    EntryDebugLocation =
        DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);

  // Make sure non-instrumented allocas stay in the entry block. Otherwise,
  // debug info is broken, because only entry-block allocas are treated as
  // regular stack slots.
  auto InsBeforeB = InsBefore->getParent();
  assert(InsBeforeB == &F.getEntryBlock());
  for (auto *AI : StaticAllocasToMoveUp)
    if (AI->getParent() == InsBeforeB)
      AI->moveBefore(InsBefore);

  // Move stores of arguments into entry-block allocas as well. This prevents
  // extra stack slots from being generated (to house the argument values until
  // they can be stored into the allocas). This also prevents uninitialized
  // values from being shown in backtraces.
  SmallVector<Instruction *, 8> ArgInitInsts;
  findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
  for (Instruction *ArgInitInst : ArgInitInsts)
    ArgInitInst->moveBefore(InsBefore);

  // If we have a call to llvm.localescape, keep it in the entry block.
  if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = {AI->getName().data(),
                                      ASan.getAllocaSizeInBytes(*AI),
                                      0,
                                      AI->getAlignment(),
                                      AI,
                                      0,
                                      0};
    SVD.push_back(D);
  }

  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t Granularity = 1ULL << Mapping.Scale;
  size_t MinHeaderSize = std::max((size_t)ASan.LongSize / 2, Granularity);
  const ASanStackFrameLayout &L =
      ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);

  // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
  DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
  for (auto &Desc : SVD)
    AllocaToSVDMap[Desc.AI] = &Desc;

  // Update SVD with information from lifetime intrinsics.
  for (const auto &APC : StaticAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(APC.AI->isStaticAlloca());

    ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
    Desc.LifetimeSize = Desc.Size;
    if (const DILocation *FnLoc = EntryDebugLocation.get()) {
      if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
        if (LifetimeLoc->getFile() == FnLoc->getFile())
          if (unsigned Line = LifetimeLoc->getLine())
            Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
      }
    }
  }

  auto DescriptionString = ComputeASanStackFrameDescription(SVD);
  LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc =
      ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
      !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
  bool DoDynamicAlloca = ClDynamicAllocaStack;
  // Don't do dynamic alloca or stack malloc if:
  // 1) There is inline asm: too often it makes assumptions on which registers
  //    are available.
  // 2) There is a returns_twice call (typically setjmp), which is
  //    optimization-hostile, and doesn't play well with introduced indirect
  //    register-relative calculation of local variable addresses.
  DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
  DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;

  Value *StaticAlloca =
      DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);

  Value *FakeStack;
  Value *LocalStackBase;
  Value *LocalStackBaseAlloca;
  uint8_t DIExprFlags = DIExpression::ApplyOffset;

  if (DoStackMalloc) {
    LocalStackBaseAlloca =
        IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
    if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
      // void *FakeStack = __asan_option_detect_stack_use_after_return
      //     ? __asan_stack_malloc_N(LocalStackSize)
      //     : nullptr;
      // void *LocalStackBase = (FakeStack) ? FakeStack :
      //                        alloca(LocalStackSize);
      Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
          kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
      Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
          IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
          Constant::getNullValue(IRB.getInt32Ty()));
      Instruction *Term =
          SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
      IRBuilder<> IRBIf(Term);
      StackMallocIdx = StackMallocSizeClass(LocalStackSize);
      assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
      Value *FakeStackValue =
          IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                           ConstantInt::get(IntptrTy, LocalStackSize));
      IRB.SetInsertPoint(InsBefore);
      FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
                            ConstantInt::get(IntptrTy, 0));
    } else {
      // assert(ASan.UseAfterReturn ==
      //        AsanDetectStackUseAfterReturnMode::Always)
      // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
      // void *LocalStackBase = (FakeStack) ? FakeStack :
      //                        alloca(LocalStackSize);
      StackMallocIdx = StackMallocSizeClass(LocalStackSize);
      FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                                 ConstantInt::get(IntptrTy, LocalStackSize));
    }
    Value *NoFakeStack =
        IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
    Instruction *Term =
        SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
    IRBuilder<> IRBIf(Term);
    Value *AllocaValue =
        DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;

    IRB.SetInsertPoint(InsBefore);
    LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
    IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
    DIExprFlags |= DIExpression::DerefBefore;
  } else {
    // void *FakeStack = nullptr;
    // void *LocalStackBase = alloca(LocalStackSize);
    FakeStack = ConstantInt::get(IntptrTy, 0);
    LocalStackBase =
        DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
    LocalStackBaseAlloca = LocalStackBase;
  }

  // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
  // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
  // later passes and can result in dropped variable coverage in debug info.
  Value *LocalStackBaseAllocaPtr =
      isa<PtrToIntInst>(LocalStackBaseAlloca)
          ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
          : LocalStackBaseAlloca;
  assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
         "Variable descriptions relative to ASan stack base will be dropped");

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
                      Desc.Offset);
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
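  // (Frame header recap, one pointer-sized slot per entry: redzone[0] holds
  // kCurrentStackFrameMagic, redzone[1] the frame description string pointer,
  // and redzone[2], written below, the PC of the function.)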
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), DescriptionString,
                                   /*AllowMerging*/ true, kAsanGenPrefix);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);

  const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);

  // Poison the stack red zones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  // As the mask we must use the most poisoned case: red zones and after-scope.
  // As the bytes we can use either the same or just the red zones.
  copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);

  if (!StaticAllocaPoisonCallVec.empty()) {
    const auto &ShadowInScope = GetShadowBytes(SVD, L);

    // Poison static allocas near lifetime intrinsics.
    for (const auto &APC : StaticAllocaPoisonCallVec) {
      const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
      assert(Desc.Offset % L.Granularity == 0);
      size_t Begin = Desc.Offset / L.Granularity;
      size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;

      IRBuilder<> IRB(APC.InsBefore);
      copyToShadow(ShadowAfterScope,
                   APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
                   IRB, ShadowBase);
    }
  }

  SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
  SmallVector<uint8_t, 64> ShadowAfterReturn;

  // (Un)poison the stack before all ret instructions.
  for (Instruction *Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if FakeStack != 0  // LocalStackBase == FakeStack
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(FakeStack) = 0
      //     else
      //         __asan_stack_free_N(FakeStack, LocalStackSize)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp =
          IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
      Instruction *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        ShadowAfterReturn.resize(ClassSize / L.Granularity,
                                 kAsanStackUseAfterReturnMagic);
        copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
                     ShadowBase);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            FakeStack,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall(
            AsanStackFreeFunc[StackMallocIdx],
            {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
      }

      IRBuilder<> IRBElse(ElseTerm);
      copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
    } else {
      copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec) AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to the ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall(
      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
      {AddrArg, SizeArg});
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
  IRBuilder<> IRB(AI);

  const unsigned Alignment = std::max(kAllocaRzSize, AI->getAlignment());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);

  // Since we need to extend the alloca with additional memory to locate the
  // redzones, and OldSize is the number of allocated elements of ElementSize
  // bytes each, compute the allocated memory size in bytes as
  // OldSize * ElementSize.
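  // (A worked example for the computation below, assuming kAllocaRzSize == 32
  // as the "OldSize % 32" comment indicates: OldSize == 100 and
  // Alignment == 32 give PartialSize == 4, Misalign == 28,
  // PartialPadding == 28, and NewSize == 100 + (32 + 28 + 32) == 192.)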
  const unsigned ElementSize =
      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize =
      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
                    ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % 32
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
  // Alignment is added to locate the left redzone, PartialPadding for a
  // possible partial redzone, and kAllocaRzSize for the right redzone.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Alignment + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);

  // Insert a new alloca with the new NewSize and Alignment params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align(Alignment));

  // NewAddress = Address + Alignment
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Alignment));

  // Insert an __asan_alloca_poison call for the newly created alloca.
  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});

  // Store the last alloca's address to DynamicAllocaLayout. We'll need this
  // for unpoisoning stuff.
  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                  DynamicAllocaLayout);

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase the old alloca from its parent.
  AI->eraseFromParent();
}

// isSafeAccess returns true if Addr is always inbounds with respect to its
// base object. For example, it is a field access or an array access with
// a constant inbounds index.
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                    Value *Addr, uint64_t TypeSize) const {
  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
  if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
  uint64_t Size = SizeOffset.first.getZExtValue();
  int64_t Offset = SizeOffset.second.getSExtValue();
  // Three checks are required to ensure safety:
  // . Offset >= 0 (since the offset is given from the base ptr)
  // . Size >= Offset (unsigned)
  // . Size - Offset >= NeededSize (unsigned)
  return Offset >= 0 && Size >= uint64_t(Offset) &&
         Size - uint64_t(Offset) >= TypeSize / 8;
}
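// (A worked example for the checks above, with hypothetical values: an 8-byte
// object accessed at Offset == 4 with TypeSize == 32 bits passes all three
// checks, since 4 >= 0, 8 >= 4, and 8 - 4 >= 4; with TypeSize == 64 the last
// check fails, so the access is not provably safe and gets instrumented.)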