//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//  https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"
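// A quick orientation for the constants below (illustrative, derived from
// the mapping formula used later in this file): shadow memory is addressed
// as
//   Shadow = (Mem >> Scale) + Offset;
// so with the default Scale of 3 each shadow byte describes one 8-byte
// granule of application memory, and the per-platform offsets below choose
// where that shadow region lives in the address space.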
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;

static const uint64_t kMyriadShadowScale = 5;
static const uint64_t kMyriadMemoryOffset32 = 0x80000000ULL;
static const uint64_t kMyriadMemorySize32 = 0x20000000ULL;
static const uint64_t kMyriadTagShift = 29;
static const uint64_t kMyriadDDRTag = 4;
static const uint64_t kMyriadCacheBitMask32 = 0x40000000ULL;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;  // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
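// Sketch (illustrative, not this file's code) of how a fake-stack size class
// could be chosen from the constants above: classes cover
// [kMinStackMallocSize, kMaxStackMallocSize] in powers of two, so
//   int ClassId = 0;
//   for (uint64_t Max = kMinStackMallocSize; LocalStackSize > Max; Max *= 2)
//     ++ClassId;
// would yield __asan_stack_malloc_0 for frames up to 64 bytes and
// __asan_stack_malloc_10 for 64K (see the name templates declared below).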
const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const unsigned kAllocaRzSize = 32;
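// Illustration (assumed runtime naming built from the templates above): the
// per-size report entry points expand to
//   __asan_report_{load,store}{1,2,4,8,16}
// so, e.g., a faulting 8-byte write is reported via __asan_report_store8,
// and kNumberOfAccessSizes = 5 matches the five power-of-two access sizes.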
// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."),
    cl::Hidden, cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));

static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
    "asan-use-after-return",
    cl::desc("Sets the mode of detection for stack-use-after-return."),
    cl::values(
        clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
                   "Never detect stack use after return."),
        clEnumValN(
            AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
            "Detect stack use after return if "
            "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
        clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
                   "Always detect stack use after return.")),
    cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));
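// Sketch of what the three modes above mean for codegen (illustrative; the
// runtime symbol names come from the constants earlier in this file):
//   never:   frames stay on the real stack, no fake-stack calls are emitted;
//   runtime: the prologue first tests the runtime-resolved global
//            __asan_option_detect_stack_use_after_return != 0 before calling
//            __asan_stack_malloc_N;
//   always:  __asan_stack_malloc_always_N is called unconditionally.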
static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"),
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerCmp(
    "asan-detect-invalid-pointer-cmp",
    cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerSub(
    "asan-detect-invalid-pointer-sub",
    cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));

static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented contains more than "
        "this number of memory accesses, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));

static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

// These flags allow changing the shadow mapping, which normally looks like
//   Shadow = (Mem >> scale) + offset

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));
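// Usage example (illustrative; exact pass spellings vary by LLVM version):
// the experimental mapping knobs can be exercised directly, e.g.
//   opt -passes=asan -asan-mapping-scale=4 -asan-mapping-offset=0x40000000 ...
// which would check accesses against Shadow = (Mem >> 4) + 0x40000000,
// i.e. one shadow byte per 16-byte granule.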
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
    "asan-destructor-kind",
    cl::desc("Sets the ASan destructor kind. The default is to use the value "
             "provided to the pass constructor"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Use global destructors")),
    cl::init(AsanDtorKind::Invalid), cl::Hidden);

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");
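// Usage note (illustrative): these counters go through the standard LLVM
// statistics machinery, so in a build with statistics enabled they can be
// dumped with, e.g., "opt -passes=asan -stats ...".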
namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS4CPU = TargetTriple.isPS4CPU();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad;
  bool IsEmscripten = TargetTriple.isOSEmscripten();
  bool IsAMDGPU = TargetTriple.isAMDGPU();

  // Asan support for AMDGPU assumes X86 as the host right now.
  if (IsAMDGPU)
    IsX86_64 = true;

  ShadowMapping Mapping;

  Mapping.Scale = IsMyriad ? kMyriadShadowScale : kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }
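  // Worked example (illustrative) for the Myriad case below, with
  // Scale = kMyriadShadowScale = 5:
  //   ShadowOffset   = 0x80000000 + 0x20000000 - (0x20000000 >> 5)
  //                  = 0x9f000000
  //   Mapping.Offset = 0x9f000000 - (0x80000000 >> 5) = 0x9b000000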
  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsEmscripten)
      Mapping.Offset = kEmscriptenShadowOffset;
    else if (IsMyriad) {
      uint64_t ShadowOffset = (kMyriadMemoryOffset32 + kMyriadMemorySize32 -
                               (kMyriadMemorySize32 >> Mapping.Scale));
      Mapping.Offset = ShadowOffset - (kMyriadMemoryOffset32 >> Mapping.Scale);
    } else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS4CPU)
      Mapping.Offset = kPS4CPU_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient than adding it (at least on
  // x86) if the offset is a power of two, but on ppc64 we have to use add
  // since the shadow offset is not necessarily 1/8-th of the address space.
  // On SystemZ, we could OR the constant in a single instruction, but it's
  // more efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
                           !IsRISCV64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}
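// Example (illustrative) of the OR-vs-ADD decision above: on Linux/MIPS64 the
// offset 1ULL << 37 is a power of two and MIPS64 is not in the excluded list,
// so OrShadowOffset is true and the check uses "or"; on Linux/x86-64 the
// small offset works out to 0x7fff8000, which is not a power of two, so the
// pass emits "add".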
static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

namespace {

/// Module analysis for getting various metadata about the module.
class ASanGlobalsMetadataWrapperPass : public ModulePass {
public:
  static char ID;

  ASanGlobalsMetadataWrapperPass() : ModulePass(ID) {
    initializeASanGlobalsMetadataWrapperPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    GlobalsMD = GlobalsMetadata(M);
    return false;
  }

  StringRef getPassName() const override {
    return "ASanGlobalsMetadataWrapperPass";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  GlobalsMetadata &getGlobalsMD() { return GlobalsMD; }

private:
  GlobalsMetadata GlobalsMD;
};

char ASanGlobalsMetadataWrapperPass::ID = 0;

/// AddressSanitizer: instrument the code in the module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
                   bool CompileKernel = false, bool Recover = false,
                   bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        GlobalsMD(*GlobalsMD) {
    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    TargetTriple = Triple(M.getTargetTriple());

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
    uint64_t ArraySize = 1;
    if (AI.isArrayAllocation()) {
      const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
      assert(CI && "non-constant array size");
      ArraySize = CI->getZExtValue();
    }
    Type *Ty = AI.getAllocatedType();
    uint64_t SizeInBytes =
        AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
    return SizeInBytes * ArraySize;
  }
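  // Example (illustrative): for "%a = alloca i32, i64 10" the allocated type
  // is i32 (4 bytes under a typical data layout) and ArraySize is 10, so
  // getAllocaSizeInBytes returns 40.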
  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeSize, bool IsWrite,
                                       Value *SizeArgument);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
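  // Illustration (assumed runtime naming from ClMemoryAccessCallbackPrefix):
  // a plain 4-byte load with no experiment active resolves to
  // AsanMemoryAccessCallback[/*IsWrite=*/0][/*Exp=*/0][/*SizeIndex=*/2],
  // i.e. the __asan_load4 callback.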
  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const GlobalsMetadata &GlobalsMD;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
};

class AddressSanitizerLegacyPass : public FunctionPass {
public:
  static char ID;

  explicit AddressSanitizerLegacyPass(
      bool CompileKernel = false, bool Recover = false,
      bool UseAfterScope = false,
      AsanDetectStackUseAfterReturnMode UseAfterReturn =
          AsanDetectStackUseAfterReturnMode::Runtime)
      : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
        UseAfterScope(UseAfterScope), UseAfterReturn(UseAfterReturn) {
    initializeAddressSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ASanGlobalsMetadataWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    GlobalsMetadata &GlobalsMD =
        getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    AddressSanitizer ASan(*F.getParent(), &GlobalsMD, CompileKernel, Recover,
                          UseAfterScope, UseAfterReturn);
    return ASan.instrumentFunction(F, TLI);
  }

private:
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = false,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global)
      : GlobalsMD(*GlobalsMD),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias),
        UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002, for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    TargetTriple = Triple(M.getTargetTriple());
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule(Module &);

private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor(Module &M);

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion(const Module &M) const;

  const GlobalsMetadata &GlobalsMD;
  bool CompileKernel;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  Type *IntptrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};

class ModuleAddressSanitizerLegacyPass : public ModulePass {
public:
  static char ID;

  explicit ModuleAddressSanitizerLegacyPass(
      bool CompileKernel = false, bool Recover = false, bool UseGlobalGC = true,
      bool UseOdrIndicator = false,
      AsanDtorKind DestructorKind = AsanDtorKind::Global)
      : ModulePass(ID), CompileKernel(CompileKernel), Recover(Recover),
        UseGlobalGC(UseGlobalGC), UseOdrIndicator(UseOdrIndicator),
        DestructorKind(DestructorKind) {
    initializeModuleAddressSanitizerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "ModuleAddressSanitizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ASanGlobalsMetadataWrapperPass>();
  }

  bool runOnModule(Module &M) override {
    GlobalsMetadata &GlobalsMD =
        getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
    ModuleAddressSanitizer ASanModule(M, &GlobalsMD, CompileKernel, Recover,
                                      UseGlobalGC, UseOdrIndicator,
                                      DestructorKind);
    return ASanModule.instrumentModule(M);
  }

private:
  bool CompileKernel;
  bool Recover;
  bool UseGlobalGC;
  bool UseOdrIndicator;
  AsanDtorKind DestructorKind;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This, however, does not work inside the
// function that actually catches the exception, most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;
  unsigned StackAlignment;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
  bool HasUntracedLifetimeIntrinsic = false;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
        C(ASan.C), IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale),
        PoisonStack(ClStack &&
                    !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty())
      return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    RetVec.push_back(&CRI);
  }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset
    // intrinsic for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    IRB.CreateCall(
        AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }
  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with another one with changed parameters and replace all
  // of its uses with the new address, so that
  //     addr = alloca type, old_size, align
  // is replaced by
  //     new_size = (old_size + additional_size) * sizeof(type)
  //     tmp = alloca i8, new_size, max(align, 32)
  //     addr = tmp + 32 (first 32 bytes are for the left redzone).
  // additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!ASan.isInterestingAlloca(AI)) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca; we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    auto *Size = cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    // Currently we can only handle lifetime markers pointing to the
    // beginning of the alloca.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
    if (!AI) {
      HasUntracedLifetimeIntrinsic = true;
      return;
    }
    // We're interested only in allocas we can handle.
    if (!ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);
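  // Illustration (example values): with ShadowMask = {1, 0, 1} and
  // ShadowBytes = {0xf1, 0x00, 0xf2}, shadow bytes 0 and 2 are written while
  // byte 1 is left untouched because it is assumed to already be zero.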
  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void LocationMetadata::parse(MDNode *MDN) {
  assert(MDN->getNumOperands() == 3);
  MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
  Filename = DIFilename->getString();
  LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
  ColumnNo =
      mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
}

// FIXME: It would be cleaner to attach relevant metadata to the globals we
// want to sanitize and read that metadata on each pass over a function,
// rather than reading module-level metadata up front.
GlobalsMetadata::GlobalsMetadata(Module &M) {
  NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
  if (!Globals)
    return;
  for (auto MDN : Globals->operands()) {
    // Metadata node contains the global and the fields of "Entry".
    assert(MDN->getNumOperands() == 5);
    auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
    // The optimizer may optimize away a global entirely.
    if (!V)
      continue;
    auto *StrippedV = V->stripPointerCasts();
    auto *GV = dyn_cast<GlobalVariable>(StrippedV);
    if (!GV)
      continue;
    // We can already have an entry for GV if it was merged with another
    // global.
    Entry &E = Entries[GV];
    if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
      E.SourceLoc.parse(Loc);
    if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
      E.Name = Name->getString();
    ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3));
    E.IsDynInit |= IsDynInit->isOne();
    ConstantInt *IsExcluded =
        mdconst::extract<ConstantInt>(MDN->getOperand(4));
    E.IsExcluded |= IsExcluded->isOne();
  }
}
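// Example (illustrative) of the metadata shape parsed above; the field order
// is {global, source location, name, is-dynamically-initialized, is-excluded}:
//   !llvm.asan.globals = !{!0}
//   !0 = !{i32* @g, !1, !"g", i1 false, i1 false}
//   !1 = !{!"t.cpp", i32 3, i32 5}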
AnalysisKey ASanGlobalsMetadataAnalysis::Key;

GlobalsMetadata ASanGlobalsMetadataAnalysis::run(Module &M,
                                                 ModuleAnalysisManager &AM) {
  return GlobalsMetadata(M);
}

AddressSanitizerPass::AddressSanitizerPass(
    bool CompileKernel, bool Recover, bool UseAfterScope,
    AsanDetectStackUseAfterReturnMode UseAfterReturn)
    : CompileKernel(CompileKernel), Recover(Recover),
      UseAfterScope(UseAfterScope), UseAfterReturn(UseAfterReturn) {}

PreservedAnalyses AddressSanitizerPass::run(Function &F,
                                            AnalysisManager<Function> &AM) {
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  Module &M = *F.getParent();
  if (auto *R = MAMProxy.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) {
    const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
    AddressSanitizer Sanitizer(M, R, CompileKernel, Recover, UseAfterScope,
                               UseAfterReturn);
    if (Sanitizer.instrumentFunction(F, TLI))
      return PreservedAnalyses::none();
    return PreservedAnalyses::all();
  }

  report_fatal_error(
      "The ASanGlobalsMetadataAnalysis is required to run before "
      "AddressSanitizer can run");
  return PreservedAnalyses::all();
}

ModuleAddressSanitizerPass::ModuleAddressSanitizerPass(
    bool CompileKernel, bool Recover, bool UseGlobalGC, bool UseOdrIndicator,
    AsanDtorKind DestructorKind)
    : CompileKernel(CompileKernel), Recover(Recover), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind) {}

PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M,
                                                  AnalysisManager<Module> &AM) {
  GlobalsMetadata &GlobalsMD = AM.getResult<ASanGlobalsMetadataAnalysis>(M);
  ModuleAddressSanitizer Sanitizer(M, &GlobalsMD, CompileKernel, Recover,
                                   UseGlobalGC, UseOdrIndicator,
                                   DestructorKind);
  if (Sanitizer.instrumentModule(M))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

INITIALIZE_PASS(ASanGlobalsMetadataWrapperPass, "asan-globals-md",
                "Read metadata to mark which globals should be instrumented "
                "when running ASan.",
                false, true)

char AddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(
    AddressSanitizerLegacyPass, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    AddressSanitizerLegacyPass, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)

FunctionPass *llvm::createAddressSanitizerFunctionPass(
    bool CompileKernel, bool Recover, bool UseAfterScope,
    AsanDetectStackUseAfterReturnMode UseAfterReturn) {
  assert(!CompileKernel || Recover);
  return new AddressSanitizerLegacyPass(CompileKernel, Recover, UseAfterScope,
                                        UseAfterReturn);
}
char ModuleAddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS(
    ModuleAddressSanitizerLegacyPass, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs. "
    "ModulePass",
    false, false)

ModulePass *llvm::createModuleAddressSanitizerLegacyPassPass(
    bool CompileKernel, bool Recover, bool UseGlobalsGC, bool UseOdrIndicator,
    AsanDtorKind Destructor) {
  assert(!CompileKernel || Recover);
  return new ModuleAddressSanitizerLegacyPass(
      CompileKernel, Recover, UseGlobalsGC, UseOdrIndicator, Destructor);
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

/// Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
                                                       LocationMetadata MD) {
  Constant *LocData[] = {
      createPrivateGlobalForString(M, MD.Filename, true, kAsanGenPrefix),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
  };
  auto LocStruct = ConstantStruct::getAnon(LocData);
  auto GV = new GlobalVariable(M, LocStruct->getType(), true,
                               GlobalValue::PrivateLinkage, LocStruct,
                               kAsanGenPrefix);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  return GV;
}

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().startswith("llvm."))
    return true;

  // Do not instrument asan globals.
  if (G->getName().startswith(kAsanGenPrefix) ||
      G->getName().startswith(kSanCovGenPrefix) ||
      G->getName().startswith(kODRGenPrefix))
    return true;

  // Do not instrument gcov counter arrays.
  if (G->getName() == "__llvm_gcov_ctr")
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
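// Example (illustrative) of the IR memToShadow produces for the default
// Linux/x86-64 mapping (Scale = 3, Offset = 0x7fff8000, add rather than or):
//   %shadow.shifted = lshr i64 %addr, 3
//   %shadow.addr    = add i64 %shadow.shifted, 2147450880 ; 0x7fff8000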
// Instrument memset/memmove/memcpy.
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        AsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
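// Illustration (assumed runtime behavior) of the rewrite above:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
// becomes, roughly,
//   call i8* @__asan_memcpy(i8* %dst, i8* %src, i64 %n)
// so the runtime can range-check both buffers before copying.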
1454 if (LocalDynamicShadow == I) 1455 return; 1456 1457 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 1458 if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand())) 1459 return; 1460 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false, 1461 LI->getType(), LI->getAlign()); 1462 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 1463 if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand())) 1464 return; 1465 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true, 1466 SI->getValueOperand()->getType(), SI->getAlign()); 1467 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 1468 if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand())) 1469 return; 1470 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true, 1471 RMW->getValOperand()->getType(), None); 1472 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { 1473 if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand())) 1474 return; 1475 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true, 1476 XCHG->getCompareOperand()->getType(), None); 1477 } else if (auto CI = dyn_cast<CallInst>(I)) { 1478 auto *F = CI->getCalledFunction(); 1479 if (F && (F->getName().startswith("llvm.masked.load.") || 1480 F->getName().startswith("llvm.masked.store."))) { 1481 bool IsWrite = F->getName().startswith("llvm.masked.store."); 1482 // Masked store has an initial operand for the value. 1483 unsigned OpOffset = IsWrite ? 1 : 0; 1484 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads) 1485 return; 1486 1487 auto BasePtr = CI->getOperand(OpOffset); 1488 if (ignoreAccess(BasePtr)) 1489 return; 1490 auto Ty = cast<PointerType>(BasePtr->getType())->getElementType(); 1491 MaybeAlign Alignment = Align(1); 1492 // Otherwise no alignment guarantees. We probably got Undef. 1493 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset))) 1494 Alignment = Op->getMaybeAlignValue(); 1495 Value *Mask = CI->getOperand(2 + OpOffset); 1496 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask); 1497 } else { 1498 for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) { 1499 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) || 1500 ignoreAccess(CI->getArgOperand(ArgNo))) 1501 continue; 1502 Type *Ty = CI->getParamByValType(ArgNo); 1503 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1)); 1504 } 1505 } 1506 } 1507 } 1508 1509 static bool isPointerOperand(Value *V) { 1510 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V); 1511 } 1512 1513 // This is a rough heuristic; it may cause both false positives and 1514 // false negatives. The proper implementation requires cooperation with 1515 // the frontend. 1516 static bool isInterestingPointerComparison(Instruction *I) { 1517 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { 1518 if (!Cmp->isRelational()) 1519 return false; 1520 } else { 1521 return false; 1522 } 1523 return isPointerOperand(I->getOperand(0)) && 1524 isPointerOperand(I->getOperand(1)); 1525 } 1526 1527 // This is a rough heuristic; it may cause both false positives and 1528 // false negatives. The proper implementation requires cooperation with 1529 // the frontend. 
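//
// As an illustrative sketch (not part of the original source), the kind of
// user code these two heuristics are aimed at looks like:
//
//   char *p = ..., *q = ...;     // possibly pointers into unrelated objects
//   ptrdiff_t d = p - q;         // lowered to a 'sub' of two ptrtoint values
//   bool b = p < q;              // lowered to a relational icmp
//
// Any relational icmp or integer 'sub' whose operands are pointers (or
// ptrtoint casts) matches, which is why both false positives and false
// negatives are possible without frontend cooperation.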
static bool isInterestingPointerSubtraction(Instruction *I) {
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub)
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}

bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization, we don't
  // have to instrument it. However, if a global does not have an initializer
  // at all, we assume it has a dynamic initializer (in another TU).
  //
  // FIXME: Metadata should be attached directly to the global instead of
  // being added to llvm.asan.globals.
  return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}

void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
    Instruction *I) {
  IRBuilder<> IRB(I);
  FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (Value *&i : Param) {
    if (i->getType()->isPointerTy())
      i = IRB.CreatePointerCast(i, IntptrTy);
  }
  IRB.CreateCall(F, Param);
}

static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                Instruction *InsertBefore, Value *Addr,
                                MaybeAlign Alignment, unsigned Granularity,
                                uint32_t TypeSize, bool IsWrite,
                                Value *SizeArgument, bool UseCalls,
                                uint32_t Exp) {
  // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
  // if the data is properly aligned.
  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
       TypeSize == 128) &&
      (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
    return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
                                   nullptr, UseCalls, Exp);
  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
                                         IsWrite, nullptr, UseCalls, Exp);
}

static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
                                        const DataLayout &DL, Type *IntptrTy,
                                        Value *Mask, Instruction *I,
                                        Value *Addr, MaybeAlign Alignment,
                                        unsigned Granularity, uint32_t TypeSize,
                                        bool IsWrite, Value *SizeArgument,
                                        bool UseCalls, uint32_t Exp) {
  auto *VTy = cast<FixedVectorType>(
      cast<PointerType>(Addr->getType())->getElementType());
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getNumElements();
  auto Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to doInstrumentAddress
        // with InsertBefore == I
      }
    } else {
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    IRBuilder<> IRB(InsertBefore);
    InstrumentedAddress =
        IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
                        Granularity, ElemTypeSize, IsWrite, SizeArgument,
                        UseCalls, Exp);
  }
}

void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     InterestingMemoryOperand &O, bool UseCalls,
                                     const DataLayout &DL) {
  Value *Addr = O.getPtr();

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, you set Exp to a non-zero value (a mask of
  // optimization experiments that want to remove instrumentation of this
  // instruction). If Exp is non-zero, this pass will emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1).
  // These calls make the runtime terminate the program in a special way (with
  // a different exit status). Then you run the new compiler on a buggy corpus,
  // collect the special terminations (ideally, you don't see them at all --
  // no false negatives) and make the decision on the optimization.
  uint32_t Exp = ClForceExperiment;

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  if (O.IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  if (O.MaybeMask) {
    instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(),
                                Addr, O.Alignment, Granularity, O.TypeSize,
                                O.IsWrite, nullptr, UseCalls, Exp);
  } else {
    doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
                        Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls,
                        Exp);
  }
}

Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                 Value *Addr, bool IsWrite,
                                                 size_t AccessSizeIndex,
                                                 Value *SizeArgument,
                                                 uint32_t Exp) {
  IRBuilder<> IRB(InsertBefore);
  Value *ExpVal = Exp == 0 ?
      nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
  CallInst *Call = nullptr;
  if (SizeArgument) {
    if (Exp == 0)
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
                            {Addr, SizeArgument});
    else
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
                            {Addr, SizeArgument, ExpVal});
  } else {
    if (Exp == 0)
      Call =
          IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
    else
      Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
                            {Addr, ExpVal});
  }

  Call->setCannotMerge();
  return Call;
}

Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                           Value *ShadowValue,
                                           uint32_t TypeSize) {
  size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte =
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}

Instruction *AddressSanitizer::instrumentAMDGPUAddress(
    Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
    uint32_t TypeSize, bool IsWrite, Value *SizeArgument) {
  // Do not instrument unsupported addrspaces.
  if (isUnsupportedAMDGPUAddrspace(Addr))
    return nullptr;
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  // Follow host instrumentation for global and constant addresses.
  if (PtrTy->getPointerAddressSpace() != 0)
    return InsertBefore;
  // Instrument generic addresses in supported address spaces.
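  // A rough sketch of the guard emitted below (IR-level pseudocode; the
  // actual callees are the shared/private address predicates declared in
  // initializeCallbacks, and the value names are illustrative only):
  //
  //   %shared  = call i1 @is_shared(i8* %addr)
  //   %private = call i1 @is_private(i8* %addr)
  //   %either  = or i1 %shared, %private
  //   %generic = icmp ne i1 true, %either   ; neither shared nor private
  //   br i1 %generic, label %instrument, label %cont
  //
  // Only addresses that reach %instrument fall through to the usual shadow
  // check inserted by instrumentAddress.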
1732 IRBuilder<> IRB(InsertBefore); 1733 Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()); 1734 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong}); 1735 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong}); 1736 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate); 1737 Value *Cmp = IRB.CreateICmpNE(IRB.getTrue(), IsSharedOrPrivate); 1738 Value *AddrSpaceZeroLanding = 1739 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false); 1740 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding); 1741 return InsertBefore; 1742 } 1743 1744 void AddressSanitizer::instrumentAddress(Instruction *OrigIns, 1745 Instruction *InsertBefore, Value *Addr, 1746 uint32_t TypeSize, bool IsWrite, 1747 Value *SizeArgument, bool UseCalls, 1748 uint32_t Exp) { 1749 bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad; 1750 1751 if (TargetTriple.isAMDGPU()) { 1752 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr, 1753 TypeSize, IsWrite, SizeArgument); 1754 if (!InsertBefore) 1755 return; 1756 } 1757 1758 IRBuilder<> IRB(InsertBefore); 1759 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1760 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); 1761 1762 if (UseCalls) { 1763 if (Exp == 0) 1764 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], 1765 AddrLong); 1766 else 1767 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], 1768 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1769 return; 1770 } 1771 1772 if (IsMyriad) { 1773 // Strip the cache bit and do range check. 1774 // AddrLong &= ~kMyriadCacheBitMask32 1775 AddrLong = IRB.CreateAnd(AddrLong, ~kMyriadCacheBitMask32); 1776 // Tag = AddrLong >> kMyriadTagShift 1777 Value *Tag = IRB.CreateLShr(AddrLong, kMyriadTagShift); 1778 // Tag == kMyriadDDRTag 1779 Value *TagCheck = 1780 IRB.CreateICmpEQ(Tag, ConstantInt::get(IntptrTy, kMyriadDDRTag)); 1781 1782 Instruction *TagCheckTerm = 1783 SplitBlockAndInsertIfThen(TagCheck, InsertBefore, false, 1784 MDBuilder(*C).createBranchWeights(1, 100000)); 1785 assert(cast<BranchInst>(TagCheckTerm)->isUnconditional()); 1786 IRB.SetInsertPoint(TagCheckTerm); 1787 InsertBefore = TagCheckTerm; 1788 } 1789 1790 Type *ShadowTy = 1791 IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale)); 1792 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); 1793 Value *ShadowPtr = memToShadow(AddrLong, IRB); 1794 Value *CmpVal = Constant::getNullValue(ShadowTy); 1795 Value *ShadowValue = 1796 IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); 1797 1798 Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); 1799 size_t Granularity = 1ULL << Mapping.Scale; 1800 Instruction *CrashTerm = nullptr; 1801 1802 if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { 1803 // We use branch weights for the slow path check, to indicate that the slow 1804 // path is rarely taken. This seems to be the case for SPEC benchmarks. 
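    // For reference, the overall shape of the check emitted here is the
    // classic ASan sequence (C-like pseudocode, assuming the default 8-byte
    // shadow granularity; this is a sketch, not the exact generated code):
    //
    //   int8_t shadow = *(int8_t *)MemToShadow(addr);
    //   if (shadow != 0) {                               // fast path
    //     if ((int8_t)((addr & 7) + size - 1) >= shadow)
    //       __asan_report_loadN(addr);                   // slow path hit
    //   }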
    Instruction *CheckTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    if (Recover) {
      CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
    } else {
      BasicBlock *CrashBlock =
          BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
      CrashTerm = new UnreachableInst(*C, CrashBlock);
      BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
      ReplaceInstWithInst(CheckTerm, NewTerm);
    }
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
  }

  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
                                         AccessSizeIndex, SizeArgument, Exp);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}

// Instrument unusual size or unusual alignment.
// We cannot do it with a single check, so we do a 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
    Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
    bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
  IRBuilder<> IRB(InsertBefore);
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    if (Exp == 0)
      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
                     {AddrLong, Size});
    else
      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
                     {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
  } else {
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
        Addr->getType());
    instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
    instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
  }
}

void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
                                                  GlobalValue *ModuleName) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(&GlobalInit.front(),
                  GlobalInit.front().getFirstInsertionPt());

  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit.getBasicBlockList())
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI);
}

void ModuleAddressSanitizer::createInitializerPoisonCalls(
    Module &M, GlobalValue *ModuleName) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (!GV)
    return;

  ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return;

  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP)) continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);

    // Must have a function or null ptr.
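    // For context: each llvm.global_ctors entry is a struct of the form
    //   { i32 priority, void ()* ctor, i8* associated_data }
    // so getOperand(0) below is the priority and getOperand(1) is the
    // constructor function itself.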
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() == kAsanModuleCtorName) continue;
      auto *Priority = cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
        continue;
      poisonOneInitializer(*F, ModuleName);
    }
  }
}

const GlobalVariable *
ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
  // In case this function should be expanded to include rules that do not just
  // apply when CompileKernel is true, either guard all existing rules with an
  // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
  // should also apply to user space.
  assert(CompileKernel && "Only expecting to be called when compiling kernel");

  const Constant *C = GA.getAliasee();

  // When compiling the kernel, globals that are aliased by symbols prefixed
  // by "__" are special and cannot be padded with a redzone.
  if (GA.getName().startswith("__"))
    return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());

  return nullptr;
}

bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
  Type *Ty = G->getValueType();
  LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  // FIXME: Metadata should be attached directly to the global instead of
  // being added to llvm.asan.globals.
  if (GlobalsMD.get(G).IsExcluded) return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  // Globals in address spaces 1 and 4 are supported for AMDGPU.
  if (G->getAddressSpace() &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
    return false;
  if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this global if the alignment is large.
  if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false;

  // For non-COFF targets, only instrument globals known to be defined by this
  // TU.
  // FIXME: We can instrument comdat globals on ELF if we are using the
  // GC-friendly metadata scheme.
  if (!TargetTriple.isOSBinFormatCOFF()) {
    if (!G->hasExactDefinition() || G->hasComdat())
      return false;
  } else {
    // On COFF, don't instrument non-ODR linkages.
    if (G->isInterposable())
      return false;
  }

  // If a comdat is present, it must have a selection kind that implies ODR
  // semantics: no duplicates, any, or exact match.
  if (Comdat *C = G->getComdat()) {
    switch (C->getSelectionKind()) {
    case Comdat::Any:
    case Comdat::ExactMatch:
    case Comdat::NoDuplicates:
      break;
    case Comdat::Largest:
    case Comdat::SameSize:
      return false;
    }
  }

  if (G->hasSection()) {
    // The kernel uses explicit sections mostly for special global variables
    // that we should not instrument. E.g. the kernel may rely on their layout
    // without redzones, or remove them at link time ("discard.*"), etc.
    if (CompileKernel)
      return false;

    StringRef Section = G->getSection();

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
    // Do not instrument globals from special LLVM sections.
    if (Section.find("__llvm") != StringRef::npos ||
        Section.find("__LLVM") != StringRef::npos)
      return false;

    // Do not instrument function pointers to initialization and termination
    // routines: the dynamic linker will not properly handle redzones.
    if (Section.startswith(".preinit_array") ||
        Section.startswith(".init_array") ||
        Section.startswith(".fini_array")) {
      return false;
    }

    // Do not instrument user-defined sections (with names resembling
    // valid C identifiers).
    if (TargetTriple.isOSBinFormatELF()) {
      if (llvm::all_of(Section,
                       [](char c) { return llvm::isAlnum(c) || c == '_'; }))
        return false;
    }

    // On COFF, if the section name contains '$', it is highly likely that the
    // user is using section sorting to create an array of globals similar to
    // the way initialization callbacks are registered in .init_array and
    // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
    // to such globals is counterproductive, because the intent is that they
    // will form an array, and out-of-bounds accesses are expected.
    // See https://github.com/google/sanitizers/issues/305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
      LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
                        << *G << "\n");
      return false;
    }

    if (TargetTriple.isOSBinFormatMachO()) {
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      bool TAAParsed;
      cantFail(MCSectionMachO::ParseSectionSpecifier(
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));

      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      // them.
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
        LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
        return false;
      }
      // See https://github.com/google/sanitizers/issues/32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
        return false;
      }
      // The linker merges the contents of cstring_literals and removes the
      // trailing zeroes.
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
        return false;
      }
    }
  }

  if (CompileKernel) {
    // Globals that are prefixed by "__" are special and cannot be padded
    // with a redzone.
2046 if (G->getName().startswith("__")) 2047 return false; 2048 } 2049 2050 return true; 2051 } 2052 2053 // On Mach-O platforms, we emit global metadata in a separate section of the 2054 // binary in order to allow the linker to properly dead strip. This is only 2055 // supported on recent versions of ld64. 2056 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const { 2057 if (!TargetTriple.isOSBinFormatMachO()) 2058 return false; 2059 2060 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11)) 2061 return true; 2062 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9)) 2063 return true; 2064 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2)) 2065 return true; 2066 2067 return false; 2068 } 2069 2070 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const { 2071 switch (TargetTriple.getObjectFormat()) { 2072 case Triple::COFF: return ".ASAN$GL"; 2073 case Triple::ELF: return "asan_globals"; 2074 case Triple::MachO: return "__DATA,__asan_globals,regular"; 2075 case Triple::Wasm: 2076 case Triple::GOFF: 2077 case Triple::XCOFF: 2078 report_fatal_error( 2079 "ModuleAddressSanitizer not implemented for object file format"); 2080 case Triple::UnknownObjectFormat: 2081 break; 2082 } 2083 llvm_unreachable("unsupported object format"); 2084 } 2085 2086 void ModuleAddressSanitizer::initializeCallbacks(Module &M) { 2087 IRBuilder<> IRB(*C); 2088 2089 // Declare our poisoning and unpoisoning functions. 2090 AsanPoisonGlobals = 2091 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy); 2092 AsanUnpoisonGlobals = 2093 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy()); 2094 2095 // Declare functions that register/unregister globals. 2096 AsanRegisterGlobals = M.getOrInsertFunction( 2097 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 2098 AsanUnregisterGlobals = M.getOrInsertFunction( 2099 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 2100 2101 // Declare the functions that find globals in a shared object and then invoke 2102 // the (un)register function on them. 2103 AsanRegisterImageGlobals = M.getOrInsertFunction( 2104 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 2105 AsanUnregisterImageGlobals = M.getOrInsertFunction( 2106 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 2107 2108 AsanRegisterElfGlobals = 2109 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(), 2110 IntptrTy, IntptrTy, IntptrTy); 2111 AsanUnregisterElfGlobals = 2112 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(), 2113 IntptrTy, IntptrTy, IntptrTy); 2114 } 2115 2116 // Put the metadata and the instrumented global in the same group. This ensures 2117 // that the metadata is discarded if the instrumented global is discarded. 2118 void ModuleAddressSanitizer::SetComdatForGlobalMetadata( 2119 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) { 2120 Module &M = *G->getParent(); 2121 Comdat *C = G->getComdat(); 2122 if (!C) { 2123 if (!G->hasName()) { 2124 // If G is unnamed, it must be internal. Give it an artificial name 2125 // so we can put it in a comdat. 
2126 assert(G->hasLocalLinkage()); 2127 G->setName(Twine(kAsanGenPrefix) + "_anon_global"); 2128 } 2129 2130 if (!InternalSuffix.empty() && G->hasLocalLinkage()) { 2131 std::string Name = std::string(G->getName()); 2132 Name += InternalSuffix; 2133 C = M.getOrInsertComdat(Name); 2134 } else { 2135 C = M.getOrInsertComdat(G->getName()); 2136 } 2137 2138 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private 2139 // linkage to internal linkage so that a symbol table entry is emitted. This 2140 // is necessary in order to create the comdat group. 2141 if (TargetTriple.isOSBinFormatCOFF()) { 2142 C->setSelectionKind(Comdat::NoDuplicates); 2143 if (G->hasPrivateLinkage()) 2144 G->setLinkage(GlobalValue::InternalLinkage); 2145 } 2146 G->setComdat(C); 2147 } 2148 2149 assert(G->hasComdat()); 2150 Metadata->setComdat(G->getComdat()); 2151 } 2152 2153 // Create a separate metadata global and put it in the appropriate ASan 2154 // global registration section. 2155 GlobalVariable * 2156 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer, 2157 StringRef OriginalName) { 2158 auto Linkage = TargetTriple.isOSBinFormatMachO() 2159 ? GlobalVariable::InternalLinkage 2160 : GlobalVariable::PrivateLinkage; 2161 GlobalVariable *Metadata = new GlobalVariable( 2162 M, Initializer->getType(), false, Linkage, Initializer, 2163 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName)); 2164 Metadata->setSection(getGlobalMetadataSection()); 2165 return Metadata; 2166 } 2167 2168 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) { 2169 AsanDtorFunction = Function::createWithDefaultAttr( 2170 FunctionType::get(Type::getVoidTy(*C), false), 2171 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M); 2172 AsanDtorFunction->addAttribute(AttributeList::FunctionIndex, 2173 Attribute::NoUnwind); 2174 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); 2175 2176 return ReturnInst::Create(*C, AsanDtorBB); 2177 } 2178 2179 void ModuleAddressSanitizer::InstrumentGlobalsCOFF( 2180 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2181 ArrayRef<Constant *> MetadataInitializers) { 2182 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2183 auto &DL = M.getDataLayout(); 2184 2185 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); 2186 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 2187 Constant *Initializer = MetadataInitializers[i]; 2188 GlobalVariable *G = ExtendedGlobals[i]; 2189 GlobalVariable *Metadata = 2190 CreateMetadataGlobal(M, Initializer, G->getName()); 2191 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); 2192 Metadata->setMetadata(LLVMContext::MD_associated, MD); 2193 MetadataGlobals[i] = Metadata; 2194 2195 // The MSVC linker always inserts padding when linking incrementally. We 2196 // cope with that by aligning each struct to its size, which must be a power 2197 // of two. 2198 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType()); 2199 assert(isPowerOf2_32(SizeOfGlobalStruct) && 2200 "global metadata will not be padded appropriately"); 2201 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct)); 2202 2203 SetComdatForGlobalMetadata(G, Metadata, ""); 2204 } 2205 2206 // Update llvm.compiler.used, adding the new metadata globals. This is 2207 // needed so that during LTO these variables stay alive. 
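  // (llvm.compiler.used only obliges the compiler to keep the symbols; unlike
  // llvm.used, it still lets the linker discard them if they end up unused.)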
  if (!MetadataGlobals.empty())
    appendToCompilerUsed(M, MetadataGlobals);
}

void ModuleAddressSanitizer::InstrumentGlobalsELF(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers,
    const std::string &UniqueModuleId) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  // Putting globals in a comdat changes the semantics and can potentially
  // cause false-negative ODR violations at link time. If ODR indicators are
  // used, we keep the comdat sections, as link-time ODR violations will be
  // detected on the ODR indicator symbols.
  bool UseComdatForGlobalsGC = UseOdrIndicator;

  SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata =
        CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
    MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
    Metadata->setMetadata(LLVMContext::MD_associated, MD);
    MetadataGlobals[i] = Metadata;

    if (UseComdatForGlobalsGC)
      SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
  }

  // Update llvm.compiler.used, adding the new metadata globals. This is
  // needed so that during LTO these variables stay alive.
  if (!MetadataGlobals.empty())
    appendToCompilerUsed(M, MetadataGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // Common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);

  // Create start and stop symbols.
  GlobalVariable *StartELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__start_" + getGlobalMetadataSection());
  StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
  GlobalVariable *StopELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__stop_" + getGlobalMetadataSection());
  StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);

  // Create a call to register the globals with the runtime.
  IRB.CreateCall(AsanRegisterElfGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                  IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                  IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  if (DestructorKind != AsanDtorKind::None) {
    IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
    IrbDtor.CreateCall(AsanUnregisterElfGlobals,
                       {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                        IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                        IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
  }
}

void ModuleAddressSanitizer::InstrumentGlobalsMachO(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  // On recent Mach-O platforms, use a structure which binds the liveness of
  // the global variable to the metadata struct. Keep the list of "liveness"
  // GVs created so they can be added to llvm.compiler.used.
  StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
  SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());

  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    Constant *Initializer = MetadataInitializers[i];
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata =
        CreateMetadataGlobal(M, Initializer, G->getName());

    // On recent Mach-O platforms, we emit the global metadata in a way that
    // allows the linker to properly strip dead globals.
    auto LivenessBinder =
        ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
                            ConstantExpr::getPointerCast(Metadata, IntptrTy));
    GlobalVariable *Liveness = new GlobalVariable(
        M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
        Twine("__asan_binder_") + G->getName());
    Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
    LivenessGlobals[i] = Liveness;
  }

  // Update llvm.compiler.used, adding the new liveness globals. This is
  // needed so that during LTO these variables stay alive. The alternative
  // would be to have the linker handle the LTO symbols, but libLTO's
  // current API does not expose access to the section for each symbol.
  if (!LivenessGlobals.empty())
    appendToCompilerUsed(M, LivenessGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // Common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);

  IRB.CreateCall(AsanRegisterImageGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
2332 if (DestructorKind != AsanDtorKind::None) { 2333 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2334 IrbDtor.CreateCall(AsanUnregisterImageGlobals, 2335 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); 2336 } 2337 } 2338 2339 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray( 2340 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2341 ArrayRef<Constant *> MetadataInitializers) { 2342 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2343 unsigned N = ExtendedGlobals.size(); 2344 assert(N > 0); 2345 2346 // On platforms that don't have a custom metadata section, we emit an array 2347 // of global metadata structures. 2348 ArrayType *ArrayOfGlobalStructTy = 2349 ArrayType::get(MetadataInitializers[0]->getType(), N); 2350 auto AllGlobals = new GlobalVariable( 2351 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage, 2352 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), ""); 2353 if (Mapping.Scale > 3) 2354 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale)); 2355 2356 IRB.CreateCall(AsanRegisterGlobals, 2357 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 2358 ConstantInt::get(IntptrTy, N)}); 2359 2360 // We also need to unregister globals at the end, e.g., when a shared library 2361 // gets closed. 2362 if (DestructorKind != AsanDtorKind::None) { 2363 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2364 IrbDtor.CreateCall(AsanUnregisterGlobals, 2365 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 2366 ConstantInt::get(IntptrTy, N)}); 2367 } 2368 } 2369 2370 // This function replaces all global variables with new variables that have 2371 // trailing redzones. It also creates a function that poisons 2372 // redzones and inserts this function into llvm.global_ctors. 2373 // Sets *CtorComdat to true if the global registration code emitted into the 2374 // asan constructor is comdat-compatible. 2375 bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M, 2376 bool *CtorComdat) { 2377 *CtorComdat = false; 2378 2379 // Build set of globals that are aliased by some GA, where 2380 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable. 2381 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions; 2382 if (CompileKernel) { 2383 for (auto &GA : M.aliases()) { 2384 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA)) 2385 AliasedGlobalExclusions.insert(GV); 2386 } 2387 } 2388 2389 SmallVector<GlobalVariable *, 16> GlobalsToChange; 2390 for (auto &G : M.globals()) { 2391 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G)) 2392 GlobalsToChange.push_back(&G); 2393 } 2394 2395 size_t n = GlobalsToChange.size(); 2396 if (n == 0) { 2397 *CtorComdat = true; 2398 return false; 2399 } 2400 2401 auto &DL = M.getDataLayout(); 2402 2403 // A global is described by a structure 2404 // size_t beg; 2405 // size_t size; 2406 // size_t size_with_redzone; 2407 // const char *name; 2408 // const char *module_name; 2409 // size_t has_dynamic_init; 2410 // void *source_location; 2411 // size_t odr_indicator; 2412 // We initialize an array of such structures and pass it to a run-time call. 
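  // For illustration, the runtime consumes each entry as a struct roughly
  // equivalent to the following (a sketch; the authoritative definition is
  // __asan_global in compiler-rt):
  //
  //   struct Global {
  //     uptr beg, size, size_with_redzone;
  //     const char *name, *module_name;
  //     uptr has_dynamic_init;
  //     void *source_location;
  //     uptr odr_indicator;
  //   };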
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, IntptrTy);
  SmallVector<GlobalVariable *, 16> NewGlobals(n);
  SmallVector<Constant *, 16> Initializers(n);

  bool HasDynamicallyInitializedGlobals = false;

  // We shouldn't merge identical module names, as this string serves as a
  // unique module ID at runtime.
  GlobalVariable *ModuleName = createPrivateGlobalForString(
      M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);

  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = GlobalsToChange[i];

    // FIXME: Metadata should be attached directly to the global instead of
    // being added to llvm.asan.globals.
    auto MD = GlobalsMD.get(G);
    StringRef NameForGlobal = G->getName();
    // Create a string holding the global name (use the global name from
    // metadata if it's available, otherwise just write the name of the global
    // variable).
    GlobalVariable *Name = createPrivateGlobalForString(
        M, MD.Name.empty() ? NameForGlobal : MD.Name,
        /*AllowMerging*/ true, kAsanGenPrefix);

    Type *Ty = G->getValueType();
    const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
    const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);

    StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
    Constant *NewInitializer = ConstantStruct::get(
        NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
      Linkage = GlobalValue::InternalLinkage;
    GlobalVariable *NewGlobal = new GlobalVariable(
        M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
        G->getThreadLocalMode(), G->getAddressSpace());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setComdat(G->getComdat());
    NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal()));
    // Don't fold globals with redzones. ODR violation detector and redzone
    // poisoning implicitly creates a dependence on the global's address, so it
    // is no longer valid for it to be marked unnamed_addr.
    NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

    // Move null-terminated C strings to "__asan_cstring" section on Darwin.
    if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
        G->isConstant()) {
      auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
      if (Seq && Seq->isCString())
        NewGlobal->setSection("__TEXT,__asan_cstring,regular");
    }

    // Transfer the debug info and type metadata. The payload starts at offset
    // zero so we can copy the metadata over as is.
    NewGlobal->copyMetadata(G, 0);

    Value *Indices2[2];
    Indices2[0] = IRB.getInt32(0);
    Indices2[1] = IRB.getInt32(0);

    G->replaceAllUsesWith(
        ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
    NewGlobal->takeName(G);
    G->eraseFromParent();
    NewGlobals[i] = NewGlobal;

    Constant *SourceLoc;
    if (!MD.SourceLoc.empty()) {
      auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
      SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
    } else {
      SourceLoc = ConstantInt::get(IntptrTy, 0);
    }

    Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
    GlobalValue *InstrumentedGlobal = NewGlobal;

    bool CanUsePrivateAliases =
        TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
        TargetTriple.isOSBinFormatWasm();
    if (CanUsePrivateAliases && UsePrivateAlias) {
      // Create a local alias for NewGlobal to avoid a crash on ODR violations
      // between instrumented and non-instrumented libraries.
      InstrumentedGlobal =
          GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
    }

    // ODR violations cannot happen for local linkage.
    if (NewGlobal->hasLocalLinkage()) {
      ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
                                               IRB.getInt8PtrTy());
    } else if (UseOdrIndicator) {
      // With local aliases, we need to provide another externally visible
      // symbol __odr_asan_XXX to detect ODR violations.
      auto *ODRIndicatorSym =
          new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
                             Constant::getNullValue(IRB.getInt8Ty()),
                             kODRGenPrefix + NameForGlobal, nullptr,
                             NewGlobal->getThreadLocalMode());

      // Set meaningful attributes for the indicator symbol.
      ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
      ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
      ODRIndicatorSym->setAlignment(Align(1));
      ODRIndicator = ODRIndicatorSym;
    }

    Constant *Initializer = ConstantStruct::get(
        GlobalStructTy,
        ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(ModuleName, IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc,
        ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));

    if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;

    LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");

    Initializers[i] = Initializer;
  }

  // Add instrumented globals to the llvm.compiler.used list to prevent LTO
  // from ConstantMerge'ing them.
  SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = NewGlobals[i];
    if (G->getName().empty()) continue;
    GlobalsToAddToUsedList.push_back(G);
  }
  appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));

  std::string ELFUniqueModuleId =
      (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ?
          getUniqueModuleId(&M)
          : "";

  if (!ELFUniqueModuleId.empty()) {
    InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
    *CtorComdat = true;
  } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
    InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
  } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
    InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
  } else {
    InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
  }

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);

  LLVM_DEBUG(dbgs() << M);
  return true;
}

uint64_t
ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
  constexpr uint64_t kMaxRZ = 1 << 18;
  const uint64_t MinRZ = getMinRedzoneSizeForGlobal();

  uint64_t RZ = 0;
  if (SizeInBytes <= MinRZ / 2) {
    // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
    // at least 32 bytes, optimize when SizeInBytes is less than or equal to
    // half of MinRZ.
    RZ = MinRZ - SizeInBytes;
  } else {
    // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
    RZ = std::max(MinRZ, std::min(kMaxRZ, (SizeInBytes / MinRZ / 4) * MinRZ));

    // Round up to multiple of MinRZ.
    if (SizeInBytes % MinRZ)
      RZ += MinRZ - (SizeInBytes % MinRZ);
  }

  assert((RZ + SizeInBytes) % MinRZ == 0);

  return RZ;
}

int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
  int LongSize = M.getDataLayout().getPointerSizeInBits();
  bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
  int Version = 8;
  // 32-bit Android is one version ahead because of the switch to dynamic
  // shadow.
  Version += (LongSize == 32 && isAndroid);
  return Version;
}

bool ModuleAddressSanitizer::instrumentModule(Module &M) {
  initializeCallbacks(M);

  // Create a module constructor. A destructor is created lazily because not
  // all platforms or modules need it.
  if (CompileKernel) {
    // The kernel always builds with its own runtime, and therefore does not
    // need the init and version check calls.
    AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
  } else {
    std::string AsanVersion = std::to_string(GetAsanVersion(M));
    std::string VersionCheckName =
        ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
    std::tie(AsanCtorFunction, std::ignore) =
        createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName,
                                            kAsanInitName, /*InitArgTypes=*/{},
                                            /*InitArgs=*/{}, VersionCheckName);
  }

  bool CtorComdat = true;
  if (ClGlobals) {
    IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
    InstrumentGlobals(IRB, M, &CtorComdat);
  }

  const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);

  // Put the constructor and destructor in comdat if both
  // (1) global instrumentation is not TU-specific, and
  // (2) the target is ELF.
2641 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) { 2642 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName)); 2643 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction); 2644 if (AsanDtorFunction) { 2645 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName)); 2646 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction); 2647 } 2648 } else { 2649 appendToGlobalCtors(M, AsanCtorFunction, Priority); 2650 if (AsanDtorFunction) 2651 appendToGlobalDtors(M, AsanDtorFunction, Priority); 2652 } 2653 2654 return true; 2655 } 2656 2657 void AddressSanitizer::initializeCallbacks(Module &M) { 2658 IRBuilder<> IRB(*C); 2659 // Create __asan_report* callbacks. 2660 // IsWrite, TypeSize and Exp are encoded in the function name. 2661 for (int Exp = 0; Exp < 2; Exp++) { 2662 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 2663 const std::string TypeStr = AccessIsWrite ? "store" : "load"; 2664 const std::string ExpStr = Exp ? "exp_" : ""; 2665 const std::string EndingStr = Recover ? "_noabort" : ""; 2666 2667 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy}; 2668 SmallVector<Type *, 2> Args1{1, IntptrTy}; 2669 if (Exp) { 2670 Type *ExpType = Type::getInt32Ty(*C); 2671 Args2.push_back(ExpType); 2672 Args1.push_back(ExpType); 2673 } 2674 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2675 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr, 2676 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2677 2678 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2679 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, 2680 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2681 2682 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 2683 AccessSizeIndex++) { 2684 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex); 2685 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2686 M.getOrInsertFunction( 2687 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr, 2688 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2689 2690 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2691 M.getOrInsertFunction( 2692 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, 2693 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2694 } 2695 } 2696 } 2697 2698 const std::string MemIntrinCallbackPrefix = 2699 CompileKernel ? 
std::string("") : ClMemoryAccessCallbackPrefix; 2700 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove", 2701 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2702 IRB.getInt8PtrTy(), IntptrTy); 2703 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", 2704 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2705 IRB.getInt8PtrTy(), IntptrTy); 2706 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset", 2707 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2708 IRB.getInt32Ty(), IntptrTy); 2709 2710 AsanHandleNoReturnFunc = 2711 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy()); 2712 2713 AsanPtrCmpFunction = 2714 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy); 2715 AsanPtrSubFunction = 2716 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy); 2717 if (Mapping.InGlobal) 2718 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow", 2719 ArrayType::get(IRB.getInt8Ty(), 0)); 2720 2721 AMDGPUAddressShared = M.getOrInsertFunction( 2722 kAMDGPUAddressSharedName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2723 AMDGPUAddressPrivate = M.getOrInsertFunction( 2724 kAMDGPUAddressPrivateName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2725 } 2726 2727 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { 2728 // For each NSObject descendant having a +load method, this method is invoked 2729 // by the ObjC runtime before any of the static constructors is called. 2730 // Therefore we need to instrument such methods with a call to __asan_init 2731 // at the beginning in order to initialize our runtime before any access to 2732 // the shadow memory. 2733 // We cannot just ignore these methods, because they may call other 2734 // instrumented functions. 2735 if (F.getName().find(" load]") != std::string::npos) { 2736 FunctionCallee AsanInitFunction = 2737 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {}); 2738 IRBuilder<> IRB(&F.front(), F.front().begin()); 2739 IRB.CreateCall(AsanInitFunction, {}); 2740 return true; 2741 } 2742 return false; 2743 } 2744 2745 bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) { 2746 // Generate code only when dynamic addressing is needed. 2747 if (Mapping.Offset != kDynamicShadowSentinel) 2748 return false; 2749 2750 IRBuilder<> IRB(&F.front().front()); 2751 if (Mapping.InGlobal) { 2752 if (ClWithIfuncSuppressRemat) { 2753 // An empty inline asm with input reg == output reg. 2754 // An opaque pointer-to-int cast, basically. 2755 InlineAsm *Asm = InlineAsm::get( 2756 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false), 2757 StringRef(""), StringRef("=r,0"), 2758 /*hasSideEffects=*/false); 2759 LocalDynamicShadow = 2760 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow"); 2761 } else { 2762 LocalDynamicShadow = 2763 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow"); 2764 } 2765 } else { 2766 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( 2767 kAsanShadowMemoryDynamicAddress, IntptrTy); 2768 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress); 2769 } 2770 return true; 2771 } 2772 2773 void AddressSanitizer::markEscapedLocalAllocas(Function &F) { 2774 // Find the one possible call to llvm.localescape and pre-mark allocas passed 2775 // to it as uninteresting. This assumes we haven't started processing allocas 2776 // yet. This check is done up front because iterating the use list in 2777 // isInterestingAlloca would be algorithmically slower. 
  assert(ProcessedAllocas.empty() && "must process localescape before allocas");

  // Try to get the declaration of llvm.localescape. If it's not in the module,
  // we can exit early.
  if (!F.getParent()->getFunction("llvm.localescape")) return;

  // Look for a call to llvm.localescape in the entry block. It can't be in
  // any other block.
  for (Instruction &I : F.getEntryBlock()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
      // We found a call. Mark all the allocas passed in as uninteresting.
      for (Value *Arg : II->arg_operands()) {
        AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
        assert(AI && AI->isStaticAlloca() &&
               "non-static alloca arg to localescape");
        ProcessedAllocas[AI] = false;
      }
      break;
    }
  }
}

bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
  bool ShouldInstrument =
      ClDebugMin < 0 || ClDebugMax < 0 ||
      (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
  Instrumented++;
  return !ShouldInstrument;
}

bool AddressSanitizer::instrumentFunction(Function &F,
                                          const TargetLibraryInfo *TLI) {
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
  if (F.getName().startswith("__asan_")) return false;

  bool FunctionModified = false;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent());

  FunctionStateRAII CleanupObj(this);

  FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static
  // allocas can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  // We want to instrument every address only once per basic block (unless
  // there are calls between uses).
  SmallPtrSet<Value *, 16> TempsToInstrument;
  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<BasicBlock *, 16> AllBlocks;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
  int NumAllocas = 0;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
      getInterestingMemoryOperands(&Inst, InterestingOperands);

      if (!InterestingOperands.empty()) {
        for (auto &Operand : InterestingOperands) {
          if (ClOpt && ClOptSameTemp) {
            Value *Ptr = Operand.getPtr();
            // If we have a mask, skip instrumentation if we've already
            // instrumented the full object.
But don't add to TempsToInstrument 2864 // because we might get another load/store with a different mask. 2865 if (Operand.MaybeMask) { 2866 if (TempsToInstrument.count(Ptr)) 2867 continue; // We've seen this (whole) temp in the current BB. 2868 } else { 2869 if (!TempsToInstrument.insert(Ptr).second) 2870 continue; // We've seen this temp in the current BB. 2871 } 2872 } 2873 OperandsToInstrument.push_back(Operand); 2874 NumInsnsPerBB++; 2875 } 2876 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) && 2877 isInterestingPointerComparison(&Inst)) || 2878 ((ClInvalidPointerPairs || ClInvalidPointerSub) && 2879 isInterestingPointerSubtraction(&Inst))) { 2880 PointerComparisonsOrSubtracts.push_back(&Inst); 2881 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) { 2882 // ok, take it. 2883 IntrinToInstrument.push_back(MI); 2884 NumInsnsPerBB++; 2885 } else { 2886 if (isa<AllocaInst>(Inst)) NumAllocas++; 2887 if (auto *CB = dyn_cast<CallBase>(&Inst)) { 2888 // A call inside BB. 2889 TempsToInstrument.clear(); 2890 if (CB->doesNotReturn() && !CB->hasMetadata("nosanitize")) 2891 NoReturnCalls.push_back(CB); 2892 } 2893 if (CallInst *CI = dyn_cast<CallInst>(&Inst)) 2894 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); 2895 } 2896 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; 2897 } 2898 } 2899 2900 bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 && 2901 OperandsToInstrument.size() + IntrinToInstrument.size() > 2902 (unsigned)ClInstrumentationWithCallsThreshold); 2903 const DataLayout &DL = F.getParent()->getDataLayout(); 2904 ObjectSizeOpts ObjSizeOpts; 2905 ObjSizeOpts.RoundToAlign = true; 2906 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts); 2907 2908 // Instrument. 2909 int NumInstrumented = 0; 2910 for (auto &Operand : OperandsToInstrument) { 2911 if (!suppressInstrumentationSiteForDebug(NumInstrumented)) 2912 instrumentMop(ObjSizeVis, Operand, UseCalls, 2913 F.getParent()->getDataLayout()); 2914 FunctionModified = true; 2915 } 2916 for (auto Inst : IntrinToInstrument) { 2917 if (!suppressInstrumentationSiteForDebug(NumInstrumented)) 2918 instrumentMemIntrinsic(Inst); 2919 FunctionModified = true; 2920 } 2921 2922 FunctionStackPoisoner FSP(F, *this); 2923 bool ChangedStack = FSP.runOnFunction(); 2924 2925 // We must unpoison the stack before NoReturn calls (throw, _exit, etc). 2926 // See e.g. https://github.com/google/sanitizers/issues/37 2927 for (auto CI : NoReturnCalls) { 2928 IRBuilder<> IRB(CI); 2929 IRB.CreateCall(AsanHandleNoReturnFunc, {}); 2930 } 2931 2932 for (auto Inst : PointerComparisonsOrSubtracts) { 2933 instrumentPointerComparisonOrSubtraction(Inst); 2934 FunctionModified = true; 2935 } 2936 2937 if (ChangedStack || !NoReturnCalls.empty()) 2938 FunctionModified = true; 2939 2940 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " " 2941 << F << "\n"); 2942 2943 return FunctionModified; 2944 } 2945 2946 // Workaround for bug 11395: we don't want to instrument stack in functions 2947 // with large assembly blobs (32-bit only), otherwise reg alloc may crash. 2948 // FIXME: remove once the bug 11395 is fixed. 2949 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { 2950 if (LongSize != 32) return false; 2951 CallInst *CI = dyn_cast<CallInst>(I); 2952 if (!CI || !CI->isInlineAsm()) return false; 2953 if (CI->getNumArgOperands() <= 5) return false; 2954 // We have inline assembly with quite a few arguments. 
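  // (Here "large assembly blob" is approximated by an inline-asm call with
  // more than five operands; see the getNumArgOperands() check above.)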
2955   return true;
2956 }
2957 
2958 void FunctionStackPoisoner::initializeCallbacks(Module &M) {
2959   IRBuilder<> IRB(*C);
2960   if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
2961       ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
2962     const char *MallocNameTemplate =
2963         ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
2964             ? kAsanStackMallocAlwaysNameTemplate
2965             : kAsanStackMallocNameTemplate;
2966     for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
2967       std::string Suffix = itostr(Index);
2968       AsanStackMallocFunc[Index] = M.getOrInsertFunction(
2969           MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
2970       AsanStackFreeFunc[Index] =
2971           M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
2972                                 IRB.getVoidTy(), IntptrTy, IntptrTy);
2973     }
2974   }
2975   if (ASan.UseAfterScope) {
2976     AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
2977         kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2978     AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
2979         kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2980   }
2981 
2982   for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
2983     std::ostringstream Name;
2984     Name << kAsanSetShadowPrefix;
2985     Name << std::setw(2) << std::setfill('0') << std::hex << Val;
2986     AsanSetShadowFunc[Val] =
2987         M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
2988   }
2989 
2990   AsanAllocaPoisonFunc = M.getOrInsertFunction(
2991       kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
2992   AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
2993       kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
2994 }
2995 
2996 void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
2997                                                ArrayRef<uint8_t> ShadowBytes,
2998                                                size_t Begin, size_t End,
2999                                                IRBuilder<> &IRB,
3000                                                Value *ShadowBase) {
3001   if (Begin >= End)
3002     return;
3003 
3004   const size_t LargestStoreSizeInBytes =
3005       std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3006 
3007   const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();
3008 
3009   // Poison the given range in shadow using the largest store size that avoids
3010   // leading and trailing zeros in ShadowMask. Zeros never change, so they need
3011   // neither poisoning nor unpoisoning. Still, we don't mind if some of them
3012   // end up in the middle of a store.
3013   for (size_t i = Begin; i < End;) {
3014     if (!ShadowMask[i]) {
3015       assert(!ShadowBytes[i]);
3016       ++i;
3017       continue;
3018     }
3019 
3020     size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3021     // Fit store size into the range.
3022     while (StoreSizeInBytes > End - i)
3023       StoreSizeInBytes /= 2;
3024 
3025     // Minimize store size by trimming trailing zeros.
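    // For example (hypothetical mask): with StoreSizeInBytes == 8 and only
    // ShadowMask[i] and ShadowMask[i + 1] set, j walks down from 7 and the
    // store is halved as j passes 4 and then 2, ending with a 2-byte store.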
3026     for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3027       while (j <= StoreSizeInBytes / 2)
3028         StoreSizeInBytes /= 2;
3029     }
3030 
3031     uint64_t Val = 0;
3032     for (size_t j = 0; j < StoreSizeInBytes; j++) {
3033       if (IsLittleEndian)
3034         Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3035       else
3036         Val = (Val << 8) | ShadowBytes[i + j];
3037     }
3038 
3039     Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3040     Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3041     IRB.CreateAlignedStore(
3042         Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
3043         Align(1));
3044 
3045     i += StoreSizeInBytes;
3046   }
3047 }
3048 
3049 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3050                                          ArrayRef<uint8_t> ShadowBytes,
3051                                          IRBuilder<> &IRB, Value *ShadowBase) {
3052   copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3053 }
3054 
3055 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3056                                          ArrayRef<uint8_t> ShadowBytes,
3057                                          size_t Begin, size_t End,
3058                                          IRBuilder<> &IRB, Value *ShadowBase) {
3059   assert(ShadowMask.size() == ShadowBytes.size());
3060   size_t Done = Begin;
3061   for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3062     if (!ShadowMask[i]) {
3063       assert(!ShadowBytes[i]);
3064       continue;
3065     }
3066     uint8_t Val = ShadowBytes[i];
3067     if (!AsanSetShadowFunc[Val])
3068       continue;
3069 
3070     // Skip same values.
3071     for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3072     }
3073 
3074     if (j - i >= ClMaxInlinePoisoningSize) {
3075       copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3076       IRB.CreateCall(AsanSetShadowFunc[Val],
3077                      {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3078                       ConstantInt::get(IntptrTy, j - i)});
3079       Done = j;
3080     }
3081   }
3082 
3083   copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3084 }
3085 
3086 // The fake stack allocator (asan_fake_stack.h) has 11 size classes, one per
3087 // power of 2 from kMinStackMallocSize; the largest class index is kMaxAsanStackMallocSizeClass.
3088 static int StackMallocSizeClass(uint64_t LocalStackSize) {
3089   assert(LocalStackSize <= kMaxStackMallocSize);
3090   uint64_t MaxSize = kMinStackMallocSize;
3091   for (int i = 0;; i++, MaxSize *= 2)
3092     if (LocalStackSize <= MaxSize) return i;
3093   llvm_unreachable("impossible LocalStackSize");
3094 }
3095 
3096 void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3097   Instruction *CopyInsertPoint = &F.front().front();
3098   if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3099     // Insert after the dynamic shadow location is determined.
3100     CopyInsertPoint = CopyInsertPoint->getNextNode();
3101     assert(CopyInsertPoint);
3102   }
3103   IRBuilder<> IRB(CopyInsertPoint);
3104   const DataLayout &DL = F.getParent()->getDataLayout();
3105   for (Argument &Arg : F.args()) {
3106     if (Arg.hasByValAttr()) {
3107       Type *Ty = Arg.getParamByValType();
3108       const Align Alignment =
3109           DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3110 
3111       AllocaInst *AI = IRB.CreateAlloca(
3112           Ty, nullptr,
3113           (Arg.hasName() ?
Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3114               ".byval");
3115       AI->setAlignment(Alignment);
3116       Arg.replaceAllUsesWith(AI);
3117 
3118       uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3119       IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3120     }
3121   }
3122 }
3123 
3124 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3125                                           Value *ValueIfTrue,
3126                                           Instruction *ThenTerm,
3127                                           Value *ValueIfFalse) {
3128   PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3129   BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3130   PHI->addIncoming(ValueIfFalse, CondBlock);
3131   BasicBlock *ThenBlock = ThenTerm->getParent();
3132   PHI->addIncoming(ValueIfTrue, ThenBlock);
3133   return PHI;
3134 }
3135 
3136 Value *FunctionStackPoisoner::createAllocaForLayout(
3137     IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3138   AllocaInst *Alloca;
3139   if (Dynamic) {
3140     Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3141                               ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3142                               "MyAlloca");
3143   } else {
3144     Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3145                               nullptr, "MyAlloca");
3146     assert(Alloca->isStaticAlloca());
3147   }
3148   assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3149   size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
3150   Alloca->setAlignment(Align(FrameAlignment));
3151   return IRB.CreatePointerCast(Alloca, IntptrTy);
3152 }
3153 
3154 void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3155   BasicBlock &FirstBB = *F.begin();
3156   IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3157   DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3158   IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3159   DynamicAllocaLayout->setAlignment(Align(32));
3160 }
3161 
3162 void FunctionStackPoisoner::processDynamicAllocas() {
3163   if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3164     assert(DynamicAllocaPoisonCallVec.empty());
3165     return;
3166   }
3167 
3168   // Insert poison calls for lifetime intrinsics for dynamic allocas.
3169   for (const auto &APC : DynamicAllocaPoisonCallVec) {
3170     assert(APC.InsBefore);
3171     assert(APC.AI);
3172     assert(ASan.isInterestingAlloca(*APC.AI));
3173     assert(!APC.AI->isStaticAlloca());
3174 
3175     IRBuilder<> IRB(APC.InsBefore);
3176     poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3177     // Dynamic allocas will be unpoisoned unconditionally below in
3178     // unpoisonDynamicAllocas, so no per-alloca unpoisoning bookkeeping is
3179     // needed here.
3180   }
3181 
3182   // Handle dynamic allocas.
3183   createDynamicAllocasInitStorage();
3184   for (auto &AI : DynamicAllocaVec)
3185     handleDynamicAllocaCall(AI);
3186   unpoisonDynamicAllocas();
3187 }
3188 
3189 /// Collect instructions in the entry block after \p InsBefore which initialize
3190 /// permanent storage for a function argument. These instructions must remain in
3191 /// the entry block so that uninitialized values do not appear in backtraces. An
3192 /// added benefit is that this conserves spill slots. This does not move stores
3193 /// before instrumented / "interesting" allocas.
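///
/// A sketch of the initialization pattern being matched (names illustrative):
///   %x.addr = alloca i64
///   %x.ext = sext i32 %x to i64   ; optional cast, directly before the store
///   store i64 %x.ext, i64* %x.addr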
3194 static void findStoresToUninstrumentedArgAllocas( 3195 AddressSanitizer &ASan, Instruction &InsBefore, 3196 SmallVectorImpl<Instruction *> &InitInsts) { 3197 Instruction *Start = InsBefore.getNextNonDebugInstruction(); 3198 for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) { 3199 // Argument initialization looks like: 3200 // 1) store <Argument>, <Alloca> OR 3201 // 2) <CastArgument> = cast <Argument> to ... 3202 // store <CastArgument> to <Alloca> 3203 // Do not consider any other kind of instruction. 3204 // 3205 // Note: This covers all known cases, but may not be exhaustive. An 3206 // alternative to pattern-matching stores is to DFS over all Argument uses: 3207 // this might be more general, but is probably much more complicated. 3208 if (isa<AllocaInst>(It) || isa<CastInst>(It)) 3209 continue; 3210 if (auto *Store = dyn_cast<StoreInst>(It)) { 3211 // The store destination must be an alloca that isn't interesting for 3212 // ASan to instrument. These are moved up before InsBefore, and they're 3213 // not interesting because allocas for arguments can be mem2reg'd. 3214 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand()); 3215 if (!Alloca || ASan.isInterestingAlloca(*Alloca)) 3216 continue; 3217 3218 Value *Val = Store->getValueOperand(); 3219 bool IsDirectArgInit = isa<Argument>(Val); 3220 bool IsArgInitViaCast = 3221 isa<CastInst>(Val) && 3222 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) && 3223 // Check that the cast appears directly before the store. Otherwise 3224 // moving the cast before InsBefore may break the IR. 3225 Val == It->getPrevNonDebugInstruction(); 3226 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast; 3227 if (!IsArgInit) 3228 continue; 3229 3230 if (IsArgInitViaCast) 3231 InitInsts.push_back(cast<Instruction>(Val)); 3232 InitInsts.push_back(Store); 3233 continue; 3234 } 3235 3236 // Do not reorder past unknown instructions: argument initialization should 3237 // only involve casts and stores. 3238 return; 3239 } 3240 } 3241 3242 void FunctionStackPoisoner::processStaticAllocas() { 3243 if (AllocaVec.empty()) { 3244 assert(StaticAllocaPoisonCallVec.empty()); 3245 return; 3246 } 3247 3248 int StackMallocIdx = -1; 3249 DebugLoc EntryDebugLocation; 3250 if (auto SP = F.getSubprogram()) 3251 EntryDebugLocation = 3252 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP); 3253 3254 Instruction *InsBefore = AllocaVec[0]; 3255 IRBuilder<> IRB(InsBefore); 3256 3257 // Make sure non-instrumented allocas stay in the entry block. Otherwise, 3258 // debug info is broken, because only entry-block allocas are treated as 3259 // regular stack slots. 3260 auto InsBeforeB = InsBefore->getParent(); 3261 assert(InsBeforeB == &F.getEntryBlock()); 3262 for (auto *AI : StaticAllocasToMoveUp) 3263 if (AI->getParent() == InsBeforeB) 3264 AI->moveBefore(InsBefore); 3265 3266 // Move stores of arguments into entry-block allocas as well. This prevents 3267 // extra stack slots from being generated (to house the argument values until 3268 // they can be stored into the allocas). This also prevents uninitialized 3269 // values from being shown in backtraces. 3270 SmallVector<Instruction *, 8> ArgInitInsts; 3271 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts); 3272 for (Instruction *ArgInitInst : ArgInitInsts) 3273 ArgInitInst->moveBefore(InsBefore); 3274 3275 // If we have a call to llvm.localescape, keep it in the entry block. 
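  // (The IR verifier rejects llvm.localescape outside the entry block, so it
  // must not end up below the frame setup code inserted here.)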
3276   if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
3277 
3278   SmallVector<ASanStackVariableDescription, 16> SVD;
3279   SVD.reserve(AllocaVec.size());
3280   for (AllocaInst *AI : AllocaVec) {
3281     ASanStackVariableDescription D = {AI->getName().data(),
3282                                       ASan.getAllocaSizeInBytes(*AI),
3283                                       0,
3284                                       AI->getAlignment(),
3285                                       AI,
3286                                       0,
3287                                       0};
3288     SVD.push_back(D);
3289   }
3290 
3291   // Minimal header size (left redzone) is 4 pointers,
3292   // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3293   size_t Granularity = 1ULL << Mapping.Scale;
3294   size_t MinHeaderSize = std::max((size_t)ASan.LongSize / 2, Granularity);
3295   const ASanStackFrameLayout &L =
3296       ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3297 
3298   // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3299   DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3300   for (auto &Desc : SVD)
3301     AllocaToSVDMap[Desc.AI] = &Desc;
3302 
3303   // Update SVD with information from lifetime intrinsics.
3304   for (const auto &APC : StaticAllocaPoisonCallVec) {
3305     assert(APC.InsBefore);
3306     assert(APC.AI);
3307     assert(ASan.isInterestingAlloca(*APC.AI));
3308     assert(APC.AI->isStaticAlloca());
3309 
3310     ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3311     Desc.LifetimeSize = Desc.Size;
3312     if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3313       if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3314         if (LifetimeLoc->getFile() == FnLoc->getFile())
3315           if (unsigned Line = LifetimeLoc->getLine())
3316             Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3317       }
3318     }
3319   }
3320 
3321   auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3322   LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3323   uint64_t LocalStackSize = L.FrameSize;
3324   bool DoStackMalloc =
3325       ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3326       !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3327   bool DoDynamicAlloca = ClDynamicAllocaStack;
3328   // Don't do dynamic alloca or stack malloc if:
3329   // 1) There is inline asm: too often it makes assumptions on which registers
3330   //    are available.
3331   // 2) There is a returns_twice call (typically setjmp), which is
3332   //    optimization-hostile, and doesn't play well with introduced indirect
3333   //    register-relative calculation of local variable addresses.
3334   DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3335   DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3336 
3337   Value *StaticAlloca =
3338       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3339 
3340   Value *FakeStack;
3341   Value *LocalStackBase;
3342   Value *LocalStackBaseAlloca;
3343   uint8_t DIExprFlags = DIExpression::ApplyOffset;
3344 
3345   if (DoStackMalloc) {
3346     LocalStackBaseAlloca =
3347         IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3348     if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3349       // void *FakeStack = __asan_option_detect_stack_use_after_return
3350       //     ? __asan_stack_malloc_N(LocalStackSize)
3351       //     : nullptr;
3352       // void *LocalStackBase = (FakeStack) ?
FakeStack :
3353       //                        alloca(LocalStackSize);
3354       Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3355           kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3356       Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3357           IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3358           Constant::getNullValue(IRB.getInt32Ty()));
3359       Instruction *Term =
3360           SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3361       IRBuilder<> IRBIf(Term);
3362       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3363       assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3364       Value *FakeStackValue =
3365           IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3366                            ConstantInt::get(IntptrTy, LocalStackSize));
3367       IRB.SetInsertPoint(InsBefore);
3368       FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3369                             ConstantInt::get(IntptrTy, 0));
3370     } else {
3371       // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3372       // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3373       // void *LocalStackBase = (FakeStack) ? FakeStack :
3374       //                        alloca(LocalStackSize);
3375       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3376       FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3377                                  ConstantInt::get(IntptrTy, LocalStackSize));
3378     }
3379     Value *NoFakeStack =
3380         IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3381     Instruction *Term =
3382         SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3383     IRBuilder<> IRBIf(Term);
3384     Value *AllocaValue =
3385         DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3386 
3387     IRB.SetInsertPoint(InsBefore);
3388     LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3389     IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3390     DIExprFlags |= DIExpression::DerefBefore;
3391   } else {
3392     // void *FakeStack = nullptr;
3393     // void *LocalStackBase = alloca(LocalStackSize);
3394     FakeStack = ConstantInt::get(IntptrTy, 0);
3395     LocalStackBase =
3396         DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3397     LocalStackBaseAlloca = LocalStackBase;
3398   }
3399 
3400   // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3401   // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3402   // later passes and can result in dropped variable coverage in debug info.
3403   Value *LocalStackBaseAllocaPtr =
3404       isa<PtrToIntInst>(LocalStackBaseAlloca)
3405           ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3406           : LocalStackBaseAlloca;
3407   assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3408          "Variable descriptions relative to ASan stack base will be dropped");
3409 
3410   // Replace Alloca instructions with base+offset.
3411   for (const auto &Desc : SVD) {
3412     AllocaInst *AI = Desc.AI;
3413     replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3414                       Desc.Offset);
3415     Value *NewAllocaPtr = IRB.CreateIntToPtr(
3416         IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3417         AI->getType());
3418     AI->replaceAllUsesWith(NewAllocaPtr);
3419   }
3420 
3421   // The left-most redzone has enough space for at least 4 pointers.
3422   // Write the Magic value to redzone[0].
3423   Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3424   IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3425                   BasePlus0);
3426   // Write the frame description constant to redzone[1].
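  // (The resulting frame header, one word per slot: slot 0 holds the frame
  // magic, slot 1 the description string address, slot 2 the function PC.)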
3427   Value *BasePlus1 = IRB.CreateIntToPtr(
3428       IRB.CreateAdd(LocalStackBase,
3429                     ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3430       IntptrPtrTy);
3431   GlobalVariable *StackDescriptionGlobal =
3432       createPrivateGlobalForString(*F.getParent(), DescriptionString,
3433                                    /*AllowMerging*/ true, kAsanGenPrefix);
3434   Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3435   IRB.CreateStore(Description, BasePlus1);
3436   // Write the PC to redzone[2].
3437   Value *BasePlus2 = IRB.CreateIntToPtr(
3438       IRB.CreateAdd(LocalStackBase,
3439                     ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3440       IntptrPtrTy);
3441   IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3442 
3443   const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3444 
3445   // Poison the stack red zones at the entry.
3446   Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3447   // As the mask we must use the most poisoned case: red zones and after-scope.
3448   // As the bytes we can use either the same bytes or just the red zones.
3449   copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3450 
3451   if (!StaticAllocaPoisonCallVec.empty()) {
3452     const auto &ShadowInScope = GetShadowBytes(SVD, L);
3453 
3454     // Poison static allocas near lifetime intrinsics.
3455     for (const auto &APC : StaticAllocaPoisonCallVec) {
3456       const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3457       assert(Desc.Offset % L.Granularity == 0);
3458       size_t Begin = Desc.Offset / L.Granularity;
3459       size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3460 
3461       IRBuilder<> IRB(APC.InsBefore);
3462       copyToShadow(ShadowAfterScope,
3463                    APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3464                    IRB, ShadowBase);
3465     }
3466   }
3467 
3468   SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3469   SmallVector<uint8_t, 64> ShadowAfterReturn;
3470 
3471   // (Un)poison the stack before all ret instructions.
3472   for (Instruction *Ret : RetVec) {
3473     IRBuilder<> IRBRet(Ret);
3474     // Mark the current frame as retired.
3475     IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3476                        BasePlus0);
3477     if (DoStackMalloc) {
3478       assert(StackMallocIdx >= 0);
3479       // if FakeStack != 0  // LocalStackBase == FakeStack
3480       //     // In use-after-return mode, poison the whole stack frame.
3481 // if StackMallocIdx <= 4 3482 // // For small sizes inline the whole thing: 3483 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize); 3484 // **SavedFlagPtr(FakeStack) = 0 3485 // else 3486 // __asan_stack_free_N(FakeStack, LocalStackSize) 3487 // else 3488 // <This is not a fake stack; unpoison the redzones> 3489 Value *Cmp = 3490 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy)); 3491 Instruction *ThenTerm, *ElseTerm; 3492 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm); 3493 3494 IRBuilder<> IRBPoison(ThenTerm); 3495 if (StackMallocIdx <= 4) { 3496 int ClassSize = kMinStackMallocSize << StackMallocIdx; 3497 ShadowAfterReturn.resize(ClassSize / L.Granularity, 3498 kAsanStackUseAfterReturnMagic); 3499 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison, 3500 ShadowBase); 3501 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd( 3502 FakeStack, 3503 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8)); 3504 Value *SavedFlagPtr = IRBPoison.CreateLoad( 3505 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy)); 3506 IRBPoison.CreateStore( 3507 Constant::getNullValue(IRBPoison.getInt8Ty()), 3508 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy())); 3509 } else { 3510 // For larger frames call __asan_stack_free_*. 3511 IRBPoison.CreateCall( 3512 AsanStackFreeFunc[StackMallocIdx], 3513 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)}); 3514 } 3515 3516 IRBuilder<> IRBElse(ElseTerm); 3517 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase); 3518 } else { 3519 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase); 3520 } 3521 } 3522 3523 // We are done. Remove the old unused alloca instructions. 3524 for (auto AI : AllocaVec) AI->eraseFromParent(); 3525 } 3526 3527 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, 3528 IRBuilder<> &IRB, bool DoPoison) { 3529 // For now just insert the call to ASan runtime. 3530 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); 3531 Value *SizeArg = ConstantInt::get(IntptrTy, Size); 3532 IRB.CreateCall( 3533 DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc, 3534 {AddrArg, SizeArg}); 3535 } 3536 3537 // Handling llvm.lifetime intrinsics for a given %alloca: 3538 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca. 3539 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect 3540 // invalid accesses) and unpoison it for llvm.lifetime.start (the memory 3541 // could be poisoned by previous llvm.lifetime.end instruction, as the 3542 // variable may go in and out of scope several times, e.g. in loops). 3543 // (3) if we poisoned at least one %alloca in a function, 3544 // unpoison the whole stack frame at function exit. 3545 void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) { 3546 IRBuilder<> IRB(AI); 3547 3548 const unsigned Alignment = std::max(kAllocaRzSize, AI->getAlignment()); 3549 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1; 3550 3551 Value *Zero = Constant::getNullValue(IntptrTy); 3552 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize); 3553 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask); 3554 3555 // Since we need to extend alloca with additional memory to locate 3556 // redzones, and OldSize is number of allocated blocks with 3557 // ElementSize size, get allocated memory size in bytes by 3558 // OldSize * ElementSize. 
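  // For example (hypothetical): for "%a = alloca i32, i64 %n", ElementSize
  // is 4 and OldSize is the runtime value 4 * %n bytes.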
3559   const unsigned ElementSize =
3560       F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3561   Value *OldSize =
3562       IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3563                     ConstantInt::get(IntptrTy, ElementSize));
3564 
3565   // PartialSize = OldSize % 32
3566   Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3567 
3568   // Misalign = kAllocaRzSize - PartialSize;
3569   Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3570 
3571   // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3572   Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3573   Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3574 
3575   // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3576   // Alignment is added to locate left redzone, PartialPadding for possible
3577   // partial redzone and kAllocaRzSize for right redzone respectively.
3578   Value *AdditionalChunkSize = IRB.CreateAdd(
3579       ConstantInt::get(IntptrTy, Alignment + kAllocaRzSize), PartialPadding);
3580 
3581   Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3582 
3583   // Insert new alloca with new NewSize and Alignment params.
3584   AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3585   NewAlloca->setAlignment(Align(Alignment));
3586 
3587   // NewAddress = Address + Alignment
3588   Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3589                                     ConstantInt::get(IntptrTy, Alignment));
3590 
3591   // Insert __asan_alloca_poison call for the newly created alloca.
3592   IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
3593 
3594   // Store the last alloca's address to DynamicAllocaLayout. We'll need it
3595   // later for unpoisoning.
3596   IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3597 
3598   Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3599 
3600   // Replace all uses of the address returned by the old alloca with NewAddressPtr.
3601   AI->replaceAllUsesWith(NewAddressPtr);
3602 
3603   // We are done. Erase the old alloca from its parent.
3604   AI->eraseFromParent();
3605 }
3606 
3607 // isSafeAccess returns true if Addr is always inbounds with respect to its
3608 // base object. For example, it is a field access or an array access with
3609 // constant inbounds index.
3610 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3611                                     Value *Addr, uint64_t TypeSize) const {
3612   SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
3613   if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
3614   uint64_t Size = SizeOffset.first.getZExtValue();
3615   int64_t Offset = SizeOffset.second.getSExtValue();
3616   // Three checks are required to ensure safety:
3617   // . Offset >= 0  (since the offset is given from the base ptr)
3618   // . Size >= Offset  (unsigned)
3619   // . Size - Offset >= NeededSize  (unsigned)
3620   return Offset >= 0 && Size >= uint64_t(Offset) &&
3621          Size - uint64_t(Offset) >= TypeSize / 8;
3622 }
3623 
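// (Example for isSafeAccess above, with hypothetical values: a 4-byte access
// into a "[16 x i8]" object at constant offset 12 gives Size == 16 and
// Offset == 12, so all three checks pass and the access is statically
// inbounds; at offset 13 the last check, 16 - 13 >= 4, fails.)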