1 //===- AddressSanitizer.cpp - memory error detector -----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file is a part of AddressSanitizer, an address sanity checker. 10 // Details of the algorithm: 11 // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm 12 // 13 // FIXME: This sanitizer does not yet handle scalable vectors 14 // 15 //===----------------------------------------------------------------------===// 16 17 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/DenseMap.h" 20 #include "llvm/ADT/DepthFirstIterator.h" 21 #include "llvm/ADT/SmallPtrSet.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/Statistic.h" 24 #include "llvm/ADT/StringExtras.h" 25 #include "llvm/ADT/StringRef.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/ADT/Twine.h" 28 #include "llvm/Analysis/MemoryBuiltins.h" 29 #include "llvm/Analysis/TargetLibraryInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/BinaryFormat/MachO.h" 32 #include "llvm/IR/Argument.h" 33 #include "llvm/IR/Attributes.h" 34 #include "llvm/IR/BasicBlock.h" 35 #include "llvm/IR/Comdat.h" 36 #include "llvm/IR/Constant.h" 37 #include "llvm/IR/Constants.h" 38 #include "llvm/IR/DIBuilder.h" 39 #include "llvm/IR/DataLayout.h" 40 #include "llvm/IR/DebugInfoMetadata.h" 41 #include "llvm/IR/DebugLoc.h" 42 #include "llvm/IR/DerivedTypes.h" 43 #include "llvm/IR/Dominators.h" 44 #include "llvm/IR/Function.h" 45 #include "llvm/IR/GlobalAlias.h" 46 #include "llvm/IR/GlobalValue.h" 47 #include "llvm/IR/GlobalVariable.h" 48 #include "llvm/IR/IRBuilder.h" 49 #include "llvm/IR/InlineAsm.h" 50 #include "llvm/IR/InstVisitor.h" 51 #include "llvm/IR/InstrTypes.h" 52 #include "llvm/IR/Instruction.h" 53 #include "llvm/IR/Instructions.h" 54 #include "llvm/IR/IntrinsicInst.h" 55 #include "llvm/IR/Intrinsics.h" 56 #include "llvm/IR/LLVMContext.h" 57 #include "llvm/IR/MDBuilder.h" 58 #include "llvm/IR/Metadata.h" 59 #include "llvm/IR/Module.h" 60 #include "llvm/IR/Type.h" 61 #include "llvm/IR/Use.h" 62 #include "llvm/IR/Value.h" 63 #include "llvm/InitializePasses.h" 64 #include "llvm/MC/MCSectionMachO.h" 65 #include "llvm/Pass.h" 66 #include "llvm/Support/Casting.h" 67 #include "llvm/Support/CommandLine.h" 68 #include "llvm/Support/Debug.h" 69 #include "llvm/Support/ErrorHandling.h" 70 #include "llvm/Support/MathExtras.h" 71 #include "llvm/Support/ScopedPrinter.h" 72 #include "llvm/Support/raw_ostream.h" 73 #include "llvm/Transforms/Instrumentation.h" 74 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h" 75 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" 76 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 77 #include "llvm/Transforms/Utils/Local.h" 78 #include "llvm/Transforms/Utils/ModuleUtils.h" 79 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 80 #include <algorithm> 81 #include <cassert> 82 #include <cstddef> 83 #include <cstdint> 84 #include <iomanip> 85 #include <limits> 86 #include <memory> 87 #include <sstream> 88 #include <string> 89 #include <tuple> 90 91 using namespace llvm; 92 93 #define DEBUG_TYPE "asan" 94 95 static const uint64_t kDefaultShadowScale = 3; 96 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29; 97 
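// A sketch of the mapping these constants parameterize (illustrative only;
// the pass emits the equivalent IR in memToShadow() below):
//   ShadowAddr = (Addr >> Scale) + Offset
// With the default Scale of 3, one shadow byte encodes the state of 8
// application bytes, so the shadow region occupies 1/8th of the address
// space starting at the chosen offset.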
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
98 static const uint64_t kDynamicShadowSentinel =
99     std::numeric_limits<uint64_t>::max();
100 static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;  // < 2G.
101 static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
102 static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
103 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
104 static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
105 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
106 static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
107 static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
108 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
109 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
110 static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
111 static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
112 static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
113 static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
114 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
115 static const uint64_t kEmscriptenShadowOffset = 0;
116
117 static const uint64_t kMyriadShadowScale = 5;
118 static const uint64_t kMyriadMemoryOffset32 = 0x80000000ULL;
119 static const uint64_t kMyriadMemorySize32 = 0x20000000ULL;
120 static const uint64_t kMyriadTagShift = 29;
121 static const uint64_t kMyriadDDRTag = 4;
122 static const uint64_t kMyriadCacheBitMask32 = 0x40000000ULL;
123
124 // The shadow memory space is dynamically allocated.
125 static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
126
127 static const size_t kMinStackMallocSize = 1 << 6;   // 64B
128 static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
129 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
130 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
131
132 static const char *const kAsanModuleCtorName = "asan.module_ctor";
133 static const char *const kAsanModuleDtorName = "asan.module_dtor";
134 static const uint64_t kAsanCtorAndDtorPriority = 1;
135 // On Emscripten, the system needs more than one priority for constructors.
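// (GetCtorAndDtorPriority() further down selects the constant below on
// Emscripten targets and kAsanCtorAndDtorPriority everywhere else.)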
136 static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
137 static const char *const kAsanReportErrorTemplate = "__asan_report_";
138 static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
139 static const char *const kAsanUnregisterGlobalsName =
140     "__asan_unregister_globals";
141 static const char *const kAsanRegisterImageGlobalsName =
142     "__asan_register_image_globals";
143 static const char *const kAsanUnregisterImageGlobalsName =
144     "__asan_unregister_image_globals";
145 static const char *const kAsanRegisterElfGlobalsName =
146     "__asan_register_elf_globals";
147 static const char *const kAsanUnregisterElfGlobalsName =
148     "__asan_unregister_elf_globals";
149 static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
150 static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
151 static const char *const kAsanInitName = "__asan_init";
152 static const char *const kAsanVersionCheckNamePrefix =
153     "__asan_version_mismatch_check_v";
154 static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
155 static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
156 static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
157 static const int kMaxAsanStackMallocSizeClass = 10;
158 static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
159 static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
160 static const char *const kAsanGenPrefix = "___asan_gen_";
161 static const char *const kODRGenPrefix = "__odr_asan_gen_";
162 static const char *const kSanCovGenPrefix = "__sancov_gen_";
163 static const char *const kAsanSetShadowPrefix = "__asan_set_shadow_";
164 static const char *const kAsanPoisonStackMemoryName =
165     "__asan_poison_stack_memory";
166 static const char *const kAsanUnpoisonStackMemoryName =
167     "__asan_unpoison_stack_memory";
168
169 // ASan version script has __asan_* wildcard. Triple underscore prevents a
170 // linker (gold) warning about attempting to export a local symbol.
171 static const char *const kAsanGlobalsRegisteredFlagName =
172     "___asan_globals_registered";
173
174 static const char *const kAsanOptionDetectUseAfterReturn =
175     "__asan_option_detect_stack_use_after_return";
176
177 static const char *const kAsanShadowMemoryDynamicAddress =
178     "__asan_shadow_memory_dynamic_address";
179
180 static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
181 static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
182
183 // Access sizes are powers of two: 1, 2, 4, 8, 16.
184 static const size_t kNumberOfAccessSizes = 5;
185
186 static const unsigned kAllocaRzSize = 32;
187
188 // Command-line flags.
189
190 static cl::opt<bool> ClEnableKasan(
191     "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
192     cl::Hidden, cl::init(false));
193
194 static cl::opt<bool> ClRecover(
195     "asan-recover",
196     cl::desc("Enable recovery mode (continue-after-error)."),
197     cl::Hidden, cl::init(false));
198
199 static cl::opt<bool> ClInsertVersionCheck(
200     "asan-guard-against-version-mismatch",
201     cl::desc("Guard against compiler/runtime version mismatch."),
202     cl::Hidden, cl::init(true));
203
204 // This flag may need to be replaced with -f[no-]asan-reads.
205 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
206                                        cl::desc("instrument read instructions"),
207                                        cl::Hidden, cl::init(true));
208
209 static cl::opt<bool> ClInstrumentWrites(
210     "asan-instrument-writes", cl::desc("instrument write instructions"),
211     cl::Hidden, cl::init(true));
212
213 static cl::opt<bool> ClInstrumentAtomics(
214     "asan-instrument-atomics",
215     cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
216     cl::init(true));
217
218 static cl::opt<bool>
219     ClInstrumentByval("asan-instrument-byval",
220                       cl::desc("instrument byval call arguments"), cl::Hidden,
221                       cl::init(true));
222
223 static cl::opt<bool> ClAlwaysSlowPath(
224     "asan-always-slow-path",
225     cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
226     cl::init(false));
227
228 static cl::opt<bool> ClForceDynamicShadow(
229     "asan-force-dynamic-shadow",
230     cl::desc("Load shadow address into a local variable for each function"),
231     cl::Hidden, cl::init(false));
232
233 static cl::opt<bool>
234     ClWithIfunc("asan-with-ifunc",
235                 cl::desc("Access dynamic shadow through an ifunc global on "
236                          "platforms that support this"),
237                 cl::Hidden, cl::init(true));
238
239 static cl::opt<bool> ClWithIfuncSuppressRemat(
240     "asan-with-ifunc-suppress-remat",
241     cl::desc("Suppress rematerialization of dynamic shadow address by passing "
242              "it through inline asm in prologue."),
243     cl::Hidden, cl::init(true));
244
245 // This flag limits the number of instructions to be instrumented
246 // in any given BB. Normally, this should be set to unlimited (INT_MAX),
247 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
248 // set it to 10000.
249 static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
250     "asan-max-ins-per-bb", cl::init(10000),
251     cl::desc("maximal number of instructions to instrument in any given BB"),
252     cl::Hidden);
253
254 // This flag may need to be replaced with -f[no]asan-stack.
255 static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
256                              cl::Hidden, cl::init(true));
257 static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
258     "asan-max-inline-poisoning-size",
259     cl::desc(
260         "Inline shadow poisoning for blocks up to the given size in bytes."),
261     cl::Hidden, cl::init(64));
262
263 static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
264                                       cl::desc("Check stack-use-after-return"),
265                                       cl::Hidden, cl::init(true));
266
267 static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
268                                         cl::desc("Create redzones for byval "
269                                                  "arguments (extra copy "
270                                                  "required)"), cl::Hidden,
271                                         cl::init(true));
272
273 static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
274                                      cl::desc("Check stack-use-after-scope"),
275                                      cl::Hidden, cl::init(false));
276
277 // This flag may need to be replaced with -f[no]asan-globals.
278 static cl::opt<bool> ClGlobals("asan-globals",
279                                cl::desc("Handle global objects"), cl::Hidden,
280                                cl::init(true));
281
282 static cl::opt<bool> ClInitializers("asan-initialization-order",
283                                     cl::desc("Handle C++ initializer order"),
284                                     cl::Hidden, cl::init(true));
285
286 static cl::opt<bool> ClInvalidPointerPairs(
287     "asan-detect-invalid-pointer-pair",
288     cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
289     cl::init(false));
290
291 static cl::opt<bool> ClInvalidPointerCmp(
292     "asan-detect-invalid-pointer-cmp",
293     cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
294     cl::init(false));
295
296 static cl::opt<bool> ClInvalidPointerSub(
297     "asan-detect-invalid-pointer-sub",
298     cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
299     cl::init(false));
300
301 static cl::opt<unsigned> ClRealignStack(
302     "asan-realign-stack",
303     cl::desc("Realign stack to the value of this flag (power of two)"),
304     cl::Hidden, cl::init(32));
305
306 static cl::opt<int> ClInstrumentationWithCallsThreshold(
307     "asan-instrumentation-with-call-threshold",
308     cl::desc(
309         "If the function being instrumented contains more than "
310         "this number of memory accesses, use callbacks instead of "
311         "inline checks (-1 means never use callbacks)."),
312     cl::Hidden, cl::init(7000));
313
314 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
315     "asan-memory-access-callback-prefix",
316     cl::desc("Prefix for memory access callbacks"), cl::Hidden,
317     cl::init("__asan_"));
318
319 static cl::opt<bool>
320     ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
321                                cl::desc("instrument dynamic allocas"),
322                                cl::Hidden, cl::init(true));
323
324 static cl::opt<bool> ClSkipPromotableAllocas(
325     "asan-skip-promotable-allocas",
326     cl::desc("Do not instrument promotable allocas"), cl::Hidden,
327     cl::init(true));
328
329 // These flags allow changing the shadow mapping.
330 // The shadow mapping looks like
331 //    Shadow = (Mem >> scale) + offset
332
333 static cl::opt<int> ClMappingScale("asan-mapping-scale",
334                                    cl::desc("scale of asan shadow mapping"),
335                                    cl::Hidden, cl::init(0));
336
337 static cl::opt<uint64_t>
338     ClMappingOffset("asan-mapping-offset",
339                     cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
340                     cl::Hidden, cl::init(0));
341
342 // Optimization flags. Not user visible, used mostly for testing
343 // and benchmarking the tool.
344 345 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"), 346 cl::Hidden, cl::init(true)); 347 348 static cl::opt<bool> ClOptSameTemp( 349 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"), 350 cl::Hidden, cl::init(true)); 351 352 static cl::opt<bool> ClOptGlobals("asan-opt-globals", 353 cl::desc("Don't instrument scalar globals"), 354 cl::Hidden, cl::init(true)); 355 356 static cl::opt<bool> ClOptStack( 357 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), 358 cl::Hidden, cl::init(false)); 359 360 static cl::opt<bool> ClDynamicAllocaStack( 361 "asan-stack-dynamic-alloca", 362 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, 363 cl::init(true)); 364 365 static cl::opt<uint32_t> ClForceExperiment( 366 "asan-force-experiment", 367 cl::desc("Force optimization experiment (for testing)"), cl::Hidden, 368 cl::init(0)); 369 370 static cl::opt<bool> 371 ClUsePrivateAlias("asan-use-private-alias", 372 cl::desc("Use private aliases for global variables"), 373 cl::Hidden, cl::init(false)); 374 375 static cl::opt<bool> 376 ClUseOdrIndicator("asan-use-odr-indicator", 377 cl::desc("Use odr indicators to improve ODR reporting"), 378 cl::Hidden, cl::init(false)); 379 380 static cl::opt<bool> 381 ClUseGlobalsGC("asan-globals-live-support", 382 cl::desc("Use linker features to support dead " 383 "code stripping of globals"), 384 cl::Hidden, cl::init(true)); 385 386 // This is on by default even though there is a bug in gold: 387 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002 388 static cl::opt<bool> 389 ClWithComdat("asan-with-comdat", 390 cl::desc("Place ASan constructors in comdat sections"), 391 cl::Hidden, cl::init(true)); 392 393 // Debug flags. 394 395 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, 396 cl::init(0)); 397 398 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"), 399 cl::Hidden, cl::init(0)); 400 401 static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden, 402 cl::desc("Debug func")); 403 404 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), 405 cl::Hidden, cl::init(-1)); 406 407 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), 408 cl::Hidden, cl::init(-1)); 409 410 STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); 411 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); 412 STATISTIC(NumOptimizedAccessesToGlobalVar, 413 "Number of optimized accesses to global vars"); 414 STATISTIC(NumOptimizedAccessesToStackVar, 415 "Number of optimized accesses to stack vars"); 416 417 namespace { 418 419 /// This struct defines the shadow mapping using the rule: 420 /// shadow = (mem >> Scale) ADD-or-OR Offset. 
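/// For example, with the default Scale == 3 and the small x86-64 offset
/// computed in getShadowMapping() below (0x7fff8000), the address
/// 0x10000000 maps to the shadow byte at
/// (0x10000000 >> 3) + 0x7fff8000 == 0x81ff8000 (worked example with
/// illustrative values).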
421 /// If InGlobal is true, then 422 /// extern char __asan_shadow[]; 423 /// shadow = (mem >> Scale) + &__asan_shadow 424 struct ShadowMapping { 425 int Scale; 426 uint64_t Offset; 427 bool OrShadowOffset; 428 bool InGlobal; 429 }; 430 431 } // end anonymous namespace 432 433 static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize, 434 bool IsKasan) { 435 bool IsAndroid = TargetTriple.isAndroid(); 436 bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS(); 437 bool IsMacOS = TargetTriple.isMacOSX(); 438 bool IsFreeBSD = TargetTriple.isOSFreeBSD(); 439 bool IsNetBSD = TargetTriple.isOSNetBSD(); 440 bool IsPS4CPU = TargetTriple.isPS4CPU(); 441 bool IsLinux = TargetTriple.isOSLinux(); 442 bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 || 443 TargetTriple.getArch() == Triple::ppc64le; 444 bool IsSystemZ = TargetTriple.getArch() == Triple::systemz; 445 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64; 446 bool IsMIPS32 = TargetTriple.isMIPS32(); 447 bool IsMIPS64 = TargetTriple.isMIPS64(); 448 bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb(); 449 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64; 450 bool IsWindows = TargetTriple.isOSWindows(); 451 bool IsFuchsia = TargetTriple.isOSFuchsia(); 452 bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad; 453 bool IsEmscripten = TargetTriple.isOSEmscripten(); 454 455 ShadowMapping Mapping; 456 457 Mapping.Scale = IsMyriad ? kMyriadShadowScale : kDefaultShadowScale; 458 if (ClMappingScale.getNumOccurrences() > 0) { 459 Mapping.Scale = ClMappingScale; 460 } 461 462 if (LongSize == 32) { 463 if (IsAndroid) 464 Mapping.Offset = kDynamicShadowSentinel; 465 else if (IsMIPS32) 466 Mapping.Offset = kMIPS32_ShadowOffset32; 467 else if (IsFreeBSD) 468 Mapping.Offset = kFreeBSD_ShadowOffset32; 469 else if (IsNetBSD) 470 Mapping.Offset = kNetBSD_ShadowOffset32; 471 else if (IsIOS) 472 Mapping.Offset = kDynamicShadowSentinel; 473 else if (IsWindows) 474 Mapping.Offset = kWindowsShadowOffset32; 475 else if (IsEmscripten) 476 Mapping.Offset = kEmscriptenShadowOffset; 477 else if (IsMyriad) { 478 uint64_t ShadowOffset = (kMyriadMemoryOffset32 + kMyriadMemorySize32 - 479 (kMyriadMemorySize32 >> Mapping.Scale)); 480 Mapping.Offset = ShadowOffset - (kMyriadMemoryOffset32 >> Mapping.Scale); 481 } 482 else 483 Mapping.Offset = kDefaultShadowOffset32; 484 } else { // LongSize == 64 485 // Fuchsia is always PIE, which means that the beginning of the address 486 // space is always available. 
487   if (IsFuchsia)
488     Mapping.Offset = 0;
489   else if (IsPPC64)
490     Mapping.Offset = kPPC64_ShadowOffset64;
491   else if (IsSystemZ)
492     Mapping.Offset = kSystemZ_ShadowOffset64;
493   else if (IsFreeBSD && !IsMIPS64)
494     Mapping.Offset = kFreeBSD_ShadowOffset64;
495   else if (IsNetBSD) {
496     if (IsKasan)
497       Mapping.Offset = kNetBSDKasan_ShadowOffset64;
498     else
499       Mapping.Offset = kNetBSD_ShadowOffset64;
500   } else if (IsPS4CPU)
501     Mapping.Offset = kPS4CPU_ShadowOffset64;
502   else if (IsLinux && IsX86_64) {
503     if (IsKasan)
504       Mapping.Offset = kLinuxKasan_ShadowOffset64;
505     else
506       Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
507                         (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
508   } else if (IsWindows && IsX86_64) {
509     Mapping.Offset = kWindowsShadowOffset64;
510   } else if (IsMIPS64)
511     Mapping.Offset = kMIPS64_ShadowOffset64;
512   else if (IsIOS)
513     Mapping.Offset = kDynamicShadowSentinel;
514   else if (IsMacOS && IsAArch64)
515     Mapping.Offset = kDynamicShadowSentinel;
516   else if (IsAArch64)
517     Mapping.Offset = kAArch64_ShadowOffset64;
518   else
519     Mapping.Offset = kDefaultShadowOffset64;
520   }
521
522   if (ClForceDynamicShadow) {
523     Mapping.Offset = kDynamicShadowSentinel;
524   }
525
526   if (ClMappingOffset.getNumOccurrences() > 0) {
527     Mapping.Offset = ClMappingOffset;
528   }
529
530   // OR-ing the shadow offset is more efficient (at least on x86) if the offset
531   // is a power of two, but on ppc64 we have to use add since the shadow
532   // offset is not necessarily 1/8-th of the address space. On SystemZ,
533   // we could OR the constant in a single instruction, but it's more
534   // efficient to load it once and use indexed addressing.
535   Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
536                            !(Mapping.Offset & (Mapping.Offset - 1)) &&
537                            Mapping.Offset != kDynamicShadowSentinel;
538   bool IsAndroidWithIfuncSupport =
539       IsAndroid && !TargetTriple.isAndroidVersionLT(21);
540   Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
541
542   return Mapping;
543 }
544
545 static uint64_t getRedzoneSizeForScale(int MappingScale) {
546   // Redzone used for stack and globals is at least 32 bytes.
547   // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
548   return std::max(32U, 1U << MappingScale);
549 }
550
551 static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
552   if (TargetTriple.isOSEmscripten()) {
553     return kAsanEmscriptenCtorAndDtorPriority;
554   } else {
555     return kAsanCtorAndDtorPriority;
556   }
557 }
558
559 // For a musttail call immediately preceding a ret instruction, we cannot
560 // insert anything in between. Instead we use the musttail call instruction
561 // as the insertion point.
562 static Instruction *adjustForMusttailCall(Instruction *I) {
563   ReturnInst *RI = dyn_cast<ReturnInst>(I);
564   if (!RI)
565     return I;
566   Instruction *Prev = RI->getPrevNode();
567   if (BitCastInst *BCI = dyn_cast_or_null<BitCastInst>(Prev))
568     Prev = BCI->getPrevNode();
569   if (CallInst *CI = dyn_cast_or_null<CallInst>(Prev))
570     if (CI->isMustTailCall())
571       return CI;
572   return RI;
573 }
574
575 namespace {
576
577 /// Module analysis for getting various metadata about the module.
578 class ASanGlobalsMetadataWrapperPass : public ModulePass {
579 public:
580   static char ID;
581
582   ASanGlobalsMetadataWrapperPass() : ModulePass(ID) {
583     initializeASanGlobalsMetadataWrapperPassPass(
584         *PassRegistry::getPassRegistry());
585   }
586
587   bool runOnModule(Module &M) override {
588     GlobalsMD = GlobalsMetadata(M);
589     return false;
590   }
591
592   StringRef getPassName() const override {
593     return "ASanGlobalsMetadataWrapperPass";
594   }
595
596   void getAnalysisUsage(AnalysisUsage &AU) const override {
597     AU.setPreservesAll();
598   }
599
600   GlobalsMetadata &getGlobalsMD() { return GlobalsMD; }
601
602 private:
603   GlobalsMetadata GlobalsMD;
604 };
605
606 char ASanGlobalsMetadataWrapperPass::ID = 0;
607
608 /// AddressSanitizer: instrument the code in the module to find memory bugs.
609 struct AddressSanitizer {
610   AddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
611                    bool CompileKernel = false, bool Recover = false,
612                    bool UseAfterScope = false)
613       : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
614                                                             : CompileKernel),
615         Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
616         UseAfterScope(UseAfterScope || ClUseAfterScope), GlobalsMD(*GlobalsMD) {
617     C = &(M.getContext());
618     LongSize = M.getDataLayout().getPointerSizeInBits();
619     IntptrTy = Type::getIntNTy(*C, LongSize);
620     TargetTriple = Triple(M.getTargetTriple());
621
622     Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
623   }
624
625   uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
626     uint64_t ArraySize = 1;
627     if (AI.isArrayAllocation()) {
628       const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
629       assert(CI && "non-constant array size");
630       ArraySize = CI->getZExtValue();
631     }
632     Type *Ty = AI.getAllocatedType();
633     uint64_t SizeInBytes =
634         AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
635     return SizeInBytes * ArraySize;
636   }
637
638   /// Check if we want (and can) handle this alloca.
639   bool isInterestingAlloca(const AllocaInst &AI);
640
641   bool ignoreAccess(Value *Ptr);
642   void getInterestingMemoryOperands(
643       Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
644
645   void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
646                      InterestingMemoryOperand &O, bool UseCalls,
647                      const DataLayout &DL);
648   void instrumentPointerComparisonOrSubtraction(Instruction *I);
649   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
650                          Value *Addr, uint32_t TypeSize, bool IsWrite,
651                          Value *SizeArgument, bool UseCalls, uint32_t Exp);
652   void instrumentUnusualSizeOrAlignment(Instruction *I,
653                                         Instruction *InsertBefore, Value *Addr,
654                                         uint32_t TypeSize, bool IsWrite,
655                                         Value *SizeArgument, bool UseCalls,
656                                         uint32_t Exp);
657   Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
658                            Value *ShadowValue, uint32_t TypeSize);
659   Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
660                                  bool IsWrite, size_t AccessSizeIndex,
661                                  Value *SizeArgument, uint32_t Exp);
662   void instrumentMemIntrinsic(MemIntrinsic *MI);
663   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
664   bool suppressInstrumentationSiteForDebug(int &Instrumented);
665   bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
666   bool maybeInsertAsanInitAtFunctionEntry(Function &F);
667   bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
668   void markEscapedLocalAllocas(Function &F);
669
670 private:
671   friend struct FunctionStackPoisoner;
672
673   void initializeCallbacks(Module &M);
674
675   bool LooksLikeCodeInBug11395(Instruction *I);
676   bool GlobalIsLinkerInitialized(GlobalVariable *G);
677   bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
678                     uint64_t TypeSize) const;
679
680   /// Helper to clean up per-function state.
681   struct FunctionStateRAII {
682     AddressSanitizer *Pass;
683
684     FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
685       assert(Pass->ProcessedAllocas.empty() &&
686              "last pass forgot to clear cache");
687       assert(!Pass->LocalDynamicShadow);
688     }
689
690     ~FunctionStateRAII() {
691       Pass->LocalDynamicShadow = nullptr;
692       Pass->ProcessedAllocas.clear();
693     }
694   };
695
696   LLVMContext *C;
697   Triple TargetTriple;
698   int LongSize;
699   bool CompileKernel;
700   bool Recover;
701   bool UseAfterScope;
702   Type *IntptrTy;
703   ShadowMapping Mapping;
704   FunctionCallee AsanHandleNoReturnFunc;
705   FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
706   Constant *AsanShadowGlobal;
707
708   // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
709   FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
710   FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
711
712   // These arrays are indexed by AccessIsWrite and Experiment.
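  // For instance, in the arrays above, AsanErrorCallback[1][0][2] would hold
  // the callee for a 4-byte write with no experiment (__asan_report_store4),
  // and the Experiment != 0 slot its __asan_report_exp_store4 counterpart
  // (names shown for illustration; they are assembled in
  // initializeCallbacks()).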
713   FunctionCallee AsanErrorCallbackSized[2][2];
714   FunctionCallee AsanMemoryAccessCallbackSized[2][2];
715
716   FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
717   Value *LocalDynamicShadow = nullptr;
718   const GlobalsMetadata &GlobalsMD;
719   DenseMap<const AllocaInst *, bool> ProcessedAllocas;
720 };
721
722 class AddressSanitizerLegacyPass : public FunctionPass {
723 public:
724   static char ID;
725
726   explicit AddressSanitizerLegacyPass(bool CompileKernel = false,
727                                       bool Recover = false,
728                                       bool UseAfterScope = false)
729       : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
730         UseAfterScope(UseAfterScope) {
731     initializeAddressSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
732   }
733
734   StringRef getPassName() const override {
735     return "AddressSanitizerFunctionPass";
736   }
737
738   void getAnalysisUsage(AnalysisUsage &AU) const override {
739     AU.addRequired<ASanGlobalsMetadataWrapperPass>();
740     AU.addRequired<TargetLibraryInfoWrapperPass>();
741   }
742
743   bool runOnFunction(Function &F) override {
744     GlobalsMetadata &GlobalsMD =
745         getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
746     const TargetLibraryInfo *TLI =
747         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
748     AddressSanitizer ASan(*F.getParent(), &GlobalsMD, CompileKernel, Recover,
749                           UseAfterScope);
750     return ASan.instrumentFunction(F, TLI);
751   }
752
753 private:
754   bool CompileKernel;
755   bool Recover;
756   bool UseAfterScope;
757 };
758
759 class ModuleAddressSanitizer {
760 public:
761   ModuleAddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
762                          bool CompileKernel = false, bool Recover = false,
763                          bool UseGlobalsGC = true, bool UseOdrIndicator = false)
764       : GlobalsMD(*GlobalsMD),
765         CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
766                                                             : CompileKernel),
767         Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
768         UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
769         // Enable aliases as they should have no downside with ODR indicators.
770         UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias),
771         UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator),
772         // Not a typo: ClWithComdat is almost completely pointless without
773         // ClUseGlobalsGC (because then it only works on modules without
774         // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
775         // and both suffer from gold PR19002, for which the UseGlobalsGC
776         // constructor argument is designed as a workaround. Therefore, disable
777         // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
778         // ok to do globals-gc.
779 UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel) { 780 C = &(M.getContext()); 781 int LongSize = M.getDataLayout().getPointerSizeInBits(); 782 IntptrTy = Type::getIntNTy(*C, LongSize); 783 TargetTriple = Triple(M.getTargetTriple()); 784 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel); 785 } 786 787 bool instrumentModule(Module &); 788 789 private: 790 void initializeCallbacks(Module &M); 791 792 bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat); 793 void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M, 794 ArrayRef<GlobalVariable *> ExtendedGlobals, 795 ArrayRef<Constant *> MetadataInitializers); 796 void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M, 797 ArrayRef<GlobalVariable *> ExtendedGlobals, 798 ArrayRef<Constant *> MetadataInitializers, 799 const std::string &UniqueModuleId); 800 void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M, 801 ArrayRef<GlobalVariable *> ExtendedGlobals, 802 ArrayRef<Constant *> MetadataInitializers); 803 void 804 InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M, 805 ArrayRef<GlobalVariable *> ExtendedGlobals, 806 ArrayRef<Constant *> MetadataInitializers); 807 808 GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer, 809 StringRef OriginalName); 810 void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata, 811 StringRef InternalSuffix); 812 Instruction *CreateAsanModuleDtor(Module &M); 813 814 bool canInstrumentAliasedGlobal(const GlobalAlias &GA) const; 815 bool shouldInstrumentGlobal(GlobalVariable *G) const; 816 bool ShouldUseMachOGlobalsSection() const; 817 StringRef getGlobalMetadataSection() const; 818 void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName); 819 void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); 820 uint64_t getMinRedzoneSizeForGlobal() const { 821 return getRedzoneSizeForScale(Mapping.Scale); 822 } 823 uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const; 824 int GetAsanVersion(const Module &M) const; 825 826 const GlobalsMetadata &GlobalsMD; 827 bool CompileKernel; 828 bool Recover; 829 bool UseGlobalsGC; 830 bool UsePrivateAlias; 831 bool UseOdrIndicator; 832 bool UseCtorComdat; 833 Type *IntptrTy; 834 LLVMContext *C; 835 Triple TargetTriple; 836 ShadowMapping Mapping; 837 FunctionCallee AsanPoisonGlobals; 838 FunctionCallee AsanUnpoisonGlobals; 839 FunctionCallee AsanRegisterGlobals; 840 FunctionCallee AsanUnregisterGlobals; 841 FunctionCallee AsanRegisterImageGlobals; 842 FunctionCallee AsanUnregisterImageGlobals; 843 FunctionCallee AsanRegisterElfGlobals; 844 FunctionCallee AsanUnregisterElfGlobals; 845 846 Function *AsanCtorFunction = nullptr; 847 Function *AsanDtorFunction = nullptr; 848 }; 849 850 class ModuleAddressSanitizerLegacyPass : public ModulePass { 851 public: 852 static char ID; 853 854 explicit ModuleAddressSanitizerLegacyPass(bool CompileKernel = false, 855 bool Recover = false, 856 bool UseGlobalGC = true, 857 bool UseOdrIndicator = false) 858 : ModulePass(ID), CompileKernel(CompileKernel), Recover(Recover), 859 UseGlobalGC(UseGlobalGC), UseOdrIndicator(UseOdrIndicator) { 860 initializeModuleAddressSanitizerLegacyPassPass( 861 *PassRegistry::getPassRegistry()); 862 } 863 864 StringRef getPassName() const override { return "ModuleAddressSanitizer"; } 865 866 void getAnalysisUsage(AnalysisUsage &AU) const override { 867 AU.addRequired<ASanGlobalsMetadataWrapperPass>(); 868 } 869 870 bool runOnModule(Module &M) override { 871 GlobalsMetadata 
&GlobalsMD =
872         getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
873     ModuleAddressSanitizer ASanModule(M, &GlobalsMD, CompileKernel, Recover,
874                                       UseGlobalGC, UseOdrIndicator);
875     return ASanModule.instrumentModule(M);
876   }
877
878 private:
879   bool CompileKernel;
880   bool Recover;
881   bool UseGlobalGC;
882   bool UseOdrIndicator;
883 };
884
885 // Stack poisoning does not play well with exception handling.
886 // When an exception is thrown, we essentially bypass the code
887 // that unpoisons the stack. This is why the run-time library has
888 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
889 // stack in the interceptor. This, however, does not work inside the
890 // actual function which catches the exception, most likely because the
891 // compiler hoists the load of the shadow value somewhere too high.
892 // This causes asan to report a nonexistent bug on 453.povray.
893 // It sounds like an LLVM bug.
894 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
895   Function &F;
896   AddressSanitizer &ASan;
897   DIBuilder DIB;
898   LLVMContext *C;
899   Type *IntptrTy;
900   Type *IntptrPtrTy;
901   ShadowMapping Mapping;
902
903   SmallVector<AllocaInst *, 16> AllocaVec;
904   SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
905   SmallVector<Instruction *, 8> RetVec;
906   unsigned StackAlignment;
907
908   FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
909       AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
910   FunctionCallee AsanSetShadowFunc[0x100] = {};
911   FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
912   FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
913
914   // Stores the place and arguments of a poisoning/unpoisoning call for an
915   // alloca.
915   struct AllocaPoisonCall {
916     IntrinsicInst *InsBefore;
917     AllocaInst *AI;
918     uint64_t Size;
919     bool DoPoison;
920   };
921   SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
922   SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
923   bool HasUntracedLifetimeIntrinsic = false;
924
925   SmallVector<AllocaInst *, 1> DynamicAllocaVec;
926   SmallVector<IntrinsicInst *, 1> StackRestoreVec;
927   AllocaInst *DynamicAllocaLayout = nullptr;
928   IntrinsicInst *LocalEscapeCall = nullptr;
929
930   bool HasInlineAsm = false;
931   bool HasReturnsTwiceCall = false;
932
933   FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
934       : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
935         C(ASan.C), IntptrTy(ASan.IntptrTy),
936         IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
937         StackAlignment(1 << Mapping.Scale) {}
938
939   bool runOnFunction() {
940     if (!ClStack) return false;
941
942     if (ClRedzoneByvalArgs)
943       copyArgsPassedByValToAllocas();
944
945     // Collect alloca, ret, lifetime instructions etc.
946     for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
947
948     if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
949
950     initializeCallbacks(*F.getParent());
951
952     if (HasUntracedLifetimeIntrinsic) {
953       // If there are lifetime intrinsics which couldn't be traced back to an
954       // alloca, we may not know exactly when a variable enters scope, and
955       // therefore should "fail safe" by not poisoning them.
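      // (This can happen, for example, when the pointer argument of a
      // lifetime marker goes through a PHI or select of several allocas, so
      // findAllocaForValue() cannot attribute it to a single alloca.)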
956       StaticAllocaPoisonCallVec.clear();
957       DynamicAllocaPoisonCallVec.clear();
958     }
959
960     processDynamicAllocas();
961     processStaticAllocas();
962
963     if (ClDebugStack) {
964       LLVM_DEBUG(dbgs() << F);
965     }
966     return true;
967   }
968
969   // Arguments marked with the "byval" attribute are implicitly copied without
970   // using an alloca instruction. To produce redzones for those arguments, we
971   // copy them a second time into memory allocated with an alloca instruction.
972   void copyArgsPassedByValToAllocas();
973
974   // Finds all Alloca instructions, puts poisoned red zones around all of
975   // them, and then unpoisons everything before the function returns.
977   void processStaticAllocas();
978   void processDynamicAllocas();
979
980   void createDynamicAllocasInitStorage();
981
982   // ----------------------- Visitors.
983   /// Collect all Ret instructions.
984   void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }
985
986   /// Collect all Resume instructions.
987   void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
988
989   /// Collect all CleanupReturnInst instructions.
990   void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
991
992   void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
993                                         Value *SavedStack) {
994     IRBuilder<> IRB(InstBefore);
995     Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
996     // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
997     // need to adjust the extracted SP to compute the address of the most
998     // recent alloca. We have a special @llvm.get.dynamic.area.offset
999     // intrinsic for this purpose.
1000     if (!isa<ReturnInst>(InstBefore)) {
1001       Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
1002           InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
1003           {IntptrTy});
1004
1005       Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});
1006
1007       DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
1008                                      DynamicAreaOffset);
1009     }
1010
1011     IRB.CreateCall(
1012         AsanAllocasUnpoisonFunc,
1013         {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1014   }
1015
1016   // Unpoison dynamic allocas' redzones.
1017   void unpoisonDynamicAllocas() {
1018     for (Instruction *Ret : RetVec)
1019       unpoisonDynamicAllocasBeforeInst(adjustForMusttailCall(Ret),
1020                                        DynamicAllocaLayout);
1021
1022     for (Instruction *StackRestoreInst : StackRestoreVec)
1023       unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1024                                        StackRestoreInst->getOperand(0));
1025   }
1026
1027   // Deploy and poison redzones around a dynamic alloca call. To do this, we
1028   // replace the call with another one with changed parameters and replace
1029   // all of its uses with the new address, so
1030   //   addr = alloca type, old_size, align
1031   // is replaced by
1032   //   new_size = (old_size + additional_size) * sizeof(type)
1033   //   tmp = alloca i8, new_size, max(align, 32)
1034   //   addr = tmp + 32 (the first 32 bytes are for the left redzone).
1035   // additional_size is added so that the new allocation contains not only
1036   // the requested memory, but also the left, partial and right redzones.
1037   void handleDynamicAllocaCall(AllocaInst *AI);
1038
1039   /// Collect Alloca instructions we want (and can) handle.
1040   void visitAllocaInst(AllocaInst &AI) {
1041     if (!ASan.isInterestingAlloca(AI)) {
1042       if (AI.isStaticAlloca()) {
1043         // Skip over allocas that are present *before* the first instrumented
1044         // alloca, we don't want to move those around.
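        // (StaticAllocasToMoveUp is consumed by processStaticAllocas(),
        // which hoists the collected allocas above the instrumented frame.)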
1045 if (AllocaVec.empty()) 1046 return; 1047 1048 StaticAllocasToMoveUp.push_back(&AI); 1049 } 1050 return; 1051 } 1052 1053 StackAlignment = std::max(StackAlignment, AI.getAlignment()); 1054 if (!AI.isStaticAlloca()) 1055 DynamicAllocaVec.push_back(&AI); 1056 else 1057 AllocaVec.push_back(&AI); 1058 } 1059 1060 /// Collect lifetime intrinsic calls to check for use-after-scope 1061 /// errors. 1062 void visitIntrinsicInst(IntrinsicInst &II) { 1063 Intrinsic::ID ID = II.getIntrinsicID(); 1064 if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II); 1065 if (ID == Intrinsic::localescape) LocalEscapeCall = &II; 1066 if (!ASan.UseAfterScope) 1067 return; 1068 if (!II.isLifetimeStartOrEnd()) 1069 return; 1070 // Found lifetime intrinsic, add ASan instrumentation if necessary. 1071 auto *Size = cast<ConstantInt>(II.getArgOperand(0)); 1072 // If size argument is undefined, don't do anything. 1073 if (Size->isMinusOne()) return; 1074 // Check that size doesn't saturate uint64_t and can 1075 // be stored in IntptrTy. 1076 const uint64_t SizeValue = Size->getValue().getLimitedValue(); 1077 if (SizeValue == ~0ULL || 1078 !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) 1079 return; 1080 // Find alloca instruction that corresponds to llvm.lifetime argument. 1081 AllocaInst *AI = findAllocaForValue(II.getArgOperand(1)); 1082 if (!AI) { 1083 HasUntracedLifetimeIntrinsic = true; 1084 return; 1085 } 1086 // We're interested only in allocas we can handle. 1087 if (!ASan.isInterestingAlloca(*AI)) 1088 return; 1089 bool DoPoison = (ID == Intrinsic::lifetime_end); 1090 AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison}; 1091 if (AI->isStaticAlloca()) 1092 StaticAllocaPoisonCallVec.push_back(APC); 1093 else if (ClInstrumentDynamicAllocas) 1094 DynamicAllocaPoisonCallVec.push_back(APC); 1095 } 1096 1097 void visitCallBase(CallBase &CB) { 1098 if (CallInst *CI = dyn_cast<CallInst>(&CB)) { 1099 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow; 1100 HasReturnsTwiceCall |= CI->canReturnTwice(); 1101 } 1102 } 1103 1104 // ---------------------- Helpers. 1105 void initializeCallbacks(Module &M); 1106 1107 // Copies bytes from ShadowBytes into shadow memory for indexes where 1108 // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that 1109 // ShadowBytes[i] is constantly zero and doesn't need to be overwritten. 
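  // A small worked example with illustrative values:
  //   ShadowMask  = {1,    1,    0,    1   }
  //   ShadowBytes = {0xF1, 0xF2, 0x00, 0xF3}
  // writes 0xF1, 0xF2 and 0xF3 to shadow offsets 0, 1 and 3, and leaves
  // shadow offset 2 untouched.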
1110   void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1111                     IRBuilder<> &IRB, Value *ShadowBase);
1112   void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1113                     size_t Begin, size_t End, IRBuilder<> &IRB,
1114                     Value *ShadowBase);
1115   void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1116                           ArrayRef<uint8_t> ShadowBytes, size_t Begin,
1117                           size_t End, IRBuilder<> &IRB, Value *ShadowBase);
1118
1119   void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
1120
1121   Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
1122                                bool Dynamic);
1123   PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
1124                      Instruction *ThenTerm, Value *ValueIfFalse);
1125 };
1126
1127 } // end anonymous namespace
1128
1129 void LocationMetadata::parse(MDNode *MDN) {
1130   assert(MDN->getNumOperands() == 3);
1131   MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
1132   Filename = DIFilename->getString();
1133   LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
1134   ColumnNo =
1135       mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
1136 }
1137
1138 // FIXME: It would be cleaner to attach the relevant metadata directly to the
1139 // globals we want to sanitize and read it on each pass over a function,
1140 // instead of reading module-level metadata up front.
1141 GlobalsMetadata::GlobalsMetadata(Module &M) {
1142   NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
1143   if (!Globals)
1144     return;
1145   for (auto MDN : Globals->operands()) {
1146     // Each metadata node contains the global and the fields of "Entry".
1147     assert(MDN->getNumOperands() == 5);
1148     auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
1149     // The optimizer may optimize away a global entirely.
1150     if (!V)
1151       continue;
1152     auto *StrippedV = V->stripPointerCasts();
1153     auto *GV = dyn_cast<GlobalVariable>(StrippedV);
1154     if (!GV)
1155       continue;
1156     // We can already have an entry for GV if it was merged with another
1157     // global.
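    // A typical entry as it appears in IR (sketch; operand order matches the
    // assert above: global, source location, name, is_dyn_init, is_excluded):
    //   !0 = !{[4 x i8]* @g, !1, !"g", i1 false, i1 false}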
1158 Entry &E = Entries[GV]; 1159 if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1))) 1160 E.SourceLoc.parse(Loc); 1161 if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2))) 1162 E.Name = Name->getString(); 1163 ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3)); 1164 E.IsDynInit |= IsDynInit->isOne(); 1165 ConstantInt *IsExcluded = 1166 mdconst::extract<ConstantInt>(MDN->getOperand(4)); 1167 E.IsExcluded |= IsExcluded->isOne(); 1168 } 1169 } 1170 1171 AnalysisKey ASanGlobalsMetadataAnalysis::Key; 1172 1173 GlobalsMetadata ASanGlobalsMetadataAnalysis::run(Module &M, 1174 ModuleAnalysisManager &AM) { 1175 return GlobalsMetadata(M); 1176 } 1177 1178 AddressSanitizerPass::AddressSanitizerPass(bool CompileKernel, bool Recover, 1179 bool UseAfterScope) 1180 : CompileKernel(CompileKernel), Recover(Recover), 1181 UseAfterScope(UseAfterScope) {} 1182 1183 PreservedAnalyses AddressSanitizerPass::run(Function &F, 1184 AnalysisManager<Function> &AM) { 1185 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 1186 Module &M = *F.getParent(); 1187 if (auto *R = MAMProxy.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) { 1188 const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F); 1189 AddressSanitizer Sanitizer(M, R, CompileKernel, Recover, UseAfterScope); 1190 if (Sanitizer.instrumentFunction(F, TLI)) 1191 return PreservedAnalyses::none(); 1192 return PreservedAnalyses::all(); 1193 } 1194 1195 report_fatal_error( 1196 "The ASanGlobalsMetadataAnalysis is required to run before " 1197 "AddressSanitizer can run"); 1198 return PreservedAnalyses::all(); 1199 } 1200 1201 ModuleAddressSanitizerPass::ModuleAddressSanitizerPass(bool CompileKernel, 1202 bool Recover, 1203 bool UseGlobalGC, 1204 bool UseOdrIndicator) 1205 : CompileKernel(CompileKernel), Recover(Recover), UseGlobalGC(UseGlobalGC), 1206 UseOdrIndicator(UseOdrIndicator) {} 1207 1208 PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M, 1209 AnalysisManager<Module> &AM) { 1210 GlobalsMetadata &GlobalsMD = AM.getResult<ASanGlobalsMetadataAnalysis>(M); 1211 ModuleAddressSanitizer Sanitizer(M, &GlobalsMD, CompileKernel, Recover, 1212 UseGlobalGC, UseOdrIndicator); 1213 if (Sanitizer.instrumentModule(M)) 1214 return PreservedAnalyses::none(); 1215 return PreservedAnalyses::all(); 1216 } 1217 1218 INITIALIZE_PASS(ASanGlobalsMetadataWrapperPass, "asan-globals-md", 1219 "Read metadata to mark which globals should be instrumented " 1220 "when running ASan.", 1221 false, true) 1222 1223 char AddressSanitizerLegacyPass::ID = 0; 1224 1225 INITIALIZE_PASS_BEGIN( 1226 AddressSanitizerLegacyPass, "asan", 1227 "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, 1228 false) 1229 INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass) 1230 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1231 INITIALIZE_PASS_END( 1232 AddressSanitizerLegacyPass, "asan", 1233 "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, 1234 false) 1235 1236 FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel, 1237 bool Recover, 1238 bool UseAfterScope) { 1239 assert(!CompileKernel || Recover); 1240 return new AddressSanitizerLegacyPass(CompileKernel, Recover, UseAfterScope); 1241 } 1242 1243 char ModuleAddressSanitizerLegacyPass::ID = 0; 1244 1245 INITIALIZE_PASS( 1246 ModuleAddressSanitizerLegacyPass, "asan-module", 1247 "AddressSanitizer: detects use-after-free and out-of-bounds bugs." 
1248 "ModulePass", 1249 false, false) 1250 1251 ModulePass *llvm::createModuleAddressSanitizerLegacyPassPass( 1252 bool CompileKernel, bool Recover, bool UseGlobalsGC, bool UseOdrIndicator) { 1253 assert(!CompileKernel || Recover); 1254 return new ModuleAddressSanitizerLegacyPass(CompileKernel, Recover, 1255 UseGlobalsGC, UseOdrIndicator); 1256 } 1257 1258 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { 1259 size_t Res = countTrailingZeros(TypeSize / 8); 1260 assert(Res < kNumberOfAccessSizes); 1261 return Res; 1262 } 1263 1264 /// Create a global describing a source location. 1265 static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M, 1266 LocationMetadata MD) { 1267 Constant *LocData[] = { 1268 createPrivateGlobalForString(M, MD.Filename, true, kAsanGenPrefix), 1269 ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo), 1270 ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo), 1271 }; 1272 auto LocStruct = ConstantStruct::getAnon(LocData); 1273 auto GV = new GlobalVariable(M, LocStruct->getType(), true, 1274 GlobalValue::PrivateLinkage, LocStruct, 1275 kAsanGenPrefix); 1276 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); 1277 return GV; 1278 } 1279 1280 /// Check if \p G has been created by a trusted compiler pass. 1281 static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) { 1282 // Do not instrument @llvm.global_ctors, @llvm.used, etc. 1283 if (G->getName().startswith("llvm.")) 1284 return true; 1285 1286 // Do not instrument asan globals. 1287 if (G->getName().startswith(kAsanGenPrefix) || 1288 G->getName().startswith(kSanCovGenPrefix) || 1289 G->getName().startswith(kODRGenPrefix)) 1290 return true; 1291 1292 // Do not instrument gcov counter arrays. 1293 if (G->getName() == "__llvm_gcov_ctr") 1294 return true; 1295 1296 return false; 1297 } 1298 1299 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { 1300 // Shadow >> scale 1301 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); 1302 if (Mapping.Offset == 0) return Shadow; 1303 // (Shadow >> scale) | offset 1304 Value *ShadowBase; 1305 if (LocalDynamicShadow) 1306 ShadowBase = LocalDynamicShadow; 1307 else 1308 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset); 1309 if (Mapping.OrShadowOffset) 1310 return IRB.CreateOr(Shadow, ShadowBase); 1311 else 1312 return IRB.CreateAdd(Shadow, ShadowBase); 1313 } 1314 1315 // Instrument memset/memmove/memcpy 1316 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { 1317 IRBuilder<> IRB(MI); 1318 if (isa<MemTransferInst>(MI)) { 1319 IRB.CreateCall( 1320 isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy, 1321 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 1322 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), 1323 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 1324 } else if (isa<MemSetInst>(MI)) { 1325 IRB.CreateCall( 1326 AsanMemset, 1327 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 1328 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), 1329 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 1330 } 1331 MI->eraseFromParent(); 1332 } 1333 1334 /// Check if we want (and can) handle this alloca. 
1335 bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1336   auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
1337
1338   if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
1339     return PreviouslySeenAllocaInfo->getSecond();
1340
1341   bool IsInteresting =
1342       (AI.getAllocatedType()->isSized() &&
1343        // alloca() may be called with 0 size, ignore it.
1344        ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
1345        // We are only interested in allocas not promotable to registers.
1346        // Promotable allocas are common under -O0.
1347        (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
1348        // inalloca allocas are not treated as static, and we don't want
1349        // dynamic alloca instrumentation for them either.
1350        !AI.isUsedWithInAlloca() &&
1351        // swifterror allocas are register promoted by ISel
1352        !AI.isSwiftError());
1353
1354   ProcessedAllocas[&AI] = IsInteresting;
1355   return IsInteresting;
1356 }
1357
1358 bool AddressSanitizer::ignoreAccess(Value *Ptr) {
1359   // Do not instrument accesses from different address spaces; we cannot deal
1360   // with them.
1361   Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1362   if (PtrTy->getPointerAddressSpace() != 0)
1363     return true;
1364
1365   // Ignore swifterror addresses.
1366   // swifterror memory addresses are mem2reg promoted by instruction
1367   // selection. As such they cannot have regular uses like an instrumentation
1368   // function and it makes no sense to track them as memory.
1369   if (Ptr->isSwiftError())
1370     return true;
1371
1372   // Treat memory accesses to promotable allocas as non-interesting since they
1373   // will not cause memory violations. This greatly speeds up the instrumented
1374   // executable at -O0.
1375   if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
1376     if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
1377       return true;
1378
1379   return false;
1380 }
1381
1382 void AddressSanitizer::getInterestingMemoryOperands(
1383     Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
1384   // Skip memory accesses inserted by another instrumentation.
1385   if (I->hasMetadata("nosanitize"))
1386     return;
1387
1388   // Do not instrument the load fetching the dynamic shadow address.
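  // (LocalDynamicShadow is set up by maybeInsertDynamicShadowAtFunctionEntry;
  // the check below keeps the pass from instrumenting its own bookkeeping
  // load.)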
1389 if (LocalDynamicShadow == I) 1390 return; 1391 1392 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 1393 if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand())) 1394 return; 1395 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false, 1396 LI->getType(), LI->getAlign()); 1397 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 1398 if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand())) 1399 return; 1400 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true, 1401 SI->getValueOperand()->getType(), SI->getAlign()); 1402 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 1403 if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand())) 1404 return; 1405 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true, 1406 RMW->getValOperand()->getType(), None); 1407 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { 1408 if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand())) 1409 return; 1410 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true, 1411 XCHG->getCompareOperand()->getType(), None); 1412 } else if (auto CI = dyn_cast<CallInst>(I)) { 1413 auto *F = CI->getCalledFunction(); 1414 if (F && (F->getName().startswith("llvm.masked.load.") || 1415 F->getName().startswith("llvm.masked.store."))) { 1416 bool IsWrite = F->getName().startswith("llvm.masked.store."); 1417 // Masked store has an initial operand for the value. 1418 unsigned OpOffset = IsWrite ? 1 : 0; 1419 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads) 1420 return; 1421 1422 auto BasePtr = CI->getOperand(OpOffset); 1423 if (ignoreAccess(BasePtr)) 1424 return; 1425 auto Ty = cast<PointerType>(BasePtr->getType())->getElementType(); 1426 MaybeAlign Alignment = Align(1); 1427 // Otherwise no alignment guarantees. We probably got Undef. 1428 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset))) 1429 Alignment = Op->getMaybeAlignValue(); 1430 Value *Mask = CI->getOperand(2 + OpOffset); 1431 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask); 1432 } else { 1433 for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) { 1434 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) || 1435 ignoreAccess(CI->getArgOperand(ArgNo))) 1436 continue; 1437 Type *Ty = CI->getParamByValType(ArgNo); 1438 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1)); 1439 } 1440 } 1441 } 1442 } 1443 1444 static bool isPointerOperand(Value *V) { 1445 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V); 1446 } 1447 1448 // This is a rough heuristic; it may cause both false positives and 1449 // false negatives. The proper implementation requires cooperation with 1450 // the frontend. 1451 static bool isInterestingPointerComparison(Instruction *I) { 1452 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { 1453 if (!Cmp->isRelational()) 1454 return false; 1455 } else { 1456 return false; 1457 } 1458 return isPointerOperand(I->getOperand(0)) && 1459 isPointerOperand(I->getOperand(1)); 1460 } 1461 1462 // This is a rough heuristic; it may cause both false positives and 1463 // false negatives. The proper implementation requires cooperation with 1464 // the frontend. 
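// For instance, isPointerOperand() also accepts integers produced by
// ptrtoint, so subtracting two such integers is flagged here even when the
// frontend knows they are unrelated values (one example scenario, not an
// exhaustive characterization).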
1465 static bool isInterestingPointerSubtraction(Instruction *I) {
1466   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1467     if (BO->getOpcode() != Instruction::Sub)
1468       return false;
1469   } else {
1470     return false;
1471   }
1472   return isPointerOperand(I->getOperand(0)) &&
1473          isPointerOperand(I->getOperand(1));
1474 }
1475
1476 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1477   // If a global variable does not have dynamic initialization we don't
1478   // have to instrument it. However, if a global does not have an initializer
1479   // at all, we assume it has a dynamic initializer (in another TU).
1480   //
1481   // FIXME: Metadata should be attached directly to the global instead of
1482   // being added to llvm.asan.globals.
1483   return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
1484 }
1485
1486 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1487     Instruction *I) {
1488   IRBuilder<> IRB(I);
1489   FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1490   Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1491   for (Value *&i : Param) {
1492     if (i->getType()->isPointerTy())
1493       i = IRB.CreatePointerCast(i, IntptrTy);
1494   }
1495   IRB.CreateCall(F, Param);
1496 }
1497
1498 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1499                                 Instruction *InsertBefore, Value *Addr,
1500                                 MaybeAlign Alignment, unsigned Granularity,
1501                                 uint32_t TypeSize, bool IsWrite,
1502                                 Value *SizeArgument, bool UseCalls,
1503                                 uint32_t Exp) {
1504   // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1505   // if the data is properly aligned.
1506   if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
1507        TypeSize == 128) &&
1508       (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
1509     return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
1510                                    nullptr, UseCalls, Exp);
1511   Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
1512                                          IsWrite, nullptr, UseCalls, Exp);
1513 }
1514
1515 static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
1516                                         const DataLayout &DL, Type *IntptrTy,
1517                                         Value *Mask, Instruction *I,
1518                                         Value *Addr, MaybeAlign Alignment,
1519                                         unsigned Granularity, uint32_t TypeSize,
1520                                         bool IsWrite, Value *SizeArgument,
1521                                         bool UseCalls, uint32_t Exp) {
1522   auto *VTy = cast<FixedVectorType>(
1523       cast<PointerType>(Addr->getType())->getElementType());
1524   uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1525   unsigned Num = VTy->getNumElements();
1526   auto Zero = ConstantInt::get(IntptrTy, 0);
1527   for (unsigned Idx = 0; Idx < Num; ++Idx) {
1528     Value *InstrumentedAddress = nullptr;
1529     Instruction *InsertBefore = I;
1530     if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
1531       // dyn_cast as we might get UndefValue
1532       if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
1533         if (Masked->isZero())
1534           // Mask is constant false, so no instrumentation needed.
1535           continue;
1536         // If we have a true or undef value, fall through to doInstrumentAddress
1537         // with InsertBefore == I
1538       }
1539     } else {
1540       IRBuilder<> IRB(I);
1541       Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
1542       Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
1543       InsertBefore = ThenTerm;
1544     }
1545
1546     IRBuilder<> IRB(InsertBefore);
1547     InstrumentedAddress =
1548         IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
1549     doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
1550                         Granularity, ElemTypeSize, IsWrite, SizeArgument,
1551                         UseCalls, Exp);
1552   }
1553 }
1554
1555 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1556                                      InterestingMemoryOperand &O, bool UseCalls,
1557                                      const DataLayout &DL) {
1558   Value *Addr = O.getPtr();
1559
1560   // Optimization experiments.
1561   // The experiments can be used to evaluate potential optimizations that remove
1562   // instrumentation (assess false negatives). Instead of completely removing
1563   // some instrumentation, you set Exp to a non-zero value (a mask of
1564   // optimization experiments that want to remove instrumentation of this
1565   // instruction). If Exp is non-zero, this pass will emit special calls into
1566   // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1).
1567   // These calls make the runtime terminate the program in a special way (with
1568   // a different exit status). Then you run the new compiler on a buggy corpus,
1569   // collect the special terminations (ideally, you don't see them at all -- no
1570   // false negatives) and make the decision on the optimization.
1571   uint32_t Exp = ClForceExperiment;
1572
1573   if (ClOpt && ClOptGlobals) {
1574     // If initialization order checking is disabled, a simple access to a
1575     // dynamically initialized global is always valid.
1576     GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1577     if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1578         isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
1579       NumOptimizedAccessesToGlobalVar++;
1580       return;
1581     }
1582   }
1583
1584   if (ClOpt && ClOptStack) {
1585     // A direct inbounds access to a stack variable is always valid.
1586     if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1587         isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
1588       NumOptimizedAccessesToStackVar++;
1589       return;
1590     }
1591   }
1592
1593   if (O.IsWrite)
1594     NumInstrumentedWrites++;
1595   else
1596     NumInstrumentedReads++;
1597
1598   unsigned Granularity = 1 << Mapping.Scale;
1599   if (O.MaybeMask) {
1600     instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(),
1601                                 Addr, O.Alignment, Granularity, O.TypeSize,
1602                                 O.IsWrite, nullptr, UseCalls, Exp);
1603   } else {
1604     doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1605                         Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls,
1606                         Exp);
1607   }
1608 }
1609
1610 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1611                                                  Value *Addr, bool IsWrite,
1612                                                  size_t AccessSizeIndex,
1613                                                  Value *SizeArgument,
1614                                                  uint32_t Exp) {
1615   IRBuilder<> IRB(InsertBefore);
1616   Value *ExpVal = Exp == 0 ?
nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp); 1617 CallInst *Call = nullptr; 1618 if (SizeArgument) { 1619 if (Exp == 0) 1620 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0], 1621 {Addr, SizeArgument}); 1622 else 1623 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1], 1624 {Addr, SizeArgument, ExpVal}); 1625 } else { 1626 if (Exp == 0) 1627 Call = 1628 IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr); 1629 else 1630 Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex], 1631 {Addr, ExpVal}); 1632 } 1633 1634 Call->setCannotMerge(); 1635 return Call; 1636 } 1637 1638 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, 1639 Value *ShadowValue, 1640 uint32_t TypeSize) { 1641 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale; 1642 // Addr & (Granularity - 1) 1643 Value *LastAccessedByte = 1644 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); 1645 // (Addr & (Granularity - 1)) + size - 1 1646 if (TypeSize / 8 > 1) 1647 LastAccessedByte = IRB.CreateAdd( 1648 LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); 1649 // (uint8_t) ((Addr & (Granularity-1)) + size - 1) 1650 LastAccessedByte = 1651 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false); 1652 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue 1653 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); 1654 } 1655 1656 void AddressSanitizer::instrumentAddress(Instruction *OrigIns, 1657 Instruction *InsertBefore, Value *Addr, 1658 uint32_t TypeSize, bool IsWrite, 1659 Value *SizeArgument, bool UseCalls, 1660 uint32_t Exp) { 1661 bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad; 1662 1663 IRBuilder<> IRB(InsertBefore); 1664 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1665 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); 1666 1667 if (UseCalls) { 1668 if (Exp == 0) 1669 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], 1670 AddrLong); 1671 else 1672 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], 1673 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1674 return; 1675 } 1676 1677 if (IsMyriad) { 1678 // Strip the cache bit and do range check. 
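// (A hedged reading of the constants below: on Myriad, the top address bits
// carry a tag, and only DDR-tagged addresses are backed by shadow memory, so
// every other address skips the shadow check entirely.)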
1679     // AddrLong &= ~kMyriadCacheBitMask32
1680     AddrLong = IRB.CreateAnd(AddrLong, ~kMyriadCacheBitMask32);
1681     // Tag = AddrLong >> kMyriadTagShift
1682     Value *Tag = IRB.CreateLShr(AddrLong, kMyriadTagShift);
1683     // Tag == kMyriadDDRTag
1684     Value *TagCheck =
1685         IRB.CreateICmpEQ(Tag, ConstantInt::get(IntptrTy, kMyriadDDRTag));
1686
1687     Instruction *TagCheckTerm =
1688         SplitBlockAndInsertIfThen(TagCheck, InsertBefore, false,
1689                                   MDBuilder(*C).createBranchWeights(1, 100000));
1690     assert(cast<BranchInst>(TagCheckTerm)->isUnconditional());
1691     IRB.SetInsertPoint(TagCheckTerm);
1692     InsertBefore = TagCheckTerm;
1693   }
1694
1695   Type *ShadowTy =
1696       IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
1697   Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
1698   Value *ShadowPtr = memToShadow(AddrLong, IRB);
1699   Value *CmpVal = Constant::getNullValue(ShadowTy);
1700   Value *ShadowValue =
1701       IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
1702
1703   Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
1704   size_t Granularity = 1ULL << Mapping.Scale;
1705   Instruction *CrashTerm = nullptr;
1706
1707   if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
1708     // We use branch weights for the slow path check, to indicate that the slow
1709     // path is rarely taken. This seems to be the case for SPEC benchmarks.
1710     Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1711         Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
1712     assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1713     BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1714     IRB.SetInsertPoint(CheckTerm);
1715     Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
1716     if (Recover) {
1717       CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1718     } else {
1719       BasicBlock *CrashBlock =
1720           BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1721       CrashTerm = new UnreachableInst(*C, CrashBlock);
1722       BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1723       ReplaceInstWithInst(CheckTerm, NewTerm);
1724     }
1725   } else {
1726     CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1727   }
1728
1729   Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
1730                                          AccessSizeIndex, SizeArgument, Exp);
1731   Crash->setDebugLoc(OrigIns->getDebugLoc());
1732 }
1733
1734 // Instrument unusual size or unusual alignment.
1735 // We cannot do it with a single check, so we do a 1-byte check for the first
1736 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1737 // to report the actual access size.
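// Illustrative example of the scheme above (not additional behavior): a
// hypothetical 10-byte access (TypeSize == 80) matches none of the
// 1/2/4/8/16-byte fast paths, so the non-call branch below emits a 1-byte
// shadow check for Addr and another for Addr + 9, passing Size = 10 so the
// runtime can still report the true access width.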
1738 void AddressSanitizer::instrumentUnusualSizeOrAlignment( 1739 Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, 1740 bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) { 1741 IRBuilder<> IRB(InsertBefore); 1742 Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); 1743 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1744 if (UseCalls) { 1745 if (Exp == 0) 1746 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0], 1747 {AddrLong, Size}); 1748 else 1749 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1], 1750 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1751 } else { 1752 Value *LastByte = IRB.CreateIntToPtr( 1753 IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), 1754 Addr->getType()); 1755 instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp); 1756 instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp); 1757 } 1758 } 1759 1760 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit, 1761 GlobalValue *ModuleName) { 1762 // Set up the arguments to our poison/unpoison functions. 1763 IRBuilder<> IRB(&GlobalInit.front(), 1764 GlobalInit.front().getFirstInsertionPt()); 1765 1766 // Add a call to poison all external globals before the given function starts. 1767 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); 1768 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); 1769 1770 // Add calls to unpoison all globals before each return instruction. 1771 for (auto &BB : GlobalInit.getBasicBlockList()) 1772 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) 1773 CallInst::Create(AsanUnpoisonGlobals, "", RI); 1774 } 1775 1776 void ModuleAddressSanitizer::createInitializerPoisonCalls( 1777 Module &M, GlobalValue *ModuleName) { 1778 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1779 if (!GV) 1780 return; 1781 1782 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer()); 1783 if (!CA) 1784 return; 1785 1786 for (Use &OP : CA->operands()) { 1787 if (isa<ConstantAggregateZero>(OP)) continue; 1788 ConstantStruct *CS = cast<ConstantStruct>(OP); 1789 1790 // Must have a function or null ptr. 1791 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) { 1792 if (F->getName() == kAsanModuleCtorName) continue; 1793 auto *Priority = cast<ConstantInt>(CS->getOperand(0)); 1794 // Don't instrument CTORs that will run before asan.module_ctor. 1795 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple)) 1796 continue; 1797 poisonOneInitializer(*F, ModuleName); 1798 } 1799 } 1800 } 1801 1802 bool ModuleAddressSanitizer::canInstrumentAliasedGlobal( 1803 const GlobalAlias &GA) const { 1804 // In case this function should be expanded to include rules that do not just 1805 // apply when CompileKernel is true, either guard all existing rules with an 1806 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules 1807 // should also apply to user space. 1808 assert(CompileKernel && "Only expecting to be called when compiling kernel"); 1809 1810 // When compiling the kernel, globals that are aliased by symbols prefixed 1811 // by "__" are special and cannot be padded with a redzone. 
1812   if (GA.getName().startswith("__"))
1813     return false;
1814
1815   return true;
1816 }
1817
1818 bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
1819   Type *Ty = G->getValueType();
1820   LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
1821
1822   // FIXME: Metadata should be attached directly to the global instead of
1823   // being added to llvm.asan.globals.
1824   if (GlobalsMD.get(G).IsExcluded) return false;
1825   if (!Ty->isSized()) return false;
1826   if (!G->hasInitializer()) return false;
1827   // Only instrument globals of default address spaces.
1828   if (G->getAddressSpace()) return false;
1829   if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
1830   // Two problems with thread-locals:
1831   //   - The address of the main thread's copy can't be computed at link-time.
1832   //   - Need to poison all copies, not just the main thread's one.
1833   if (G->isThreadLocal()) return false;
1834   // For now, just ignore this Global if the alignment is large.
1835   if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false;
1836
1837   // For non-COFF targets, only instrument globals known to be defined by this
1838   // TU.
1839   // FIXME: We can instrument comdat globals on ELF if we are using the
1840   // GC-friendly metadata scheme.
1841   if (!TargetTriple.isOSBinFormatCOFF()) {
1842     if (!G->hasExactDefinition() || G->hasComdat())
1843       return false;
1844   } else {
1845     // On COFF, don't instrument non-ODR linkages.
1846     if (G->isInterposable())
1847       return false;
1848   }
1849
1850   // If a comdat is present, it must have a selection kind that implies ODR
1851   // semantics: no duplicates, any, or exact match.
1852   if (Comdat *C = G->getComdat()) {
1853     switch (C->getSelectionKind()) {
1854     case Comdat::Any:
1855     case Comdat::ExactMatch:
1856     case Comdat::NoDuplicates:
1857       break;
1858     case Comdat::Largest:
1859     case Comdat::SameSize:
1860       return false;
1861     }
1862   }
1863
1864   if (G->hasSection()) {
1865     // The kernel uses explicit sections mostly for special global variables
1866     // that we should not instrument. E.g. the kernel may rely on their layout
1867     // without redzones, or remove them at link time ("discard.*"), etc.
1868     if (CompileKernel)
1869       return false;
1870
1871     StringRef Section = G->getSection();
1872
1873     // Globals from llvm.metadata aren't emitted, do not instrument them.
1874     if (Section == "llvm.metadata") return false;
1875     // Do not instrument globals from special LLVM sections.
1876     if (Section.find("__llvm") != StringRef::npos || Section.find("__LLVM") != StringRef::npos) return false;
1877
1878     // Do not instrument function pointers to initialization and termination
1879     // routines: the dynamic linker will not properly handle redzones.
1880     if (Section.startswith(".preinit_array") ||
1881         Section.startswith(".init_array") ||
1882         Section.startswith(".fini_array")) {
1883       return false;
1884     }
1885
1886     // On COFF, if the section name contains '$', it is highly likely that the
1887     // user is using section sorting to create an array of globals similar to
1888     // the way initialization callbacks are registered in .init_array and
1889     // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
1890     // to such globals is counterproductive, because the intent is that they
1891     // will form an array, and out-of-bounds accesses are expected.
1892     // See https://github.com/google/sanitizers/issues/305
1893     // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
1894     if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
1895       LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
1896                         << *G << "\n");
1897       return false;
1898     }
1899
1900     if (TargetTriple.isOSBinFormatMachO()) {
1901       StringRef ParsedSegment, ParsedSection;
1902       unsigned TAA = 0, StubSize = 0;
1903       bool TAAParsed;
1904       std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
1905           Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
1906       assert(ErrorCode.empty() && "Invalid section specifier.");
1907
1908       // Ignore the globals from the __OBJC section. The ObjC runtime assumes
1909       // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
1910       // them.
1911       if (ParsedSegment == "__OBJC" ||
1912           (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
1913         LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
1914         return false;
1915       }
1916       // See https://github.com/google/sanitizers/issues/32
1917       // Constant CFString instances are compiled in the following way:
1918       //  -- the string buffer is emitted into
1919       //     __TEXT,__cstring,cstring_literals
1920       //  -- the constant NSConstantString structure referencing that buffer
1921       //     is placed into __DATA,__cfstring
1922       // Therefore there's no point in placing redzones into __DATA,__cfstring.
1923       // Moreover, it causes the linker to crash on OS X 10.7
1924       if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
1925         LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
1926         return false;
1927       }
1928       // The linker merges the contents of cstring_literals and removes the
1929       // trailing zeroes.
1930       if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
1931         LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
1932         return false;
1933       }
1934     }
1935   }
1936
1937   if (CompileKernel) {
1938     // Globals that are prefixed by "__" are special and cannot be padded with
1939     // a redzone.
1940     if (G->getName().startswith("__"))
1941       return false;
1942   }
1943
1944   return true;
1945 }
1946
1947 // On Mach-O platforms, we emit global metadata in a separate section of the
1948 // binary in order to allow the linker to properly dead strip. This is only
1949 // supported on recent versions of ld64.
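// (Concretely, the version gates below are macOS >= 10.11, iOS/tvOS >= 9, and
// watchOS >= 2 -- taken here as the first releases whose ld64 understands the
// live_support section emitted in InstrumentGlobalsMachO.)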
1950 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const { 1951 if (!TargetTriple.isOSBinFormatMachO()) 1952 return false; 1953 1954 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11)) 1955 return true; 1956 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9)) 1957 return true; 1958 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2)) 1959 return true; 1960 1961 return false; 1962 } 1963 1964 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const { 1965 switch (TargetTriple.getObjectFormat()) { 1966 case Triple::COFF: return ".ASAN$GL"; 1967 case Triple::ELF: return "asan_globals"; 1968 case Triple::MachO: return "__DATA,__asan_globals,regular"; 1969 case Triple::Wasm: 1970 case Triple::GOFF: 1971 case Triple::XCOFF: 1972 report_fatal_error( 1973 "ModuleAddressSanitizer not implemented for object file format"); 1974 case Triple::UnknownObjectFormat: 1975 break; 1976 } 1977 llvm_unreachable("unsupported object format"); 1978 } 1979 1980 void ModuleAddressSanitizer::initializeCallbacks(Module &M) { 1981 IRBuilder<> IRB(*C); 1982 1983 // Declare our poisoning and unpoisoning functions. 1984 AsanPoisonGlobals = 1985 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy); 1986 AsanUnpoisonGlobals = 1987 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy()); 1988 1989 // Declare functions that register/unregister globals. 1990 AsanRegisterGlobals = M.getOrInsertFunction( 1991 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 1992 AsanUnregisterGlobals = M.getOrInsertFunction( 1993 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 1994 1995 // Declare the functions that find globals in a shared object and then invoke 1996 // the (un)register function on them. 1997 AsanRegisterImageGlobals = M.getOrInsertFunction( 1998 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 1999 AsanUnregisterImageGlobals = M.getOrInsertFunction( 2000 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 2001 2002 AsanRegisterElfGlobals = 2003 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(), 2004 IntptrTy, IntptrTy, IntptrTy); 2005 AsanUnregisterElfGlobals = 2006 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(), 2007 IntptrTy, IntptrTy, IntptrTy); 2008 } 2009 2010 // Put the metadata and the instrumented global in the same group. This ensures 2011 // that the metadata is discarded if the instrumented global is discarded. 2012 void ModuleAddressSanitizer::SetComdatForGlobalMetadata( 2013 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) { 2014 Module &M = *G->getParent(); 2015 Comdat *C = G->getComdat(); 2016 if (!C) { 2017 if (!G->hasName()) { 2018 // If G is unnamed, it must be internal. Give it an artificial name 2019 // so we can put it in a comdat. 2020 assert(G->hasLocalLinkage()); 2021 G->setName(Twine(kAsanGenPrefix) + "_anon_global"); 2022 } 2023 2024 if (!InternalSuffix.empty() && G->hasLocalLinkage()) { 2025 std::string Name = std::string(G->getName()); 2026 Name += InternalSuffix; 2027 C = M.getOrInsertComdat(Name); 2028 } else { 2029 C = M.getOrInsertComdat(G->getName()); 2030 } 2031 2032 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private 2033 // linkage to internal linkage so that a symbol table entry is emitted. This 2034 // is necessary in order to create the comdat group. 
2035 if (TargetTriple.isOSBinFormatCOFF()) { 2036 C->setSelectionKind(Comdat::NoDuplicates); 2037 if (G->hasPrivateLinkage()) 2038 G->setLinkage(GlobalValue::InternalLinkage); 2039 } 2040 G->setComdat(C); 2041 } 2042 2043 assert(G->hasComdat()); 2044 Metadata->setComdat(G->getComdat()); 2045 } 2046 2047 // Create a separate metadata global and put it in the appropriate ASan 2048 // global registration section. 2049 GlobalVariable * 2050 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer, 2051 StringRef OriginalName) { 2052 auto Linkage = TargetTriple.isOSBinFormatMachO() 2053 ? GlobalVariable::InternalLinkage 2054 : GlobalVariable::PrivateLinkage; 2055 GlobalVariable *Metadata = new GlobalVariable( 2056 M, Initializer->getType(), false, Linkage, Initializer, 2057 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName)); 2058 Metadata->setSection(getGlobalMetadataSection()); 2059 return Metadata; 2060 } 2061 2062 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) { 2063 AsanDtorFunction = 2064 Function::Create(FunctionType::get(Type::getVoidTy(*C), false), 2065 GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); 2066 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); 2067 2068 return ReturnInst::Create(*C, AsanDtorBB); 2069 } 2070 2071 void ModuleAddressSanitizer::InstrumentGlobalsCOFF( 2072 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2073 ArrayRef<Constant *> MetadataInitializers) { 2074 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2075 auto &DL = M.getDataLayout(); 2076 2077 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); 2078 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 2079 Constant *Initializer = MetadataInitializers[i]; 2080 GlobalVariable *G = ExtendedGlobals[i]; 2081 GlobalVariable *Metadata = 2082 CreateMetadataGlobal(M, Initializer, G->getName()); 2083 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); 2084 Metadata->setMetadata(LLVMContext::MD_associated, MD); 2085 MetadataGlobals[i] = Metadata; 2086 2087 // The MSVC linker always inserts padding when linking incrementally. We 2088 // cope with that by aligning each struct to its size, which must be a power 2089 // of two. 2090 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType()); 2091 assert(isPowerOf2_32(SizeOfGlobalStruct) && 2092 "global metadata will not be padded appropriately"); 2093 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct)); 2094 2095 SetComdatForGlobalMetadata(G, Metadata, ""); 2096 } 2097 2098 // Update llvm.compiler.used, adding the new metadata globals. This is 2099 // needed so that during LTO these variables stay alive. 
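// (Unlike llvm.used, llvm.compiler.used only stops the compiler from deleting
// the metadata; the linker may still discard it together with the associated
// global, which is exactly what the MD_associated link above enables.)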
2100 if (!MetadataGlobals.empty()) 2101 appendToCompilerUsed(M, MetadataGlobals); 2102 } 2103 2104 void ModuleAddressSanitizer::InstrumentGlobalsELF( 2105 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2106 ArrayRef<Constant *> MetadataInitializers, 2107 const std::string &UniqueModuleId) { 2108 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2109 2110 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); 2111 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 2112 GlobalVariable *G = ExtendedGlobals[i]; 2113 GlobalVariable *Metadata = 2114 CreateMetadataGlobal(M, MetadataInitializers[i], G->getName()); 2115 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); 2116 Metadata->setMetadata(LLVMContext::MD_associated, MD); 2117 MetadataGlobals[i] = Metadata; 2118 2119 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId); 2120 } 2121 2122 // Update llvm.compiler.used, adding the new metadata globals. This is 2123 // needed so that during LTO these variables stay alive. 2124 if (!MetadataGlobals.empty()) 2125 appendToCompilerUsed(M, MetadataGlobals); 2126 2127 // RegisteredFlag serves two purposes. First, we can pass it to dladdr() 2128 // to look up the loaded image that contains it. Second, we can store in it 2129 // whether registration has already occurred, to prevent duplicate 2130 // registration. 2131 // 2132 // Common linkage ensures that there is only one global per shared library. 2133 GlobalVariable *RegisteredFlag = new GlobalVariable( 2134 M, IntptrTy, false, GlobalVariable::CommonLinkage, 2135 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName); 2136 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility); 2137 2138 // Create start and stop symbols. 2139 GlobalVariable *StartELFMetadata = new GlobalVariable( 2140 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, 2141 "__start_" + getGlobalMetadataSection()); 2142 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); 2143 GlobalVariable *StopELFMetadata = new GlobalVariable( 2144 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, 2145 "__stop_" + getGlobalMetadataSection()); 2146 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); 2147 2148 // Create a call to register the globals with the runtime. 2149 IRB.CreateCall(AsanRegisterElfGlobals, 2150 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), 2151 IRB.CreatePointerCast(StartELFMetadata, IntptrTy), 2152 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); 2153 2154 // We also need to unregister globals at the end, e.g., when a shared library 2155 // gets closed. 2156 IRBuilder<> IRB_Dtor(CreateAsanModuleDtor(M)); 2157 IRB_Dtor.CreateCall(AsanUnregisterElfGlobals, 2158 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), 2159 IRB.CreatePointerCast(StartELFMetadata, IntptrTy), 2160 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); 2161 } 2162 2163 void ModuleAddressSanitizer::InstrumentGlobalsMachO( 2164 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2165 ArrayRef<Constant *> MetadataInitializers) { 2166 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2167 2168 // On recent Mach-O platforms, use a structure which binds the liveness of 2169 // the global variable to the metadata struct. 
Keep the list of the "Liveness" GVs
2170 // created so they can be added to llvm.compiler.used.
2171   StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2172   SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2173
2174   for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2175     Constant *Initializer = MetadataInitializers[i];
2176     GlobalVariable *G = ExtendedGlobals[i];
2177     GlobalVariable *Metadata =
2178         CreateMetadataGlobal(M, Initializer, G->getName());
2179
2180     // On recent Mach-O platforms, we emit the global metadata in a way that
2181     // allows the linker to properly strip dead globals.
2182     auto LivenessBinder =
2183         ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2184                             ConstantExpr::getPointerCast(Metadata, IntptrTy));
2185     GlobalVariable *Liveness = new GlobalVariable(
2186         M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2187         Twine("__asan_binder_") + G->getName());
2188     Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2189     LivenessGlobals[i] = Liveness;
2190   }
2191
2192   // Update llvm.compiler.used, adding the new liveness globals. This is
2193   // needed so that during LTO these variables stay alive. The alternative
2194   // would be to have the linker handle the LTO symbols, but libLTO's
2195   // current API does not expose access to the section for each symbol.
2196   if (!LivenessGlobals.empty())
2197     appendToCompilerUsed(M, LivenessGlobals);
2198
2199   // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2200   // to look up the loaded image that contains it. Second, we can store in it
2201   // whether registration has already occurred, to prevent duplicate
2202   // registration.
2203   //
2204   // Common linkage ensures that there is only one global per shared library.
2205   GlobalVariable *RegisteredFlag = new GlobalVariable(
2206       M, IntptrTy, false, GlobalVariable::CommonLinkage,
2207       ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2208   RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2209
2210   IRB.CreateCall(AsanRegisterImageGlobals,
2211                  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2212
2213   // We also need to unregister globals at the end, e.g., when a shared library
2214   // gets closed.
2215   IRBuilder<> IRB_Dtor(CreateAsanModuleDtor(M));
2216   IRB_Dtor.CreateCall(AsanUnregisterImageGlobals,
2217                       {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2218 }
2219
2220 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2221     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2222     ArrayRef<Constant *> MetadataInitializers) {
2223   assert(ExtendedGlobals.size() == MetadataInitializers.size());
2224   unsigned N = ExtendedGlobals.size();
2225   assert(N > 0);
2226
2227   // On platforms that don't have a custom metadata section, we emit an array
2228   // of global metadata structures.
2229   ArrayType *ArrayOfGlobalStructTy =
2230       ArrayType::get(MetadataInitializers[0]->getType(), N);
2231   auto AllGlobals = new GlobalVariable(
2232       M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2233       ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2234   if (Mapping.Scale > 3)
2235     AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2236
2237   IRB.CreateCall(AsanRegisterGlobals,
2238                  {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2239                   ConstantInt::get(IntptrTy, N)});
2240
2241   // We also need to unregister globals at the end, e.g., when a shared library
2242   // gets closed.
2243   IRBuilder<> IRB_Dtor(CreateAsanModuleDtor(M));
2244   IRB_Dtor.CreateCall(AsanUnregisterGlobals,
2245                       {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2246                        ConstantInt::get(IntptrTy, N)});
2247 }
2248
2249 // This function replaces all global variables with new variables that have
2250 // trailing redzones. It also creates a function that poisons
2251 // redzones and inserts this function into llvm.global_ctors.
2252 // Sets *CtorComdat to true if the global registration code emitted into the
2253 // asan constructor is comdat-compatible.
2254 bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
2255                                                bool *CtorComdat) {
2256   *CtorComdat = false;
2257
2258   // Build set of globals that are aliased by some GA, where
2259   // canInstrumentAliasedGlobal(GA) returns false.
2260   SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2261   if (CompileKernel) {
2262     for (auto &GA : M.aliases()) {
2263       if (const auto *GV = dyn_cast<GlobalVariable>(GA.getAliasee())) {
2264         if (!canInstrumentAliasedGlobal(GA))
2265           AliasedGlobalExclusions.insert(GV);
2266       }
2267     }
2268   }
2269
2270   SmallVector<GlobalVariable *, 16> GlobalsToChange;
2271   for (auto &G : M.globals()) {
2272     if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2273       GlobalsToChange.push_back(&G);
2274   }
2275
2276   size_t n = GlobalsToChange.size();
2277   if (n == 0) {
2278     *CtorComdat = true;
2279     return false;
2280   }
2281
2282   auto &DL = M.getDataLayout();
2283
2284   // A global is described by a structure
2285   //   size_t beg;
2286   //   size_t size;
2287   //   size_t size_with_redzone;
2288   //   const char *name;
2289   //   const char *module_name;
2290   //   size_t has_dynamic_init;
2291   //   void *source_location;
2292   //   size_t odr_indicator;
2293   // We initialize an array of such structures and pass it to a run-time call.
2294   StructType *GlobalStructTy =
2295       StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2296                       IntptrTy, IntptrTy, IntptrTy);
2297   SmallVector<GlobalVariable *, 16> NewGlobals(n);
2298   SmallVector<Constant *, 16> Initializers(n);
2299
2300   bool HasDynamicallyInitializedGlobals = false;
2301
2302   // We shouldn't merge identical module names, as this string serves as the
2303   // unique module ID at runtime.
2304   GlobalVariable *ModuleName = createPrivateGlobalForString(
2305       M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);
2306
2307   for (size_t i = 0; i < n; i++) {
2308     GlobalVariable *G = GlobalsToChange[i];
2309
2310     // FIXME: Metadata should be attached directly to the global instead of
2311     // being added to llvm.asan.globals.
2312     auto MD = GlobalsMD.get(G);
2313     StringRef NameForGlobal = G->getName();
2314     // Create string holding the global name (use global name from metadata
2315     // if it's available, otherwise just write the name of global variable).
2316     GlobalVariable *Name = createPrivateGlobalForString(
2317         M, MD.Name.empty() ? NameForGlobal : MD.Name,
2318         /*AllowMerging*/ true, kAsanGenPrefix);
2319
2320     Type *Ty = G->getValueType();
2321     const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2322     const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2323     Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2324
2325     StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2326     Constant *NewInitializer = ConstantStruct::get(
2327         NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2328
2329     // Create a new global variable with enough space for a redzone.
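// Worked example (a sketch; assumes getMinRedzoneSizeForGlobal() == 32): for a
// 13-byte global, getRedzoneSizeForGlobal below yields RightRedzoneSize == 51
// (the 32-byte minimum plus 32 - 13 % 32 = 19 bytes of rounding), so NewTy is
// { [13 x i8], [51 x i8] } and the padded size, 64, is a multiple of MinRZ.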
2330     GlobalValue::LinkageTypes Linkage = G->getLinkage();
2331     if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2332       Linkage = GlobalValue::InternalLinkage;
2333     GlobalVariable *NewGlobal =
2334         new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
2335                            "", G, G->getThreadLocalMode());
2336     NewGlobal->copyAttributesFrom(G);
2337     NewGlobal->setComdat(G->getComdat());
2338     NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal()));
2339     // Don't fold globals with redzones. The ODR violation detector and redzone
2340     // poisoning implicitly create a dependence on the global's address, so it
2341     // is no longer valid for it to be marked unnamed_addr.
2342     NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2343
2344     // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2345     if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2346         G->isConstant()) {
2347       auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2348       if (Seq && Seq->isCString())
2349         NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2350     }
2351
2352     // Transfer the debug info. The payload starts at offset zero so we can
2353     // copy the debug info over as is.
2354     SmallVector<DIGlobalVariableExpression *, 1> GVs;
2355     G->getDebugInfo(GVs);
2356     for (auto *GV : GVs)
2357       NewGlobal->addDebugInfo(GV);
2358
2359     Value *Indices2[2];
2360     Indices2[0] = IRB.getInt32(0);
2361     Indices2[1] = IRB.getInt32(0);
2362
2363     G->replaceAllUsesWith(
2364         ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2365     NewGlobal->takeName(G);
2366     G->eraseFromParent();
2367     NewGlobals[i] = NewGlobal;
2368
2369     Constant *SourceLoc;
2370     if (!MD.SourceLoc.empty()) {
2371       auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
2372       SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
2373     } else {
2374       SourceLoc = ConstantInt::get(IntptrTy, 0);
2375     }
2376
2377     Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
2378     GlobalValue *InstrumentedGlobal = NewGlobal;
2379
2380     bool CanUsePrivateAliases =
2381         TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2382         TargetTriple.isOSBinFormatWasm();
2383     if (CanUsePrivateAliases && UsePrivateAlias) {
2384       // Create a local alias for NewGlobal to avoid a crash on ODR violations
2385       // between instrumented and non-instrumented libraries.
2386       InstrumentedGlobal =
2387           GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2388     }
2389
2390     // ODR violations should not happen for local linkage.
2391     if (NewGlobal->hasLocalLinkage()) {
2392       ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
2393                                                IRB.getInt8PtrTy());
2394     } else if (UseOdrIndicator) {
2395       // With local aliases, we need to provide another externally visible
2396       // symbol __odr_asan_XXX to detect ODR violations.
2397       auto *ODRIndicatorSym =
2398           new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2399                              Constant::getNullValue(IRB.getInt8Ty()),
2400                              kODRGenPrefix + NameForGlobal, nullptr,
2401                              NewGlobal->getThreadLocalMode());
2402
2403       // Set meaningful attributes for the indicator symbol.
2404       ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2405       ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2406       ODRIndicatorSym->setAlignment(Align(1));
2407       ODRIndicator = ODRIndicatorSym;
2408     }
2409
2410     Constant *Initializer = ConstantStruct::get(
2411         GlobalStructTy,
2412         ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2413         ConstantInt::get(IntptrTy, SizeInBytes),
2414         ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2415         ConstantExpr::getPointerCast(Name, IntptrTy),
2416         ConstantExpr::getPointerCast(ModuleName, IntptrTy),
2417         ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc,
2418         ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2419
2420     if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
2421
2422     LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2423
2424     Initializers[i] = Initializer;
2425   }
2426
2427   // Add instrumented globals to the llvm.compiler.used list to prevent LTO
2428   // from ConstantMerge'ing them.
2429   SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2430   for (size_t i = 0; i < n; i++) {
2431     GlobalVariable *G = NewGlobals[i];
2432     if (G->getName().empty()) continue;
2433     GlobalsToAddToUsedList.push_back(G);
2434   }
2435   appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2436
2437   std::string ELFUniqueModuleId =
2438       (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M)
2439                                                         : "";
2440
2441   if (!ELFUniqueModuleId.empty()) {
2442     InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
2443     *CtorComdat = true;
2444   } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2445     InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
2446   } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2447     InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
2448   } else {
2449     InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
2450   }
2451
2452   // Create calls for poisoning before initializers run and unpoisoning after.
2453   if (HasDynamicallyInitializedGlobals)
2454     createInitializerPoisonCalls(M, ModuleName);
2455
2456   LLVM_DEBUG(dbgs() << M);
2457   return true;
2458 }
2459
2460 uint64_t
2461 ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2462   constexpr uint64_t kMaxRZ = 1 << 18;
2463   const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2464
2465   // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2466   uint64_t RZ =
2467       std::max(MinRZ, std::min(kMaxRZ, (SizeInBytes / MinRZ / 4) * MinRZ));
2468
2469   // Round up to a multiple of MinRZ.
2470   if (SizeInBytes % MinRZ)
2471     RZ += MinRZ - (SizeInBytes % MinRZ);
2472   assert((RZ + SizeInBytes) % MinRZ == 0);
2473
2474   return RZ;
2475 }
2476
2477 int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
2478   int LongSize = M.getDataLayout().getPointerSizeInBits();
2479   bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
2480   int Version = 8;
2481   // 32-bit Android is one version ahead because of the switch to dynamic
2482   // shadow.
2483   Version += (LongSize == 32 && isAndroid);
2484   return Version;
2485 }
2486
2487 bool ModuleAddressSanitizer::instrumentModule(Module &M) {
2488   initializeCallbacks(M);
2489
2490   // Create a module constructor. A destructor is created lazily because not
2491   // all platforms and not all modules need it.
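// (In the non-kernel branch below, the constructor calls __asan_init and, when
// ClInsertVersionCheck is set, an additional callee whose name is formed from
// kAsanVersionCheckNamePrefix and GetAsanVersion(M) above.)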
2492 if (CompileKernel) { 2493 // The kernel always builds with its own runtime, and therefore does not 2494 // need the init and version check calls. 2495 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName); 2496 } else { 2497 std::string AsanVersion = std::to_string(GetAsanVersion(M)); 2498 std::string VersionCheckName = 2499 ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : ""; 2500 std::tie(AsanCtorFunction, std::ignore) = 2501 createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, 2502 kAsanInitName, /*InitArgTypes=*/{}, 2503 /*InitArgs=*/{}, VersionCheckName); 2504 } 2505 2506 bool CtorComdat = true; 2507 if (ClGlobals) { 2508 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator()); 2509 InstrumentGlobals(IRB, M, &CtorComdat); 2510 } 2511 2512 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple); 2513 2514 // Put the constructor and destructor in comdat if both 2515 // (1) global instrumentation is not TU-specific 2516 // (2) target is ELF. 2517 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) { 2518 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName)); 2519 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction); 2520 if (AsanDtorFunction) { 2521 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName)); 2522 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction); 2523 } 2524 } else { 2525 appendToGlobalCtors(M, AsanCtorFunction, Priority); 2526 if (AsanDtorFunction) 2527 appendToGlobalDtors(M, AsanDtorFunction, Priority); 2528 } 2529 2530 return true; 2531 } 2532 2533 void AddressSanitizer::initializeCallbacks(Module &M) { 2534 IRBuilder<> IRB(*C); 2535 // Create __asan_report* callbacks. 2536 // IsWrite, TypeSize and Exp are encoded in the function name. 2537 for (int Exp = 0; Exp < 2; Exp++) { 2538 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 2539 const std::string TypeStr = AccessIsWrite ? "store" : "load"; 2540 const std::string ExpStr = Exp ? "exp_" : ""; 2541 const std::string EndingStr = Recover ? "_noabort" : ""; 2542 2543 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy}; 2544 SmallVector<Type *, 2> Args1{1, IntptrTy}; 2545 if (Exp) { 2546 Type *ExpType = Type::getInt32Ty(*C); 2547 Args2.push_back(ExpType); 2548 Args1.push_back(ExpType); 2549 } 2550 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2551 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr, 2552 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2553 2554 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2555 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, 2556 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2557 2558 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 2559 AccessSizeIndex++) { 2560 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex); 2561 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2562 M.getOrInsertFunction( 2563 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr, 2564 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2565 2566 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2567 M.getOrInsertFunction( 2568 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, 2569 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2570 } 2571 } 2572 } 2573 2574 const std::string MemIntrinCallbackPrefix = 2575 CompileKernel ? 
std::string("") : ClMemoryAccessCallbackPrefix; 2576 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove", 2577 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2578 IRB.getInt8PtrTy(), IntptrTy); 2579 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", 2580 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2581 IRB.getInt8PtrTy(), IntptrTy); 2582 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset", 2583 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2584 IRB.getInt32Ty(), IntptrTy); 2585 2586 AsanHandleNoReturnFunc = 2587 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy()); 2588 2589 AsanPtrCmpFunction = 2590 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy); 2591 AsanPtrSubFunction = 2592 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy); 2593 if (Mapping.InGlobal) 2594 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow", 2595 ArrayType::get(IRB.getInt8Ty(), 0)); 2596 } 2597 2598 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { 2599 // For each NSObject descendant having a +load method, this method is invoked 2600 // by the ObjC runtime before any of the static constructors is called. 2601 // Therefore we need to instrument such methods with a call to __asan_init 2602 // at the beginning in order to initialize our runtime before any access to 2603 // the shadow memory. 2604 // We cannot just ignore these methods, because they may call other 2605 // instrumented functions. 2606 if (F.getName().find(" load]") != std::string::npos) { 2607 FunctionCallee AsanInitFunction = 2608 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {}); 2609 IRBuilder<> IRB(&F.front(), F.front().begin()); 2610 IRB.CreateCall(AsanInitFunction, {}); 2611 return true; 2612 } 2613 return false; 2614 } 2615 2616 bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) { 2617 // Generate code only when dynamic addressing is needed. 2618 if (Mapping.Offset != kDynamicShadowSentinel) 2619 return false; 2620 2621 IRBuilder<> IRB(&F.front().front()); 2622 if (Mapping.InGlobal) { 2623 if (ClWithIfuncSuppressRemat) { 2624 // An empty inline asm with input reg == output reg. 2625 // An opaque pointer-to-int cast, basically. 2626 InlineAsm *Asm = InlineAsm::get( 2627 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false), 2628 StringRef(""), StringRef("=r,0"), 2629 /*hasSideEffects=*/false); 2630 LocalDynamicShadow = 2631 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow"); 2632 } else { 2633 LocalDynamicShadow = 2634 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow"); 2635 } 2636 } else { 2637 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( 2638 kAsanShadowMemoryDynamicAddress, IntptrTy); 2639 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress); 2640 } 2641 return true; 2642 } 2643 2644 void AddressSanitizer::markEscapedLocalAllocas(Function &F) { 2645 // Find the one possible call to llvm.localescape and pre-mark allocas passed 2646 // to it as uninteresting. This assumes we haven't started processing allocas 2647 // yet. This check is done up front because iterating the use list in 2648 // isInterestingAlloca would be algorithmically slower. 2649 assert(ProcessedAllocas.empty() && "must process localescape before allocas"); 2650 2651 // Try to get the declaration of llvm.localescape. If it's not in the module, 2652 // we can exit early. 
2653   if (!F.getParent()->getFunction("llvm.localescape")) return;
2654
2655   // Look for a call to llvm.localescape in the entry block. It can't be in
2656   // any other block.
2657   for (Instruction &I : F.getEntryBlock()) {
2658     IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2659     if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2660       // We found a call. Mark all the allocas passed in as uninteresting.
2661       for (Value *Arg : II->arg_operands()) {
2662         AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2663         assert(AI && AI->isStaticAlloca() &&
2664                "non-static alloca arg to localescape");
2665         ProcessedAllocas[AI] = false;
2666       }
2667       break;
2668     }
2669   }
2670 }
2671
2672 bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
2673   bool ShouldInstrument =
2674       ClDebugMin < 0 || ClDebugMax < 0 ||
2675       (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
2676   Instrumented++;
2677   return !ShouldInstrument;
2678 }
2679
2680 bool AddressSanitizer::instrumentFunction(Function &F,
2681                                           const TargetLibraryInfo *TLI) {
2682   if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
2683   if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
2684   if (F.getName().startswith("__asan_")) return false;
2685
2686   bool FunctionModified = false;
2687
2688   // If needed, insert __asan_init before checking for SanitizeAddress attr.
2689   // This function needs to be called even if the function body is not
2690   // instrumented.
2691   if (maybeInsertAsanInitAtFunctionEntry(F))
2692     FunctionModified = true;
2693
2694   // Leave if the function doesn't need instrumentation.
2695   if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
2696
2697   LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
2698
2699   initializeCallbacks(*F.getParent());
2700
2701   FunctionStateRAII CleanupObj(this);
2702
2703   FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
2704
2705   // We can't instrument allocas used with llvm.localescape. Only static allocas
2706   // can be passed to that intrinsic.
2707   markEscapedLocalAllocas(F);
2708
2709   // We want to instrument every address only once per basic block (unless there
2710   // are calls between uses).
2711   SmallPtrSet<Value *, 16> TempsToInstrument;
2712   SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
2713   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
2714   SmallVector<Instruction *, 8> NoReturnCalls;
2715   SmallVector<BasicBlock *, 16> AllBlocks;
2716   SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
2717   int NumAllocas = 0;
2718
2719   // Fill the set of memory operations to instrument.
2720   for (auto &BB : F) {
2721     AllBlocks.push_back(&BB);
2722     TempsToInstrument.clear();
2723     int NumInsnsPerBB = 0;
2724     for (auto &Inst : BB) {
2725       if (LooksLikeCodeInBug11395(&Inst)) return false;
2726       SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
2727       getInterestingMemoryOperands(&Inst, InterestingOperands);
2728
2729       if (!InterestingOperands.empty()) {
2730         for (auto &Operand : InterestingOperands) {
2731           if (ClOpt && ClOptSameTemp) {
2732             Value *Ptr = Operand.getPtr();
2733             // If we have a mask, skip instrumentation if we've already
2734             // instrumented the full object. But don't add to TempsToInstrument
2735             // because we might get another load/store with a different mask.
2736             if (Operand.MaybeMask) {
2737               if (TempsToInstrument.count(Ptr))
2738                 continue; // We've seen this (whole) temp in the current BB.
            } else {
2740               if (!TempsToInstrument.insert(Ptr).second)
2741                 continue; // We've seen this temp in the current BB.
2742             }
2743           }
2744           OperandsToInstrument.push_back(Operand);
2745           NumInsnsPerBB++;
2746         }
2747       } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
2748                   isInterestingPointerComparison(&Inst)) ||
2749                  ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
2750                   isInterestingPointerSubtraction(&Inst))) {
2751         PointerComparisonsOrSubtracts.push_back(&Inst);
2752       } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
2753         // ok, take it.
2754         IntrinToInstrument.push_back(MI);
2755         NumInsnsPerBB++;
2756       } else {
2757         if (isa<AllocaInst>(Inst)) NumAllocas++;
2758         if (auto *CB = dyn_cast<CallBase>(&Inst)) {
2759           // A call inside BB.
2760           TempsToInstrument.clear();
2761           if (CB->doesNotReturn() && !CB->hasMetadata("nosanitize"))
2762             NoReturnCalls.push_back(CB);
2763         }
2764         if (CallInst *CI = dyn_cast<CallInst>(&Inst))
2765           maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
2766       }
2767       if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
2768     }
2769   }
2770
2771   bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 &&
2772                    OperandsToInstrument.size() + IntrinToInstrument.size() >
2773                        (unsigned)ClInstrumentationWithCallsThreshold);
2774   const DataLayout &DL = F.getParent()->getDataLayout();
2775   ObjectSizeOpts ObjSizeOpts;
2776   ObjSizeOpts.RoundToAlign = true;
2777   ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);
2778
2779   // Instrument.
2780   int NumInstrumented = 0;
2781   for (auto &Operand : OperandsToInstrument) {
2782     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
2783       instrumentMop(ObjSizeVis, Operand, UseCalls,
2784                     F.getParent()->getDataLayout());
2785     FunctionModified = true;
2786   }
2787   for (auto Inst : IntrinToInstrument) {
2788     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
2789       instrumentMemIntrinsic(Inst);
2790     FunctionModified = true;
2791   }
2792
2793   FunctionStackPoisoner FSP(F, *this);
2794   bool ChangedStack = FSP.runOnFunction();
2795
2796   // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
2797   // See e.g. https://github.com/google/sanitizers/issues/37
2798   for (auto CI : NoReturnCalls) {
2799     IRBuilder<> IRB(CI);
2800     IRB.CreateCall(AsanHandleNoReturnFunc, {});
2801   }
2802
2803   for (auto Inst : PointerComparisonsOrSubtracts) {
2804     instrumentPointerComparisonOrSubtraction(Inst);
2805     FunctionModified = true;
2806   }
2807
2808   if (ChangedStack || !NoReturnCalls.empty())
2809     FunctionModified = true;
2810
2811   LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
2812                     << F << "\n");
2813
2814   return FunctionModified;
2815 }
2816
2817 // Workaround for bug 11395: we don't want to instrument the stack in functions
2818 // with large assembly blobs (32-bit only), otherwise reg alloc may crash.
2819 // FIXME: remove once bug 11395 is fixed.
2820 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
2821   if (LongSize != 32) return false;
2822   CallInst *CI = dyn_cast<CallInst>(I);
2823   if (!CI || !CI->isInlineAsm()) return false;
2824   if (CI->getNumArgOperands() <= 5) return false;
2825   // We have inline assembly with quite a few arguments.
void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = M.getOrInsertFunction(
        kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy);
    AsanStackFreeFunc[i] =
        M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                              IRB.getVoidTy(), IntptrTy, IntptrTy);
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << kAsanSetShadowPrefix;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}

void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size, minus any
  // leading and trailing zeros in ShadowMask. Zeros never change, so they need
  // neither poisoning nor unpoisoning. Still, we don't mind if some of them
  // end up in the middle of a store.
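  // Worked example (illustrative): with ShadowBytes = {0xf1, 0xf1, 0x04, 0xf3}
  // and an all-ones ShadowMask, the loop below emits a single unaligned i32
  // store; on a little-endian target the packed value is 0xf304f1f1 (byte 0
  // lands in the least significant position).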
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
        Align(1));

    i += StoreSizeInBytes;
  }
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ClMaxInlinePoisoningSize) {
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      IRB.CreateCall(AsanSetShadowFunc[Val],
                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
                      ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}

// The fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
// every power of 2 starting at kMinStackMallocSize; class i covers frames of
// up to kMinStackMallocSize << i bytes.
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
  llvm_unreachable("impossible LocalStackSize");
}

void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
  Instruction *CopyInsertPoint = &F.front().front();
  if (CopyInsertPoint == ASan.LocalDynamicShadow) {
    // Insert after the dynamic shadow location is determined.
    CopyInsertPoint = CopyInsertPoint->getNextNode();
    assert(CopyInsertPoint);
  }
  IRBuilder<> IRB(CopyInsertPoint);
  const DataLayout &DL = F.getParent()->getDataLayout();
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr()) {
      Type *Ty = Arg.getParamByValType();
      const Align Alignment =
          DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);

      AllocaInst *AI = IRB.CreateAlloca(
          Ty, nullptr,
          (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
              ".byval");
      AI->setAlignment(Alignment);
      Arg.replaceAllUsesWith(AI);

      uint64_t AllocSize = DL.getTypeAllocSize(Ty);
      IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
    }
  }
}
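// Illustrative before/after (ours): for
//   define void @f(%struct.S* byval(%struct.S) align 8 %s)
// the loop above inserts at function entry
//   %s.byval = alloca %struct.S, align 8
//   call void @llvm.memcpy.p0i8.p0i8.i64(... %s.byval, ... %s, i64 <size>, ...)
// and redirects all uses of %s to %s.byval, giving the argument a local copy
// whose redzones ASan can manage.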
PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  Alloca->setAlignment(Align(FrameAlignment));
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}

void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
  BasicBlock &FirstBB = *F.begin();
  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
  DynamicAllocaLayout->setAlignment(Align(32));
}

void FunctionStackPoisoner::processDynamicAllocas() {
  if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
    assert(DynamicAllocaPoisonCallVec.empty());
    return;
  }

  // Insert poison calls for lifetime intrinsics for dynamic allocas.
  for (const auto &APC : DynamicAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(!APC.AI->isStaticAlloca());

    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    // Dynamic allocas will be unpoisoned unconditionally below in
    // unpoisonDynamicAllocas.
  }

  // Handle dynamic allocas.
  createDynamicAllocasInitStorage();
  for (auto &AI : DynamicAllocaVec)
    handleDynamicAllocaCall(AI);
  unpoisonDynamicAllocas();
}
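// Rough shape of the result (illustrative, ours): each dynamic alloca is
// replaced by handleDynamicAllocaCall() with a redzone-padded i8 alloca plus
//   call void @__asan_alloca_poison(i64 %new.addr, i64 %old.size)
// and DynamicAllocaLayout tracks the most recent replacement so that
// unpoisonDynamicAllocas() (defined elsewhere in this file) can emit a
// matching __asan_allocas_unpoison call on every exit path.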
/// Collect instructions in the entry block after \p InsBefore which initialize
/// permanent storage for a function argument. These instructions must remain
/// in the entry block so that uninitialized values do not appear in
/// backtraces. An added benefit is that this conserves spill slots. This does
/// not move stores before instrumented / "interesting" allocas.
static void findStoresToUninstrumentedArgAllocas(
    AddressSanitizer &ASan, Instruction &InsBefore,
    SmallVectorImpl<Instruction *> &InitInsts) {
  Instruction *Start = InsBefore.getNextNonDebugInstruction();
  for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
    // Argument initialization looks like:
    // 1) store <Argument>, <Alloca> OR
    // 2) <CastArgument> = cast <Argument> to ...
    //    store <CastArgument> to <Alloca>
    // Do not consider any other kind of instruction.
    //
    // Note: This covers all known cases, but may not be exhaustive. An
    // alternative to pattern-matching stores is to DFS over all Argument uses:
    // this might be more general, but is probably much more complicated.
    if (isa<AllocaInst>(It) || isa<CastInst>(It))
      continue;
    if (auto *Store = dyn_cast<StoreInst>(It)) {
      // The store destination must be an alloca that isn't interesting for
      // ASan to instrument. These are moved up before InsBefore, and they're
      // not interesting because allocas for arguments can be mem2reg'd.
      auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
      if (!Alloca || ASan.isInterestingAlloca(*Alloca))
        continue;

      Value *Val = Store->getValueOperand();
      bool IsDirectArgInit = isa<Argument>(Val);
      bool IsArgInitViaCast =
          isa<CastInst>(Val) &&
          isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
          // Check that the cast appears directly before the store. Otherwise
          // moving the cast before InsBefore may break the IR.
          Val == It->getPrevNonDebugInstruction();
      bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
      if (!IsArgInit)
        continue;

      if (IsArgInitViaCast)
        InitInsts.push_back(cast<Instruction>(Val));
      InitInsts.push_back(Store);
      continue;
    }

    // Do not reorder past unknown instructions: argument initialization should
    // only involve casts and stores.
    return;
  }
}
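// Illustrative instance (ours) of the pattern matched above:
//   %x.addr = alloca i32              ; uninstrumented argument slot
//   store i32 %x, i32* %x.addr        ; case 1: direct store of the argument
// or, with an intervening cast,
//   %conv = zext i1 %b to i8          ; case 2: cast ...
//   store i8 %conv, i8* %b.addr       ;         ... then store
// Here both the cast and the store are collected into InitInsts.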
void FunctionStackPoisoner::processStaticAllocas() {
  if (AllocaVec.empty()) {
    assert(StaticAllocaPoisonCallVec.empty());
    return;
  }

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation;
  if (auto SP = F.getSubprogram())
    EntryDebugLocation = DebugLoc::get(SP->getScopeLine(), 0, SP);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);

  // Make sure non-instrumented allocas stay in the entry block. Otherwise,
  // debug info is broken, because only entry-block allocas are treated as
  // regular stack slots.
  auto InsBeforeB = InsBefore->getParent();
  assert(InsBeforeB == &F.getEntryBlock());
  for (auto *AI : StaticAllocasToMoveUp)
    if (AI->getParent() == InsBeforeB)
      AI->moveBefore(InsBefore);

  // Move stores of arguments into entry-block allocas as well. This prevents
  // extra stack slots from being generated (to house the argument values until
  // they can be stored into the allocas). This also prevents uninitialized
  // values from being shown in backtraces.
  SmallVector<Instruction *, 8> ArgInitInsts;
  findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
  for (Instruction *ArgInitInst : ArgInitInsts)
    ArgInitInst->moveBefore(InsBefore);

  // If we have a call to llvm.localescape, keep it in the entry block.
  if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = {AI->getName().data(),
                                      ASan.getAllocaSizeInBytes(*AI),
                                      0,
                                      AI->getAlignment(),
                                      AI,
                                      0,
                                      0};
    SVD.push_back(D);
  }

  // The minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t Granularity = 1ULL << Mapping.Scale;
  size_t MinHeaderSize = std::max((size_t)ASan.LongSize / 2, Granularity);
  const ASanStackFrameLayout &L =
      ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
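  // Rough illustration (ours; the real padding is computed by
  // ComputeASanStackFrameLayout): two 8-byte locals with Granularity 8 and
  // MinHeaderSize 32 produce a frame shaped approximately like
  //   [0,32)   left redzone (frame magic, description ptr, PC)
  //   [32,40)  first variable
  //   [40,64)  middle redzone
  //   [64,72)  second variable
  //   [72,96)  right redzone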
  // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
  DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
  for (auto &Desc : SVD)
    AllocaToSVDMap[Desc.AI] = &Desc;

  // Update SVD with information from lifetime intrinsics.
  for (const auto &APC : StaticAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(APC.AI->isStaticAlloca());

    ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
    Desc.LifetimeSize = Desc.Size;
    if (const DILocation *FnLoc = EntryDebugLocation.get()) {
      if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
        if (LifetimeLoc->getFile() == FnLoc->getFile())
          if (unsigned Line = LifetimeLoc->getLine())
            Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
      }
    }
  }

  auto DescriptionString = ComputeASanStackFrameDescription(SVD);
  LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
                       LocalStackSize <= kMaxStackMallocSize;
  bool DoDynamicAlloca = ClDynamicAllocaStack;
  // Don't do dynamic alloca or stack malloc if:
  // 1) There is inline asm: too often it makes assumptions on which registers
  //    are available.
  // 2) There is a returns_twice call (typically setjmp), which is
  //    optimization-hostile, and doesn't play well with introduced indirect
  //    register-relative calculation of local variable addresses.
  DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
  DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;

  Value *StaticAlloca =
      DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);

  Value *FakeStack;
  Value *LocalStackBase;
  Value *LocalStackBaseAlloca;
  uint8_t DIExprFlags = DIExpression::ApplyOffset;

  if (DoStackMalloc) {
    LocalStackBaseAlloca =
        IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
    // void *FakeStack = __asan_option_detect_stack_use_after_return
    //     ? __asan_stack_malloc_N(LocalStackSize)
    //     : nullptr;
    // void *LocalStackBase = (FakeStack) ? FakeStack
    //                                    : alloca(LocalStackSize);
    Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
        kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
    Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
        IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
        Constant::getNullValue(IRB.getInt32Ty()));
    Instruction *Term =
        SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
    IRBuilder<> IRBIf(Term);
    StackMallocIdx = StackMallocSizeClass(LocalStackSize);
    assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
    Value *FakeStackValue =
        IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                         ConstantInt::get(IntptrTy, LocalStackSize));
    IRB.SetInsertPoint(InsBefore);
    FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
                          ConstantInt::get(IntptrTy, 0));

    Value *NoFakeStack =
        IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
    Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
    IRBIf.SetInsertPoint(Term);
    Value *AllocaValue =
        DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;

    IRB.SetInsertPoint(InsBefore);
    LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
    IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
    DIExprFlags |= DIExpression::DerefBefore;
  } else {
    // void *FakeStack = nullptr;
    // void *LocalStackBase = alloca(LocalStackSize);
    FakeStack = ConstantInt::get(IntptrTy, 0);
    LocalStackBase =
        DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
    LocalStackBaseAlloca = LocalStackBase;
  }

  // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
  // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
  // later passes and can result in dropped variable coverage in debug info.
  Value *LocalStackBaseAllocaPtr =
      isa<PtrToIntInst>(LocalStackBaseAlloca)
          ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
          : LocalStackBaseAlloca;
  assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
         "Variable descriptions relative to ASan stack base will be dropped");

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
                      Desc.Offset);
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    AI->replaceAllUsesWith(NewAllocaPtr);
  }
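  // E.g. (illustrative): an i32 alloca whose Desc.Offset is 32 is rewritten to
  //   %1 = add i64 %LocalStackBase, 32
  //   %2 = inttoptr i64 %1 to i32*
  // and all former uses of the alloca now go through %2.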
  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), DescriptionString,
                                   /*AllowMerging*/ true, kAsanGenPrefix);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);

  const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);

  // Poison the stack redzones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  // As the mask we must use the most poisoned case: redzones and after-scope.
  // As the bytes we can use either the same bytes or just the redzones.
  copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);

  if (!StaticAllocaPoisonCallVec.empty()) {
    const auto &ShadowInScope = GetShadowBytes(SVD, L);

    // Poison static allocas near lifetime intrinsics.
    for (const auto &APC : StaticAllocaPoisonCallVec) {
      const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
      assert(Desc.Offset % L.Granularity == 0);
      size_t Begin = Desc.Offset / L.Granularity;
      size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;

      IRBuilder<> IRB(APC.InsBefore);
      copyToShadow(ShadowAfterScope,
                   APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
                   IRB, ShadowBase);
    }
  }

  SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
  SmallVector<uint8_t, 64> ShadowAfterReturn;

  // (Un)poison the stack before all ret instructions.
  for (Instruction *Ret : RetVec) {
    Instruction *Adjusted = adjustForMusttailCall(Ret);
    IRBuilder<> IRBRet(Adjusted);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if FakeStack != 0  // LocalStackBase == FakeStack
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(FakeStack) = 0
      //     else
      //         __asan_stack_free_N(FakeStack, LocalStackSize)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp =
          IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
      Instruction *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Adjusted, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        ShadowAfterReturn.resize(ClassSize / L.Granularity,
                                 kAsanStackUseAfterReturnMagic);
        copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
                     ShadowBase);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            FakeStack,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
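        // Note (our reading of the fake-stack contract, asan_fake_stack.h):
        // the word at FakeStack + ClassSize - sizeof(void *) holds a pointer
        // to the frame's "busy" flag, so the load/store pair above releases
        // the fake frame inline, with no runtime call on this fast path.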
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall(
            AsanStackFreeFunc[StackMallocIdx],
            {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
      }

      IRBuilder<> IRBElse(ElseTerm);
      copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
    } else {
      copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec) AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to the ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall(
      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
      {AddrArg, SizeArg});
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
  IRBuilder<> IRB(AI);

  const unsigned Alignment = std::max(kAllocaRzSize, AI->getAlignment());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);

  // Since we need to extend the alloca with additional memory to locate the
  // redzones, and OldSize is the number of allocated blocks of ElementSize
  // bytes each, compute the allocated memory size in bytes as
  // OldSize * ElementSize.
  const unsigned ElementSize =
      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize =
      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
                    ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % kAllocaRzSize (== 32)
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
  // Alignment is added to locate the left redzone, PartialPadding for the
  // possible partial redzone and kAllocaRzSize for the right redzone,
  // respectively.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Alignment + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
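  // Worked example (illustrative, with kAllocaRzSize == 32 and Alignment 32):
  // for "alloca i32, i64 10", OldSize = 40, PartialSize = 8, Misalign = 24,
  // PartialPadding = 24, AdditionalChunkSize = 32 + 32 + 24 = 88, and
  // NewSize = 128: a 32-byte left redzone, 40 payload bytes, 24 bytes of
  // partial padding and a 32-byte right redzone.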
  // Insert the new alloca with the computed NewSize and Alignment.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align(Alignment));

  // NewAddress = Address + Alignment
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Alignment));

  // Insert a __asan_alloca_poison call for the newly created alloca.
  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});

  // Store the last alloca's address to DynamicAllocaLayout. We'll need this
  // for unpoisoning.
  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // Replace all uses of the address returned by the old alloca with
  // NewAddressPtr.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase the old alloca from its parent.
  AI->eraseFromParent();
}

// isSafeAccess returns true if Addr is always inbounds with respect to its
// base object. For example, it is a field access or an array access with
// constant inbounds index.
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                    Value *Addr, uint64_t TypeSize) const {
  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
  if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
  uint64_t Size = SizeOffset.first.getZExtValue();
  int64_t Offset = SizeOffset.second.getSExtValue();
  // Three checks are required to ensure safety:
  // . Offset >= 0  (since the offset is given from the base ptr)
  // . Size >= Offset  (unsigned)
  // . Size - Offset >= NeededSize  (unsigned)
  return Offset >= 0 && Size >= uint64_t(Offset) &&
         Size - uint64_t(Offset) >= TypeSize / 8;
}
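// Worked example (illustrative): for
//   %p = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 4
// and a 4-byte access, ObjSizeVis yields Size = 16 and Offset = 4; all three
// checks pass (4 >= 0, 16 >= 4, 16 - 4 >= 4), so the access is statically in
// bounds and a caller may elide the shadow check.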