1 //===- AddressSanitizer.cpp - memory error detector -----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file is a part of AddressSanitizer, an address basic correctness 10 // checker. 11 // Details of the algorithm: 12 // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm 13 // 14 // FIXME: This sanitizer does not yet handle scalable vectors 15 // 16 //===----------------------------------------------------------------------===// 17 18 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" 19 #include "llvm/ADT/ArrayRef.h" 20 #include "llvm/ADT/DenseMap.h" 21 #include "llvm/ADT/DepthFirstIterator.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/Triple.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/Analysis/MemoryBuiltins.h" 30 #include "llvm/Analysis/StackSafetyAnalysis.h" 31 #include "llvm/Analysis/TargetLibraryInfo.h" 32 #include "llvm/Analysis/ValueTracking.h" 33 #include "llvm/BinaryFormat/MachO.h" 34 #include "llvm/Demangle/Demangle.h" 35 #include "llvm/IR/Argument.h" 36 #include "llvm/IR/Attributes.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/Comdat.h" 39 #include "llvm/IR/Constant.h" 40 #include "llvm/IR/Constants.h" 41 #include "llvm/IR/DIBuilder.h" 42 #include "llvm/IR/DataLayout.h" 43 #include "llvm/IR/DebugInfoMetadata.h" 44 #include "llvm/IR/DebugLoc.h" 45 #include "llvm/IR/DerivedTypes.h" 46 #include "llvm/IR/Function.h" 47 #include "llvm/IR/GlobalAlias.h" 48 #include "llvm/IR/GlobalValue.h" 49 #include "llvm/IR/GlobalVariable.h" 50 #include "llvm/IR/IRBuilder.h" 51 #include "llvm/IR/InlineAsm.h" 52 #include "llvm/IR/InstVisitor.h" 53 #include "llvm/IR/InstrTypes.h" 54 #include "llvm/IR/Instruction.h" 55 #include "llvm/IR/Instructions.h" 56 #include "llvm/IR/IntrinsicInst.h" 57 #include "llvm/IR/Intrinsics.h" 58 #include "llvm/IR/LLVMContext.h" 59 #include "llvm/IR/MDBuilder.h" 60 #include "llvm/IR/Metadata.h" 61 #include "llvm/IR/Module.h" 62 #include "llvm/IR/Type.h" 63 #include "llvm/IR/Use.h" 64 #include "llvm/IR/Value.h" 65 #include "llvm/MC/MCSectionMachO.h" 66 #include "llvm/Support/Casting.h" 67 #include "llvm/Support/CommandLine.h" 68 #include "llvm/Support/Debug.h" 69 #include "llvm/Support/ErrorHandling.h" 70 #include "llvm/Support/MathExtras.h" 71 #include "llvm/Support/raw_ostream.h" 72 #include "llvm/Transforms/Instrumentation.h" 73 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h" 74 #include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h" 75 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" 76 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 77 #include "llvm/Transforms/Utils/Local.h" 78 #include "llvm/Transforms/Utils/ModuleUtils.h" 79 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 80 #include <algorithm> 81 #include <cassert> 82 #include <cstddef> 83 #include <cstdint> 84 #include <iomanip> 85 #include <limits> 86 #include <sstream> 87 #include <string> 88 #include <tuple> 89 90 using namespace llvm; 91 92 #define DEBUG_TYPE "asan" 93 94 static const uint64_t kDefaultShadowScale = 3; 95 static const uint64_t kDefaultShadowOffset32 = 
    1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;  // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
130 static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50; 131 const char kAsanReportErrorTemplate[] = "__asan_report_"; 132 const char kAsanRegisterGlobalsName[] = "__asan_register_globals"; 133 const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals"; 134 const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals"; 135 const char kAsanUnregisterImageGlobalsName[] = 136 "__asan_unregister_image_globals"; 137 const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals"; 138 const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals"; 139 const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init"; 140 const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init"; 141 const char kAsanInitName[] = "__asan_init"; 142 const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v"; 143 const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp"; 144 const char kAsanPtrSub[] = "__sanitizer_ptr_sub"; 145 const char kAsanHandleNoReturnName[] = "__asan_handle_no_return"; 146 static const int kMaxAsanStackMallocSizeClass = 10; 147 const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_"; 148 const char kAsanStackMallocAlwaysNameTemplate[] = 149 "__asan_stack_malloc_always_"; 150 const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_"; 151 const char kAsanGenPrefix[] = "___asan_gen_"; 152 const char kODRGenPrefix[] = "__odr_asan_gen_"; 153 const char kSanCovGenPrefix[] = "__sancov_gen_"; 154 const char kAsanSetShadowPrefix[] = "__asan_set_shadow_"; 155 const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory"; 156 const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory"; 157 158 // ASan version script has __asan_* wildcard. Triple underscore prevents a 159 // linker (gold) warning about attempting to export a local symbol. 160 const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered"; 161 162 const char kAsanOptionDetectUseAfterReturn[] = 163 "__asan_option_detect_stack_use_after_return"; 164 165 const char kAsanShadowMemoryDynamicAddress[] = 166 "__asan_shadow_memory_dynamic_address"; 167 168 const char kAsanAllocaPoison[] = "__asan_alloca_poison"; 169 const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison"; 170 171 const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared"; 172 const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private"; 173 174 // Accesses sizes are powers of two: 1, 2, 4, 8, 16. 175 static const size_t kNumberOfAccessSizes = 5; 176 177 static const uint64_t kAllocaRzSize = 32; 178 179 // ASanAccessInfo implementation constants. 180 constexpr size_t kCompileKernelShift = 0; 181 constexpr size_t kCompileKernelMask = 0x1; 182 constexpr size_t kAccessSizeIndexShift = 1; 183 constexpr size_t kAccessSizeIndexMask = 0xf; 184 constexpr size_t kIsWriteShift = 5; 185 constexpr size_t kIsWriteMask = 0x1; 186 187 // Command-line flags. 188 189 static cl::opt<bool> ClEnableKasan( 190 "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), 191 cl::Hidden, cl::init(false)); 192 193 static cl::opt<bool> ClRecover( 194 "asan-recover", 195 cl::desc("Enable recovery mode (continue-after-error)."), 196 cl::Hidden, cl::init(false)); 197 198 static cl::opt<bool> ClInsertVersionCheck( 199 "asan-guard-against-version-mismatch", 200 cl::desc("Guard against compiler/runtime version mismatch."), 201 cl::Hidden, cl::init(true)); 202 203 // This flag may need to be replaced with -f[no-]asan-reads. 
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(false),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
259 static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"), 260 cl::Hidden, cl::init(true)); 261 static cl::opt<uint32_t> ClMaxInlinePoisoningSize( 262 "asan-max-inline-poisoning-size", 263 cl::desc( 264 "Inline shadow poisoning for blocks up to the given size in bytes."), 265 cl::Hidden, cl::init(64)); 266 267 static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn( 268 "asan-use-after-return", 269 cl::desc("Sets the mode of detection for stack-use-after-return."), 270 cl::values( 271 clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", 272 "Never detect stack use after return."), 273 clEnumValN( 274 AsanDetectStackUseAfterReturnMode::Runtime, "runtime", 275 "Detect stack use after return if " 276 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), 277 clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", 278 "Always detect stack use after return.")), 279 cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime)); 280 281 static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args", 282 cl::desc("Create redzones for byval " 283 "arguments (extra copy " 284 "required)"), cl::Hidden, 285 cl::init(true)); 286 287 static cl::opt<bool> ClUseAfterScope("asan-use-after-scope", 288 cl::desc("Check stack-use-after-scope"), 289 cl::Hidden, cl::init(false)); 290 291 // This flag may need to be replaced with -f[no]asan-globals. 292 static cl::opt<bool> ClGlobals("asan-globals", 293 cl::desc("Handle global objects"), cl::Hidden, 294 cl::init(true)); 295 296 static cl::opt<bool> ClInitializers("asan-initialization-order", 297 cl::desc("Handle C++ initializer order"), 298 cl::Hidden, cl::init(true)); 299 300 static cl::opt<bool> ClInvalidPointerPairs( 301 "asan-detect-invalid-pointer-pair", 302 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, 303 cl::init(false)); 304 305 static cl::opt<bool> ClInvalidPointerCmp( 306 "asan-detect-invalid-pointer-cmp", 307 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, 308 cl::init(false)); 309 310 static cl::opt<bool> ClInvalidPointerSub( 311 "asan-detect-invalid-pointer-sub", 312 cl::desc("Instrument - operations with pointer operands"), cl::Hidden, 313 cl::init(false)); 314 315 static cl::opt<unsigned> ClRealignStack( 316 "asan-realign-stack", 317 cl::desc("Realign stack to the value of this flag (power of two)"), 318 cl::Hidden, cl::init(32)); 319 320 static cl::opt<int> ClInstrumentationWithCallsThreshold( 321 "asan-instrumentation-with-call-threshold", 322 cl::desc( 323 "If the function being instrumented contains more than " 324 "this number of memory accesses, use callbacks instead of " 325 "inline checks (-1 means never use callbacks)."), 326 cl::Hidden, cl::init(7000)); 327 328 static cl::opt<std::string> ClMemoryAccessCallbackPrefix( 329 "asan-memory-access-callback-prefix", 330 cl::desc("Prefix for memory access callbacks"), cl::Hidden, 331 cl::init("__asan_")); 332 333 static cl::opt<bool> ClKasanMemIntrinCallbackPrefix( 334 "asan-kernel-mem-intrinsic-prefix", 335 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, 336 cl::init(false)); 337 338 static cl::opt<bool> 339 ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", 340 cl::desc("instrument dynamic allocas"), 341 cl::Hidden, cl::init(true)); 342 343 static cl::opt<bool> ClSkipPromotableAllocas( 344 "asan-skip-promotable-allocas", 345 cl::desc("Do not instrument promotable allocas"), cl::Hidden, 346 cl::init(true)); 347 348 // These flags allow to 
change the shadow mapping. 349 // The shadow mapping looks like 350 // Shadow = (Mem >> scale) + offset 351 352 static cl::opt<int> ClMappingScale("asan-mapping-scale", 353 cl::desc("scale of asan shadow mapping"), 354 cl::Hidden, cl::init(0)); 355 356 static cl::opt<uint64_t> 357 ClMappingOffset("asan-mapping-offset", 358 cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), 359 cl::Hidden, cl::init(0)); 360 361 // Optimization flags. Not user visible, used mostly for testing 362 // and benchmarking the tool. 363 364 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"), 365 cl::Hidden, cl::init(true)); 366 367 static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks", 368 cl::desc("Optimize callbacks"), 369 cl::Hidden, cl::init(false)); 370 371 static cl::opt<bool> ClOptSameTemp( 372 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"), 373 cl::Hidden, cl::init(true)); 374 375 static cl::opt<bool> ClOptGlobals("asan-opt-globals", 376 cl::desc("Don't instrument scalar globals"), 377 cl::Hidden, cl::init(true)); 378 379 static cl::opt<bool> ClOptStack( 380 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), 381 cl::Hidden, cl::init(false)); 382 383 static cl::opt<bool> ClDynamicAllocaStack( 384 "asan-stack-dynamic-alloca", 385 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, 386 cl::init(true)); 387 388 static cl::opt<uint32_t> ClForceExperiment( 389 "asan-force-experiment", 390 cl::desc("Force optimization experiment (for testing)"), cl::Hidden, 391 cl::init(0)); 392 393 static cl::opt<bool> 394 ClUsePrivateAlias("asan-use-private-alias", 395 cl::desc("Use private aliases for global variables"), 396 cl::Hidden, cl::init(false)); 397 398 static cl::opt<bool> 399 ClUseOdrIndicator("asan-use-odr-indicator", 400 cl::desc("Use odr indicators to improve ODR reporting"), 401 cl::Hidden, cl::init(false)); 402 403 static cl::opt<bool> 404 ClUseGlobalsGC("asan-globals-live-support", 405 cl::desc("Use linker features to support dead " 406 "code stripping of globals"), 407 cl::Hidden, cl::init(true)); 408 409 // This is on by default even though there is a bug in gold: 410 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002 411 static cl::opt<bool> 412 ClWithComdat("asan-with-comdat", 413 cl::desc("Place ASan constructors in comdat sections"), 414 cl::Hidden, cl::init(true)); 415 416 static cl::opt<AsanDtorKind> ClOverrideDestructorKind( 417 "asan-destructor-kind", 418 cl::desc("Sets the ASan destructor kind. The default is to use the value " 419 "provided to the pass constructor"), 420 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), 421 clEnumValN(AsanDtorKind::Global, "global", 422 "Use global destructors")), 423 cl::init(AsanDtorKind::Invalid), cl::Hidden); 424 425 // Debug flags. 
426 427 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, 428 cl::init(0)); 429 430 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"), 431 cl::Hidden, cl::init(0)); 432 433 static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden, 434 cl::desc("Debug func")); 435 436 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), 437 cl::Hidden, cl::init(-1)); 438 439 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), 440 cl::Hidden, cl::init(-1)); 441 442 STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); 443 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); 444 STATISTIC(NumOptimizedAccessesToGlobalVar, 445 "Number of optimized accesses to global vars"); 446 STATISTIC(NumOptimizedAccessesToStackVar, 447 "Number of optimized accesses to stack vars"); 448 449 namespace { 450 451 /// This struct defines the shadow mapping using the rule: 452 /// shadow = (mem >> Scale) ADD-or-OR Offset. 453 /// If InGlobal is true, then 454 /// extern char __asan_shadow[]; 455 /// shadow = (mem >> Scale) + &__asan_shadow 456 struct ShadowMapping { 457 int Scale; 458 uint64_t Offset; 459 bool OrShadowOffset; 460 bool InGlobal; 461 }; 462 463 } // end anonymous namespace 464 465 static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, 466 bool IsKasan) { 467 bool IsAndroid = TargetTriple.isAndroid(); 468 bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() || 469 TargetTriple.isDriverKit(); 470 bool IsMacOS = TargetTriple.isMacOSX(); 471 bool IsFreeBSD = TargetTriple.isOSFreeBSD(); 472 bool IsNetBSD = TargetTriple.isOSNetBSD(); 473 bool IsPS = TargetTriple.isPS(); 474 bool IsLinux = TargetTriple.isOSLinux(); 475 bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 || 476 TargetTriple.getArch() == Triple::ppc64le; 477 bool IsSystemZ = TargetTriple.getArch() == Triple::systemz; 478 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64; 479 bool IsMIPS32 = TargetTriple.isMIPS32(); 480 bool IsMIPS64 = TargetTriple.isMIPS64(); 481 bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb(); 482 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64; 483 bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64; 484 bool IsWindows = TargetTriple.isOSWindows(); 485 bool IsFuchsia = TargetTriple.isOSFuchsia(); 486 bool IsEmscripten = TargetTriple.isOSEmscripten(); 487 bool IsAMDGPU = TargetTriple.isAMDGPU(); 488 489 ShadowMapping Mapping; 490 491 Mapping.Scale = kDefaultShadowScale; 492 if (ClMappingScale.getNumOccurrences() > 0) { 493 Mapping.Scale = ClMappingScale; 494 } 495 496 if (LongSize == 32) { 497 if (IsAndroid) 498 Mapping.Offset = kDynamicShadowSentinel; 499 else if (IsMIPS32) 500 Mapping.Offset = kMIPS32_ShadowOffset32; 501 else if (IsFreeBSD) 502 Mapping.Offset = kFreeBSD_ShadowOffset32; 503 else if (IsNetBSD) 504 Mapping.Offset = kNetBSD_ShadowOffset32; 505 else if (IsIOS) 506 Mapping.Offset = kDynamicShadowSentinel; 507 else if (IsWindows) 508 Mapping.Offset = kWindowsShadowOffset32; 509 else if (IsEmscripten) 510 Mapping.Offset = kEmscriptenShadowOffset; 511 else 512 Mapping.Offset = kDefaultShadowOffset32; 513 } else { // LongSize == 64 514 // Fuchsia is always PIE, which means that the beginning of the address 515 // space is always available. 
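    // As an illustration (addresses here are made up, not normative): with the
    // default Scale of 3, an 8-byte access at 0x100010 has its shadow byte at
    // (0x100010 >> 3) + Offset, i.e. 0x20002 when Offset is 0, so a zero
    // offset means the shadow is reachable without adding or OR-ing in any
    // base at all.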
516 if (IsFuchsia) 517 Mapping.Offset = 0; 518 else if (IsPPC64) 519 Mapping.Offset = kPPC64_ShadowOffset64; 520 else if (IsSystemZ) 521 Mapping.Offset = kSystemZ_ShadowOffset64; 522 else if (IsFreeBSD && !IsMIPS64) { 523 if (IsKasan) 524 Mapping.Offset = kFreeBSDKasan_ShadowOffset64; 525 else 526 Mapping.Offset = kFreeBSD_ShadowOffset64; 527 } else if (IsNetBSD) { 528 if (IsKasan) 529 Mapping.Offset = kNetBSDKasan_ShadowOffset64; 530 else 531 Mapping.Offset = kNetBSD_ShadowOffset64; 532 } else if (IsPS) 533 Mapping.Offset = kPS_ShadowOffset64; 534 else if (IsLinux && IsX86_64) { 535 if (IsKasan) 536 Mapping.Offset = kLinuxKasan_ShadowOffset64; 537 else 538 Mapping.Offset = (kSmallX86_64ShadowOffsetBase & 539 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale)); 540 } else if (IsWindows && IsX86_64) { 541 Mapping.Offset = kWindowsShadowOffset64; 542 } else if (IsMIPS64) 543 Mapping.Offset = kMIPS64_ShadowOffset64; 544 else if (IsIOS) 545 Mapping.Offset = kDynamicShadowSentinel; 546 else if (IsMacOS && IsAArch64) 547 Mapping.Offset = kDynamicShadowSentinel; 548 else if (IsAArch64) 549 Mapping.Offset = kAArch64_ShadowOffset64; 550 else if (IsRISCV64) 551 Mapping.Offset = kRISCV64_ShadowOffset64; 552 else if (IsAMDGPU) 553 Mapping.Offset = (kSmallX86_64ShadowOffsetBase & 554 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale)); 555 else 556 Mapping.Offset = kDefaultShadowOffset64; 557 } 558 559 if (ClForceDynamicShadow) { 560 Mapping.Offset = kDynamicShadowSentinel; 561 } 562 563 if (ClMappingOffset.getNumOccurrences() > 0) { 564 Mapping.Offset = ClMappingOffset; 565 } 566 567 // OR-ing shadow offset if more efficient (at least on x86) if the offset 568 // is a power of two, but on ppc64 we have to use add since the shadow 569 // offset is not necessary 1/8-th of the address space. On SystemZ, 570 // we could OR the constant in a single instruction, but it's more 571 // efficient to load it once and use indexed addressing. 572 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS && 573 !IsRISCV64 && 574 !(Mapping.Offset & (Mapping.Offset - 1)) && 575 Mapping.Offset != kDynamicShadowSentinel; 576 bool IsAndroidWithIfuncSupport = 577 IsAndroid && !TargetTriple.isAndroidVersionLT(21); 578 Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb; 579 580 return Mapping; 581 } 582 583 namespace llvm { 584 void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, 585 bool IsKasan, uint64_t *ShadowBase, 586 int *MappingScale, bool *OrShadowOffset) { 587 auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan); 588 *ShadowBase = Mapping.Offset; 589 *MappingScale = Mapping.Scale; 590 *OrShadowOffset = Mapping.OrShadowOffset; 591 } 592 593 ASanAccessInfo::ASanAccessInfo(int32_t Packed) 594 : Packed(Packed), 595 AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask), 596 IsWrite((Packed >> kIsWriteShift) & kIsWriteMask), 597 CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {} 598 599 ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel, 600 uint8_t AccessSizeIndex) 601 : Packed((IsWrite << kIsWriteShift) + 602 (CompileKernel << kCompileKernelShift) + 603 (AccessSizeIndex << kAccessSizeIndexShift)), 604 AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite), 605 CompileKernel(CompileKernel) {} 606 607 } // namespace llvm 608 609 static uint64_t getRedzoneSizeForScale(int MappingScale) { 610 // Redzone used for stack and globals is at least 32 bytes. 
611 // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. 612 return std::max(32U, 1U << MappingScale); 613 } 614 615 static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) { 616 if (TargetTriple.isOSEmscripten()) { 617 return kAsanEmscriptenCtorAndDtorPriority; 618 } else { 619 return kAsanCtorAndDtorPriority; 620 } 621 } 622 623 namespace { 624 625 /// AddressSanitizer: instrument the code in module to find memory bugs. 626 struct AddressSanitizer { 627 AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI, 628 bool CompileKernel = false, bool Recover = false, 629 bool UseAfterScope = false, 630 AsanDetectStackUseAfterReturnMode UseAfterReturn = 631 AsanDetectStackUseAfterReturnMode::Runtime) 632 : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan 633 : CompileKernel), 634 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover), 635 UseAfterScope(UseAfterScope || ClUseAfterScope), 636 UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn 637 : UseAfterReturn), 638 SSGI(SSGI) { 639 C = &(M.getContext()); 640 LongSize = M.getDataLayout().getPointerSizeInBits(); 641 IntptrTy = Type::getIntNTy(*C, LongSize); 642 Int8PtrTy = Type::getInt8PtrTy(*C); 643 Int32Ty = Type::getInt32Ty(*C); 644 TargetTriple = Triple(M.getTargetTriple()); 645 646 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel); 647 648 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid); 649 } 650 651 uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const { 652 uint64_t ArraySize = 1; 653 if (AI.isArrayAllocation()) { 654 const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize()); 655 assert(CI && "non-constant array size"); 656 ArraySize = CI->getZExtValue(); 657 } 658 Type *Ty = AI.getAllocatedType(); 659 uint64_t SizeInBytes = 660 AI.getModule()->getDataLayout().getTypeAllocSize(Ty); 661 return SizeInBytes * ArraySize; 662 } 663 664 /// Check if we want (and can) handle this alloca. 
665 bool isInterestingAlloca(const AllocaInst &AI); 666 667 bool ignoreAccess(Instruction *Inst, Value *Ptr); 668 void getInterestingMemoryOperands( 669 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting); 670 671 void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, 672 InterestingMemoryOperand &O, bool UseCalls, 673 const DataLayout &DL); 674 void instrumentPointerComparisonOrSubtraction(Instruction *I); 675 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, 676 Value *Addr, uint32_t TypeSize, bool IsWrite, 677 Value *SizeArgument, bool UseCalls, uint32_t Exp); 678 Instruction *instrumentAMDGPUAddress(Instruction *OrigIns, 679 Instruction *InsertBefore, Value *Addr, 680 uint32_t TypeSize, bool IsWrite, 681 Value *SizeArgument); 682 void instrumentUnusualSizeOrAlignment(Instruction *I, 683 Instruction *InsertBefore, Value *Addr, 684 uint32_t TypeSize, bool IsWrite, 685 Value *SizeArgument, bool UseCalls, 686 uint32_t Exp); 687 Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, 688 Value *ShadowValue, uint32_t TypeSize); 689 Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, 690 bool IsWrite, size_t AccessSizeIndex, 691 Value *SizeArgument, uint32_t Exp); 692 void instrumentMemIntrinsic(MemIntrinsic *MI); 693 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); 694 bool suppressInstrumentationSiteForDebug(int &Instrumented); 695 bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI); 696 bool maybeInsertAsanInitAtFunctionEntry(Function &F); 697 bool maybeInsertDynamicShadowAtFunctionEntry(Function &F); 698 void markEscapedLocalAllocas(Function &F); 699 700 private: 701 friend struct FunctionStackPoisoner; 702 703 void initializeCallbacks(Module &M); 704 705 bool LooksLikeCodeInBug11395(Instruction *I); 706 bool GlobalIsLinkerInitialized(GlobalVariable *G); 707 bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr, 708 uint64_t TypeSize) const; 709 710 /// Helper to cleanup per-function state. 711 struct FunctionStateRAII { 712 AddressSanitizer *Pass; 713 714 FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) { 715 assert(Pass->ProcessedAllocas.empty() && 716 "last pass forgot to clear cache"); 717 assert(!Pass->LocalDynamicShadow); 718 } 719 720 ~FunctionStateRAII() { 721 Pass->LocalDynamicShadow = nullptr; 722 Pass->ProcessedAllocas.clear(); 723 } 724 }; 725 726 LLVMContext *C; 727 Triple TargetTriple; 728 int LongSize; 729 bool CompileKernel; 730 bool Recover; 731 bool UseAfterScope; 732 AsanDetectStackUseAfterReturnMode UseAfterReturn; 733 Type *IntptrTy; 734 Type *Int8PtrTy; 735 Type *Int32Ty; 736 ShadowMapping Mapping; 737 FunctionCallee AsanHandleNoReturnFunc; 738 FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction; 739 Constant *AsanShadowGlobal; 740 741 // These arrays is indexed by AccessIsWrite, Experiment and log2(AccessSize). 742 FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes]; 743 FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes]; 744 745 // These arrays is indexed by AccessIsWrite and Experiment. 
746 FunctionCallee AsanErrorCallbackSized[2][2]; 747 FunctionCallee AsanMemoryAccessCallbackSized[2][2]; 748 749 FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset; 750 Value *LocalDynamicShadow = nullptr; 751 const StackSafetyGlobalInfo *SSGI; 752 DenseMap<const AllocaInst *, bool> ProcessedAllocas; 753 754 FunctionCallee AMDGPUAddressShared; 755 FunctionCallee AMDGPUAddressPrivate; 756 }; 757 758 class ModuleAddressSanitizer { 759 public: 760 ModuleAddressSanitizer(Module &M, bool CompileKernel = false, 761 bool Recover = false, bool UseGlobalsGC = true, 762 bool UseOdrIndicator = false, 763 AsanDtorKind DestructorKind = AsanDtorKind::Global) 764 : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan 765 : CompileKernel), 766 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover), 767 UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel), 768 // Enable aliases as they should have no downside with ODR indicators. 769 UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias), 770 UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator), 771 // Not a typo: ClWithComdat is almost completely pointless without 772 // ClUseGlobalsGC (because then it only works on modules without 773 // globals, which are rare); it is a prerequisite for ClUseGlobalsGC; 774 // and both suffer from gold PR19002 for which UseGlobalsGC constructor 775 // argument is designed as workaround. Therefore, disable both 776 // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to 777 // do globals-gc. 778 UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel), 779 DestructorKind(DestructorKind) { 780 C = &(M.getContext()); 781 int LongSize = M.getDataLayout().getPointerSizeInBits(); 782 IntptrTy = Type::getIntNTy(*C, LongSize); 783 TargetTriple = Triple(M.getTargetTriple()); 784 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel); 785 786 if (ClOverrideDestructorKind != AsanDtorKind::Invalid) 787 this->DestructorKind = ClOverrideDestructorKind; 788 assert(this->DestructorKind != AsanDtorKind::Invalid); 789 } 790 791 bool instrumentModule(Module &); 792 793 private: 794 void initializeCallbacks(Module &M); 795 796 bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat); 797 void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M, 798 ArrayRef<GlobalVariable *> ExtendedGlobals, 799 ArrayRef<Constant *> MetadataInitializers); 800 void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M, 801 ArrayRef<GlobalVariable *> ExtendedGlobals, 802 ArrayRef<Constant *> MetadataInitializers, 803 const std::string &UniqueModuleId); 804 void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M, 805 ArrayRef<GlobalVariable *> ExtendedGlobals, 806 ArrayRef<Constant *> MetadataInitializers); 807 void 808 InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M, 809 ArrayRef<GlobalVariable *> ExtendedGlobals, 810 ArrayRef<Constant *> MetadataInitializers); 811 812 GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer, 813 StringRef OriginalName); 814 void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata, 815 StringRef InternalSuffix); 816 Instruction *CreateAsanModuleDtor(Module &M); 817 818 const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const; 819 bool shouldInstrumentGlobal(GlobalVariable *G) const; 820 bool ShouldUseMachOGlobalsSection() const; 821 StringRef getGlobalMetadataSection() const; 822 void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName); 823 void 
createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); 824 uint64_t getMinRedzoneSizeForGlobal() const { 825 return getRedzoneSizeForScale(Mapping.Scale); 826 } 827 uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const; 828 int GetAsanVersion(const Module &M) const; 829 830 bool CompileKernel; 831 bool Recover; 832 bool UseGlobalsGC; 833 bool UsePrivateAlias; 834 bool UseOdrIndicator; 835 bool UseCtorComdat; 836 AsanDtorKind DestructorKind; 837 Type *IntptrTy; 838 LLVMContext *C; 839 Triple TargetTriple; 840 ShadowMapping Mapping; 841 FunctionCallee AsanPoisonGlobals; 842 FunctionCallee AsanUnpoisonGlobals; 843 FunctionCallee AsanRegisterGlobals; 844 FunctionCallee AsanUnregisterGlobals; 845 FunctionCallee AsanRegisterImageGlobals; 846 FunctionCallee AsanUnregisterImageGlobals; 847 FunctionCallee AsanRegisterElfGlobals; 848 FunctionCallee AsanUnregisterElfGlobals; 849 850 Function *AsanCtorFunction = nullptr; 851 Function *AsanDtorFunction = nullptr; 852 }; 853 854 // Stack poisoning does not play well with exception handling. 855 // When an exception is thrown, we essentially bypass the code 856 // that unpoisones the stack. This is why the run-time library has 857 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire 858 // stack in the interceptor. This however does not work inside the 859 // actual function which catches the exception. Most likely because the 860 // compiler hoists the load of the shadow value somewhere too high. 861 // This causes asan to report a non-existing bug on 453.povray. 862 // It sounds like an LLVM bug. 863 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { 864 Function &F; 865 AddressSanitizer &ASan; 866 DIBuilder DIB; 867 LLVMContext *C; 868 Type *IntptrTy; 869 Type *IntptrPtrTy; 870 ShadowMapping Mapping; 871 872 SmallVector<AllocaInst *, 16> AllocaVec; 873 SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp; 874 SmallVector<Instruction *, 8> RetVec; 875 876 FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1], 877 AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1]; 878 FunctionCallee AsanSetShadowFunc[0x100] = {}; 879 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc; 880 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc; 881 882 // Stores a place and arguments of poisoning/unpoisoning call for alloca. 883 struct AllocaPoisonCall { 884 IntrinsicInst *InsBefore; 885 AllocaInst *AI; 886 uint64_t Size; 887 bool DoPoison; 888 }; 889 SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec; 890 SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec; 891 bool HasUntracedLifetimeIntrinsic = false; 892 893 SmallVector<AllocaInst *, 1> DynamicAllocaVec; 894 SmallVector<IntrinsicInst *, 1> StackRestoreVec; 895 AllocaInst *DynamicAllocaLayout = nullptr; 896 IntrinsicInst *LocalEscapeCall = nullptr; 897 898 bool HasInlineAsm = false; 899 bool HasReturnsTwiceCall = false; 900 bool PoisonStack; 901 902 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) 903 : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false), 904 C(ASan.C), IntptrTy(ASan.IntptrTy), 905 IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping), 906 PoisonStack(ClStack && 907 !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {} 908 909 bool runOnFunction() { 910 if (!PoisonStack) 911 return false; 912 913 if (ClRedzoneByvalArgs) 914 copyArgsPassedByValToAllocas(); 915 916 // Collect alloca, ret, lifetime instructions etc. 
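    // Note: only blocks reachable from the entry block are visited below, so
    // allocas that appear solely in unreachable code are never collected.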
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    RetVec.push_back(&CRI);
  }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust extracted SP to compute the address of the most recent
    // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
    // this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    IRB.CreateCall(
        AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic alloca redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around dynamic alloca call.
To do this, we 1004 // should replace this call with another one with changed parameters and 1005 // replace all its uses with new address, so 1006 // addr = alloca type, old_size, align 1007 // is replaced by 1008 // new_size = (old_size + additional_size) * sizeof(type) 1009 // tmp = alloca i8, new_size, max(align, 32) 1010 // addr = tmp + 32 (first 32 bytes are for the left redzone). 1011 // Additional_size is added to make new memory allocation contain not only 1012 // requested memory, but also left, partial and right redzones. 1013 void handleDynamicAllocaCall(AllocaInst *AI); 1014 1015 /// Collect Alloca instructions we want (and can) handle. 1016 void visitAllocaInst(AllocaInst &AI) { 1017 if (!ASan.isInterestingAlloca(AI)) { 1018 if (AI.isStaticAlloca()) { 1019 // Skip over allocas that are present *before* the first instrumented 1020 // alloca, we don't want to move those around. 1021 if (AllocaVec.empty()) 1022 return; 1023 1024 StaticAllocasToMoveUp.push_back(&AI); 1025 } 1026 return; 1027 } 1028 1029 if (!AI.isStaticAlloca()) 1030 DynamicAllocaVec.push_back(&AI); 1031 else 1032 AllocaVec.push_back(&AI); 1033 } 1034 1035 /// Collect lifetime intrinsic calls to check for use-after-scope 1036 /// errors. 1037 void visitIntrinsicInst(IntrinsicInst &II) { 1038 Intrinsic::ID ID = II.getIntrinsicID(); 1039 if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II); 1040 if (ID == Intrinsic::localescape) LocalEscapeCall = &II; 1041 if (!ASan.UseAfterScope) 1042 return; 1043 if (!II.isLifetimeStartOrEnd()) 1044 return; 1045 // Found lifetime intrinsic, add ASan instrumentation if necessary. 1046 auto *Size = cast<ConstantInt>(II.getArgOperand(0)); 1047 // If size argument is undefined, don't do anything. 1048 if (Size->isMinusOne()) return; 1049 // Check that size doesn't saturate uint64_t and can 1050 // be stored in IntptrTy. 1051 const uint64_t SizeValue = Size->getValue().getLimitedValue(); 1052 if (SizeValue == ~0ULL || 1053 !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) 1054 return; 1055 // Find alloca instruction that corresponds to llvm.lifetime argument. 1056 // Currently we can only handle lifetime markers pointing to the 1057 // beginning of the alloca. 1058 AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true); 1059 if (!AI) { 1060 HasUntracedLifetimeIntrinsic = true; 1061 return; 1062 } 1063 // We're interested only in allocas we can handle. 1064 if (!ASan.isInterestingAlloca(*AI)) 1065 return; 1066 bool DoPoison = (ID == Intrinsic::lifetime_end); 1067 AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison}; 1068 if (AI->isStaticAlloca()) 1069 StaticAllocaPoisonCallVec.push_back(APC); 1070 else if (ClInstrumentDynamicAllocas) 1071 DynamicAllocaPoisonCallVec.push_back(APC); 1072 } 1073 1074 void visitCallBase(CallBase &CB) { 1075 if (CallInst *CI = dyn_cast<CallInst>(&CB)) { 1076 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow; 1077 HasReturnsTwiceCall |= CI->canReturnTwice(); 1078 } 1079 } 1080 1081 // ---------------------- Helpers. 1082 void initializeCallbacks(Module &M); 1083 1084 // Copies bytes from ShadowBytes into shadow memory for indexes where 1085 // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that 1086 // ShadowBytes[i] is constantly zero and doesn't need to be overwritten. 
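  // A sketch of the intended use (the shadow byte values 0xf1/0xf3 are
  // assumed for illustration): a frame with a 32-byte left redzone, one
  // 8-byte local, and a 32-byte right redzone could be described, at shadow
  // granularity, as
  //   ShadowMask  = { 1,    1,    1,    1,    0,    1,    1,    1,    1    }
  //   ShadowBytes = { 0xf1, 0xf1, 0xf1, 0xf1, 0x00, 0xf3, 0xf3, 0xf3, 0xf3 }
  // and copyToShadow would emit stores (or __asan_set_shadow_* calls for long
  // runs) only for the positions whose mask entry is non-zero.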
1087 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes, 1088 IRBuilder<> &IRB, Value *ShadowBase); 1089 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes, 1090 size_t Begin, size_t End, IRBuilder<> &IRB, 1091 Value *ShadowBase); 1092 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask, 1093 ArrayRef<uint8_t> ShadowBytes, size_t Begin, 1094 size_t End, IRBuilder<> &IRB, Value *ShadowBase); 1095 1096 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison); 1097 1098 Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L, 1099 bool Dynamic); 1100 PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue, 1101 Instruction *ThenTerm, Value *ValueIfFalse); 1102 }; 1103 1104 } // end anonymous namespace 1105 1106 void ModuleAddressSanitizerPass::printPipeline( 1107 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 1108 static_cast<PassInfoMixin<ModuleAddressSanitizerPass> *>(this)->printPipeline( 1109 OS, MapClassName2PassName); 1110 OS << "<"; 1111 if (Options.CompileKernel) 1112 OS << "kernel"; 1113 OS << ">"; 1114 } 1115 1116 ModuleAddressSanitizerPass::ModuleAddressSanitizerPass( 1117 const AddressSanitizerOptions &Options, bool UseGlobalGC, 1118 bool UseOdrIndicator, AsanDtorKind DestructorKind) 1119 : Options(Options), UseGlobalGC(UseGlobalGC), 1120 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind) {} 1121 1122 PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M, 1123 ModuleAnalysisManager &MAM) { 1124 ModuleAddressSanitizer ModuleSanitizer(M, Options.CompileKernel, 1125 Options.Recover, UseGlobalGC, 1126 UseOdrIndicator, DestructorKind); 1127 bool Modified = false; 1128 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 1129 const StackSafetyGlobalInfo *const SSGI = 1130 ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr; 1131 for (Function &F : M) { 1132 AddressSanitizer FunctionSanitizer(M, SSGI, Options.CompileKernel, 1133 Options.Recover, Options.UseAfterScope, 1134 Options.UseAfterReturn); 1135 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 1136 Modified |= FunctionSanitizer.instrumentFunction(F, &TLI); 1137 } 1138 Modified |= ModuleSanitizer.instrumentModule(M); 1139 return Modified ? PreservedAnalyses::none() : PreservedAnalyses::all(); 1140 } 1141 1142 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { 1143 size_t Res = countTrailingZeros(TypeSize / 8); 1144 assert(Res < kNumberOfAccessSizes); 1145 return Res; 1146 } 1147 1148 /// Check if \p G has been created by a trusted compiler pass. 1149 static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) { 1150 // Do not instrument @llvm.global_ctors, @llvm.used, etc. 1151 if (G->getName().startswith("llvm.") || 1152 // Do not instrument gcov counter arrays. 1153 G->getName().startswith("__llvm_gcov_ctr") || 1154 // Do not instrument rtti proxy symbols for function sanitizer. 1155 G->getName().startswith("__llvm_rtti_proxy")) 1156 return true; 1157 1158 // Do not instrument asan globals. 
1159 if (G->getName().startswith(kAsanGenPrefix) || 1160 G->getName().startswith(kSanCovGenPrefix) || 1161 G->getName().startswith(kODRGenPrefix)) 1162 return true; 1163 1164 return false; 1165 } 1166 1167 static bool isUnsupportedAMDGPUAddrspace(Value *Addr) { 1168 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType()); 1169 unsigned int AddrSpace = PtrTy->getPointerAddressSpace(); 1170 if (AddrSpace == 3 || AddrSpace == 5) 1171 return true; 1172 return false; 1173 } 1174 1175 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { 1176 // Shadow >> scale 1177 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); 1178 if (Mapping.Offset == 0) return Shadow; 1179 // (Shadow >> scale) | offset 1180 Value *ShadowBase; 1181 if (LocalDynamicShadow) 1182 ShadowBase = LocalDynamicShadow; 1183 else 1184 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset); 1185 if (Mapping.OrShadowOffset) 1186 return IRB.CreateOr(Shadow, ShadowBase); 1187 else 1188 return IRB.CreateAdd(Shadow, ShadowBase); 1189 } 1190 1191 // Instrument memset/memmove/memcpy 1192 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { 1193 IRBuilder<> IRB(MI); 1194 if (isa<MemTransferInst>(MI)) { 1195 IRB.CreateCall( 1196 isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy, 1197 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 1198 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), 1199 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 1200 } else if (isa<MemSetInst>(MI)) { 1201 IRB.CreateCall( 1202 AsanMemset, 1203 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 1204 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), 1205 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 1206 } 1207 MI->eraseFromParent(); 1208 } 1209 1210 /// Check if we want (and can) handle this alloca. 1211 bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) { 1212 auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI); 1213 1214 if (PreviouslySeenAllocaInfo != ProcessedAllocas.end()) 1215 return PreviouslySeenAllocaInfo->getSecond(); 1216 1217 bool IsInteresting = 1218 (AI.getAllocatedType()->isSized() && 1219 // alloca() may be called with 0 size, ignore it. 1220 ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) && 1221 // We are only interested in allocas not promotable to registers. 1222 // Promotable allocas are common under -O0. 1223 (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) && 1224 // inalloca allocas are not treated as static, and we don't want 1225 // dynamic alloca instrumentation for them as well. 1226 !AI.isUsedWithInAlloca() && 1227 // swifterror allocas are register promoted by ISel 1228 !AI.isSwiftError()); 1229 1230 ProcessedAllocas[&AI] = IsInteresting; 1231 return IsInteresting; 1232 } 1233 1234 bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) { 1235 // Instrument acesses from different address spaces only for AMDGPU. 1236 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); 1237 if (PtrTy->getPointerAddressSpace() != 0 && 1238 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr))) 1239 return true; 1240 1241 // Ignore swifterror addresses. 1242 // swifterror memory addresses are mem2reg promoted by instruction 1243 // selection. As such they cannot have regular uses like an instrumentation 1244 // function and it makes no sense to track them as memory. 
1245 if (Ptr->isSwiftError()) 1246 return true; 1247 1248 // Treat memory accesses to promotable allocas as non-interesting since they 1249 // will not cause memory violations. This greatly speeds up the instrumented 1250 // executable at -O0. 1251 if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr)) 1252 if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI)) 1253 return true; 1254 1255 if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) && 1256 findAllocaForValue(Ptr)) 1257 return true; 1258 1259 return false; 1260 } 1261 1262 void AddressSanitizer::getInterestingMemoryOperands( 1263 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) { 1264 // Do not instrument the load fetching the dynamic shadow address. 1265 if (LocalDynamicShadow == I) 1266 return; 1267 1268 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 1269 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand())) 1270 return; 1271 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false, 1272 LI->getType(), LI->getAlign()); 1273 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 1274 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand())) 1275 return; 1276 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true, 1277 SI->getValueOperand()->getType(), SI->getAlign()); 1278 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 1279 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand())) 1280 return; 1281 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true, 1282 RMW->getValOperand()->getType(), None); 1283 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { 1284 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand())) 1285 return; 1286 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true, 1287 XCHG->getCompareOperand()->getType(), None); 1288 } else if (auto CI = dyn_cast<CallInst>(I)) { 1289 if (CI->getIntrinsicID() == Intrinsic::masked_load || 1290 CI->getIntrinsicID() == Intrinsic::masked_store) { 1291 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_store; 1292 // Masked store has an initial operand for the value. 1293 unsigned OpOffset = IsWrite ? 1 : 0; 1294 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads) 1295 return; 1296 1297 auto BasePtr = CI->getOperand(OpOffset); 1298 if (ignoreAccess(I, BasePtr)) 1299 return; 1300 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType(); 1301 MaybeAlign Alignment = Align(1); 1302 // Otherwise no alignment guarantees. We probably got Undef. 1303 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset))) 1304 Alignment = Op->getMaybeAlignValue(); 1305 Value *Mask = CI->getOperand(2 + OpOffset); 1306 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask); 1307 } else { 1308 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) { 1309 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) || 1310 ignoreAccess(I, CI->getArgOperand(ArgNo))) 1311 continue; 1312 Type *Ty = CI->getParamByValType(ArgNo); 1313 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1)); 1314 } 1315 } 1316 } 1317 } 1318 1319 static bool isPointerOperand(Value *V) { 1320 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V); 1321 } 1322 1323 // This is a rough heuristic; it may cause both false positives and 1324 // false negatives. The proper implementation requires cooperation with 1325 // the frontend. 
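// For example, "p1 < p2" where both operands have pointer type (or are direct
// ptrtoint casts) is treated as interesting, while the same comparison done on
// integers derived from those pointers by arithmetic is missed; a legitimate
// comparison of two pointers into the same object is still instrumented and
// left to the runtime to judge.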
1326 static bool isInterestingPointerComparison(Instruction *I) { 1327 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { 1328 if (!Cmp->isRelational()) 1329 return false; 1330 } else { 1331 return false; 1332 } 1333 return isPointerOperand(I->getOperand(0)) && 1334 isPointerOperand(I->getOperand(1)); 1335 } 1336 1337 // This is a rough heuristic; it may cause both false positives and 1338 // false negatives. The proper implementation requires cooperation with 1339 // the frontend. 1340 static bool isInterestingPointerSubtraction(Instruction *I) { 1341 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { 1342 if (BO->getOpcode() != Instruction::Sub) 1343 return false; 1344 } else { 1345 return false; 1346 } 1347 return isPointerOperand(I->getOperand(0)) && 1348 isPointerOperand(I->getOperand(1)); 1349 } 1350 1351 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) { 1352 // If a global variable does not have dynamic initialization we don't 1353 // have to instrument it. However, if a global does not have initializer 1354 // at all, we assume it has dynamic initializer (in other TU). 1355 if (!G->hasInitializer()) 1356 return false; 1357 1358 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit) 1359 return false; 1360 1361 return true; 1362 } 1363 1364 void AddressSanitizer::instrumentPointerComparisonOrSubtraction( 1365 Instruction *I) { 1366 IRBuilder<> IRB(I); 1367 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction; 1368 Value *Param[2] = {I->getOperand(0), I->getOperand(1)}; 1369 for (Value *&i : Param) { 1370 if (i->getType()->isPointerTy()) 1371 i = IRB.CreatePointerCast(i, IntptrTy); 1372 } 1373 IRB.CreateCall(F, Param); 1374 } 1375 1376 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, 1377 Instruction *InsertBefore, Value *Addr, 1378 MaybeAlign Alignment, unsigned Granularity, 1379 uint32_t TypeSize, bool IsWrite, 1380 Value *SizeArgument, bool UseCalls, 1381 uint32_t Exp) { 1382 // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check 1383 // if the data is properly aligned. 1384 if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 || 1385 TypeSize == 128) && 1386 (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8)) 1387 return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite, 1388 nullptr, UseCalls, Exp); 1389 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize, 1390 IsWrite, nullptr, UseCalls, Exp); 1391 } 1392 1393 static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, 1394 const DataLayout &DL, Type *IntptrTy, 1395 Value *Mask, Instruction *I, 1396 Value *Addr, MaybeAlign Alignment, 1397 unsigned Granularity, Type *OpType, 1398 bool IsWrite, Value *SizeArgument, 1399 bool UseCalls, uint32_t Exp) { 1400 auto *VTy = cast<FixedVectorType>(OpType); 1401 uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType()); 1402 unsigned Num = VTy->getNumElements(); 1403 auto Zero = ConstantInt::get(IntptrTy, 0); 1404 for (unsigned Idx = 0; Idx < Num; ++Idx) { 1405 Value *InstrumentedAddress = nullptr; 1406 Instruction *InsertBefore = I; 1407 if (auto *Vector = dyn_cast<ConstantVector>(Mask)) { 1408 // dyn_cast as we might get UndefValue 1409 if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) { 1410 if (Masked->isZero()) 1411 // Mask is constant false, so no instrumentation needed. 
1412 continue; 1413 // If we have a true or undef value, fall through to doInstrumentAddress 1414 // with InsertBefore == I 1415 } 1416 } else { 1417 IRBuilder<> IRB(I); 1418 Value *MaskElem = IRB.CreateExtractElement(Mask, Idx); 1419 Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false); 1420 InsertBefore = ThenTerm; 1421 } 1422 1423 IRBuilder<> IRB(InsertBefore); 1424 InstrumentedAddress = 1425 IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)}); 1426 doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment, 1427 Granularity, ElemTypeSize, IsWrite, SizeArgument, 1428 UseCalls, Exp); 1429 } 1430 } 1431 1432 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, 1433 InterestingMemoryOperand &O, bool UseCalls, 1434 const DataLayout &DL) { 1435 Value *Addr = O.getPtr(); 1436 1437 // Optimization experiments. 1438 // The experiments can be used to evaluate potential optimizations that remove 1439 // instrumentation (assess false negatives). Instead of completely removing 1440 // some instrumentation, you set Exp to a non-zero value (mask of optimization 1441 // experiments that want to remove instrumentation of this instruction). 1442 // If Exp is non-zero, this pass will emit special calls into runtime 1443 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls 1444 // make runtime terminate the program in a special way (with a different 1445 // exit status). Then you run the new compiler on a buggy corpus, collect 1446 // the special terminations (ideally, you don't see them at all -- no false 1447 // negatives) and make the decision on the optimization. 1448 uint32_t Exp = ClForceExperiment; 1449 1450 if (ClOpt && ClOptGlobals) { 1451 // If initialization order checking is disabled, a simple access to a 1452 // dynamically initialized global is always valid. 1453 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr)); 1454 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) && 1455 isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { 1456 NumOptimizedAccessesToGlobalVar++; 1457 return; 1458 } 1459 } 1460 1461 if (ClOpt && ClOptStack) { 1462 // A direct inbounds access to a stack variable is always valid. 1463 if (isa<AllocaInst>(getUnderlyingObject(Addr)) && 1464 isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { 1465 NumOptimizedAccessesToStackVar++; 1466 return; 1467 } 1468 } 1469 1470 if (O.IsWrite) 1471 NumInstrumentedWrites++; 1472 else 1473 NumInstrumentedReads++; 1474 1475 unsigned Granularity = 1 << Mapping.Scale; 1476 if (O.MaybeMask) { 1477 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(), 1478 Addr, O.Alignment, Granularity, O.OpType, 1479 O.IsWrite, nullptr, UseCalls, Exp); 1480 } else { 1481 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment, 1482 Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls, 1483 Exp); 1484 } 1485 } 1486 1487 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore, 1488 Value *Addr, bool IsWrite, 1489 size_t AccessSizeIndex, 1490 Value *SizeArgument, 1491 uint32_t Exp) { 1492 IRBuilder<> IRB(InsertBefore); 1493 Value *ExpVal = Exp == 0 ? 
nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp); 1494 CallInst *Call = nullptr; 1495 if (SizeArgument) { 1496 if (Exp == 0) 1497 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0], 1498 {Addr, SizeArgument}); 1499 else 1500 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1], 1501 {Addr, SizeArgument, ExpVal}); 1502 } else { 1503 if (Exp == 0) 1504 Call = 1505 IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr); 1506 else 1507 Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex], 1508 {Addr, ExpVal}); 1509 } 1510 1511 Call->setCannotMerge(); 1512 return Call; 1513 } 1514 1515 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, 1516 Value *ShadowValue, 1517 uint32_t TypeSize) { 1518 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale; 1519 // Addr & (Granularity - 1) 1520 Value *LastAccessedByte = 1521 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); 1522 // (Addr & (Granularity - 1)) + size - 1 1523 if (TypeSize / 8 > 1) 1524 LastAccessedByte = IRB.CreateAdd( 1525 LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); 1526 // (uint8_t) ((Addr & (Granularity-1)) + size - 1) 1527 LastAccessedByte = 1528 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false); 1529 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue 1530 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); 1531 } 1532 1533 Instruction *AddressSanitizer::instrumentAMDGPUAddress( 1534 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, 1535 uint32_t TypeSize, bool IsWrite, Value *SizeArgument) { 1536 // Do not instrument unsupported addrspaces. 1537 if (isUnsupportedAMDGPUAddrspace(Addr)) 1538 return nullptr; 1539 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType()); 1540 // Follow host instrumentation for global and constant addresses. 1541 if (PtrTy->getPointerAddressSpace() != 0) 1542 return InsertBefore; 1543 // Instrument generic addresses in supported addressspaces. 
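  // A rough sketch of the guard emitted below (callee handles are the ones set
  // up in initializeCallbacks; the exact runtime symbol names are defined by
  // the k* constants elsewhere in this file):
  //   %shared  = call i1 AMDGPUAddressShared(i8* %addr)
  //   %private = call i1 AMDGPUAddressPrivate(i8* %addr)
  //   if (!(%shared | %private))
  //     <do the shadow check on the new landing block>
  // so only generic pointers that do not resolve to LDS or scratch memory are
  // actually checked.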
1544 IRBuilder<> IRB(InsertBefore); 1545 Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()); 1546 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong}); 1547 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong}); 1548 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate); 1549 Value *Cmp = IRB.CreateICmpNE(IRB.getTrue(), IsSharedOrPrivate); 1550 Value *AddrSpaceZeroLanding = 1551 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false); 1552 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding); 1553 return InsertBefore; 1554 } 1555 1556 void AddressSanitizer::instrumentAddress(Instruction *OrigIns, 1557 Instruction *InsertBefore, Value *Addr, 1558 uint32_t TypeSize, bool IsWrite, 1559 Value *SizeArgument, bool UseCalls, 1560 uint32_t Exp) { 1561 if (TargetTriple.isAMDGPU()) { 1562 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr, 1563 TypeSize, IsWrite, SizeArgument); 1564 if (!InsertBefore) 1565 return; 1566 } 1567 1568 IRBuilder<> IRB(InsertBefore); 1569 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); 1570 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex); 1571 1572 if (UseCalls && ClOptimizeCallbacks) { 1573 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex); 1574 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 1575 IRB.CreateCall( 1576 Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess), 1577 {IRB.CreatePointerCast(Addr, Int8PtrTy), 1578 ConstantInt::get(Int32Ty, AccessInfo.Packed)}); 1579 return; 1580 } 1581 1582 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1583 if (UseCalls) { 1584 if (Exp == 0) 1585 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], 1586 AddrLong); 1587 else 1588 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], 1589 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1590 return; 1591 } 1592 1593 Type *ShadowTy = 1594 IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale)); 1595 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); 1596 Value *ShadowPtr = memToShadow(AddrLong, IRB); 1597 Value *CmpVal = Constant::getNullValue(ShadowTy); 1598 Value *ShadowValue = 1599 IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); 1600 1601 Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); 1602 size_t Granularity = 1ULL << Mapping.Scale; 1603 Instruction *CrashTerm = nullptr; 1604 1605 if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { 1606 // We use branch weights for the slow path check, to indicate that the slow 1607 // path is rarely taken. This seems to be the case for SPEC benchmarks. 
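    // The resulting check is roughly (with AccessSize == TypeSize / 8):
    //   if (ShadowValue != 0) {                             // rarely taken
    //     if (((Addr & (Granularity - 1)) + AccessSize - 1) >= ShadowValue)
    //       __asan_report_*(Addr);       // followed by unreachable if !Recover
    //   }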
1608 Instruction *CheckTerm = SplitBlockAndInsertIfThen( 1609 Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000)); 1610 assert(cast<BranchInst>(CheckTerm)->isUnconditional()); 1611 BasicBlock *NextBB = CheckTerm->getSuccessor(0); 1612 IRB.SetInsertPoint(CheckTerm); 1613 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); 1614 if (Recover) { 1615 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false); 1616 } else { 1617 BasicBlock *CrashBlock = 1618 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); 1619 CrashTerm = new UnreachableInst(*C, CrashBlock); 1620 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); 1621 ReplaceInstWithInst(CheckTerm, NewTerm); 1622 } 1623 } else { 1624 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover); 1625 } 1626 1627 Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite, 1628 AccessSizeIndex, SizeArgument, Exp); 1629 Crash->setDebugLoc(OrigIns->getDebugLoc()); 1630 } 1631 1632 // Instrument unusual size or unusual alignment. 1633 // We can not do it with a single check, so we do 1-byte check for the first 1634 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able 1635 // to report the actual access size. 1636 void AddressSanitizer::instrumentUnusualSizeOrAlignment( 1637 Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, 1638 bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) { 1639 IRBuilder<> IRB(InsertBefore); 1640 Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); 1641 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1642 if (UseCalls) { 1643 if (Exp == 0) 1644 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0], 1645 {AddrLong, Size}); 1646 else 1647 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1], 1648 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1649 } else { 1650 Value *LastByte = IRB.CreateIntToPtr( 1651 IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), 1652 Addr->getType()); 1653 instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp); 1654 instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp); 1655 } 1656 } 1657 1658 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit, 1659 GlobalValue *ModuleName) { 1660 // Set up the arguments to our poison/unpoison functions. 1661 IRBuilder<> IRB(&GlobalInit.front(), 1662 GlobalInit.front().getFirstInsertionPt()); 1663 1664 // Add a call to poison all external globals before the given function starts. 1665 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); 1666 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); 1667 1668 // Add calls to unpoison all globals before each return instruction. 1669 for (auto &BB : GlobalInit.getBasicBlockList()) 1670 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) 1671 CallInst::Create(AsanUnpoisonGlobals, "", RI); 1672 } 1673 1674 void ModuleAddressSanitizer::createInitializerPoisonCalls( 1675 Module &M, GlobalValue *ModuleName) { 1676 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1677 if (!GV) 1678 return; 1679 1680 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer()); 1681 if (!CA) 1682 return; 1683 1684 for (Use &OP : CA->operands()) { 1685 if (isa<ConstantAggregateZero>(OP)) continue; 1686 ConstantStruct *CS = cast<ConstantStruct>(OP); 1687 1688 // Must have a function or null ptr. 
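    // Each element of llvm.global_ctors is a { i32 priority, void ()* ctor,
    // i8* associated-data } constant struct; operand 1 is the constructor
    // function (or a null pointer) inspected below.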
1689 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) { 1690 if (F->getName() == kAsanModuleCtorName) continue; 1691 auto *Priority = cast<ConstantInt>(CS->getOperand(0)); 1692 // Don't instrument CTORs that will run before asan.module_ctor. 1693 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple)) 1694 continue; 1695 poisonOneInitializer(*F, ModuleName); 1696 } 1697 } 1698 } 1699 1700 const GlobalVariable * 1701 ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const { 1702 // In case this function should be expanded to include rules that do not just 1703 // apply when CompileKernel is true, either guard all existing rules with an 1704 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules 1705 // should also apply to user space. 1706 assert(CompileKernel && "Only expecting to be called when compiling kernel"); 1707 1708 const Constant *C = GA.getAliasee(); 1709 1710 // When compiling the kernel, globals that are aliased by symbols prefixed 1711 // by "__" are special and cannot be padded with a redzone. 1712 if (GA.getName().startswith("__")) 1713 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases()); 1714 1715 return nullptr; 1716 } 1717 1718 bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const { 1719 Type *Ty = G->getValueType(); 1720 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); 1721 1722 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress) 1723 return false; 1724 if (!Ty->isSized()) return false; 1725 if (!G->hasInitializer()) return false; 1726 // Globals in address space 1 and 4 are supported for AMDGPU. 1727 if (G->getAddressSpace() && 1728 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G))) 1729 return false; 1730 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals. 1731 // Two problems with thread-locals: 1732 // - The address of the main thread's copy can't be computed at link-time. 1733 // - Need to poison all copies, not just the main thread's one. 1734 if (G->isThreadLocal()) return false; 1735 // For now, just ignore this Global if the alignment is large. 1736 if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false; 1737 1738 // For non-COFF targets, only instrument globals known to be defined by this 1739 // TU. 1740 // FIXME: We can instrument comdat globals on ELF if we are using the 1741 // GC-friendly metadata scheme. 1742 if (!TargetTriple.isOSBinFormatCOFF()) { 1743 if (!G->hasExactDefinition() || G->hasComdat()) 1744 return false; 1745 } else { 1746 // On COFF, don't instrument non-ODR linkages. 1747 if (G->isInterposable()) 1748 return false; 1749 } 1750 1751 // If a comdat is present, it must have a selection kind that implies ODR 1752 // semantics: no duplicates, any, or exact match. 1753 if (Comdat *C = G->getComdat()) { 1754 switch (C->getSelectionKind()) { 1755 case Comdat::Any: 1756 case Comdat::ExactMatch: 1757 case Comdat::NoDeduplicate: 1758 break; 1759 case Comdat::Largest: 1760 case Comdat::SameSize: 1761 return false; 1762 } 1763 } 1764 1765 if (G->hasSection()) { 1766 // The kernel uses explicit sections for mostly special global variables 1767 // that we should not instrument. E.g. the kernel may rely on their layout 1768 // without redzones, or remove them at link time ("discard.*"), etc. 1769 if (CompileKernel) 1770 return false; 1771 1772 StringRef Section = G->getSection(); 1773 1774 // Globals from llvm.metadata aren't emitted, do not instrument them. 
1775 if (Section == "llvm.metadata") return false; 1776 // Do not instrument globals from special LLVM sections. 1777 if (Section.contains("__llvm") || Section.contains("__LLVM")) 1778 return false; 1779 1780 // Do not instrument function pointers to initialization and termination 1781 // routines: dynamic linker will not properly handle redzones. 1782 if (Section.startswith(".preinit_array") || 1783 Section.startswith(".init_array") || 1784 Section.startswith(".fini_array")) { 1785 return false; 1786 } 1787 1788 // Do not instrument user-defined sections (with names resembling 1789 // valid C identifiers) 1790 if (TargetTriple.isOSBinFormatELF()) { 1791 if (llvm::all_of(Section, 1792 [](char c) { return llvm::isAlnum(c) || c == '_'; })) 1793 return false; 1794 } 1795 1796 // On COFF, if the section name contains '$', it is highly likely that the 1797 // user is using section sorting to create an array of globals similar to 1798 // the way initialization callbacks are registered in .init_array and 1799 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones 1800 // to such globals is counterproductive, because the intent is that they 1801 // will form an array, and out-of-bounds accesses are expected. 1802 // See https://github.com/google/sanitizers/issues/305 1803 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx 1804 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) { 1805 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): " 1806 << *G << "\n"); 1807 return false; 1808 } 1809 1810 if (TargetTriple.isOSBinFormatMachO()) { 1811 StringRef ParsedSegment, ParsedSection; 1812 unsigned TAA = 0, StubSize = 0; 1813 bool TAAParsed; 1814 cantFail(MCSectionMachO::ParseSectionSpecifier( 1815 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize)); 1816 1817 // Ignore the globals from the __OBJC section. The ObjC runtime assumes 1818 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to 1819 // them. 1820 if (ParsedSegment == "__OBJC" || 1821 (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) { 1822 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); 1823 return false; 1824 } 1825 // See https://github.com/google/sanitizers/issues/32 1826 // Constant CFString instances are compiled in the following way: 1827 // -- the string buffer is emitted into 1828 // __TEXT,__cstring,cstring_literals 1829 // -- the constant NSConstantString structure referencing that buffer 1830 // is placed into __DATA,__cfstring 1831 // Therefore there's no point in placing redzones into __DATA,__cfstring. 1832 // Moreover, it causes the linker to crash on OS X 10.7 1833 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") { 1834 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); 1835 return false; 1836 } 1837 // The linker merges the contents of cstring_literals and removes the 1838 // trailing zeroes. 1839 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) { 1840 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); 1841 return false; 1842 } 1843 } 1844 } 1845 1846 if (CompileKernel) { 1847 // Globals that prefixed by "__" are special and cannot be padded with a 1848 // redzone. 1849 if (G->getName().startswith("__")) 1850 return false; 1851 } 1852 1853 return true; 1854 } 1855 1856 // On Mach-O platforms, we emit global metadata in a separate section of the 1857 // binary in order to allow the linker to properly dead strip. 
This is only 1858 // supported on recent versions of ld64. 1859 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const { 1860 if (!TargetTriple.isOSBinFormatMachO()) 1861 return false; 1862 1863 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11)) 1864 return true; 1865 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9)) 1866 return true; 1867 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2)) 1868 return true; 1869 if (TargetTriple.isDriverKit()) 1870 return true; 1871 1872 return false; 1873 } 1874 1875 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const { 1876 switch (TargetTriple.getObjectFormat()) { 1877 case Triple::COFF: return ".ASAN$GL"; 1878 case Triple::ELF: return "asan_globals"; 1879 case Triple::MachO: return "__DATA,__asan_globals,regular"; 1880 case Triple::Wasm: 1881 case Triple::GOFF: 1882 case Triple::SPIRV: 1883 case Triple::XCOFF: 1884 case Triple::DXContainer: 1885 report_fatal_error( 1886 "ModuleAddressSanitizer not implemented for object file format"); 1887 case Triple::UnknownObjectFormat: 1888 break; 1889 } 1890 llvm_unreachable("unsupported object format"); 1891 } 1892 1893 void ModuleAddressSanitizer::initializeCallbacks(Module &M) { 1894 IRBuilder<> IRB(*C); 1895 1896 // Declare our poisoning and unpoisoning functions. 1897 AsanPoisonGlobals = 1898 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy); 1899 AsanUnpoisonGlobals = 1900 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy()); 1901 1902 // Declare functions that register/unregister globals. 1903 AsanRegisterGlobals = M.getOrInsertFunction( 1904 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 1905 AsanUnregisterGlobals = M.getOrInsertFunction( 1906 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 1907 1908 // Declare the functions that find globals in a shared object and then invoke 1909 // the (un)register function on them. 1910 AsanRegisterImageGlobals = M.getOrInsertFunction( 1911 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 1912 AsanUnregisterImageGlobals = M.getOrInsertFunction( 1913 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 1914 1915 AsanRegisterElfGlobals = 1916 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(), 1917 IntptrTy, IntptrTy, IntptrTy); 1918 AsanUnregisterElfGlobals = 1919 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(), 1920 IntptrTy, IntptrTy, IntptrTy); 1921 } 1922 1923 // Put the metadata and the instrumented global in the same group. This ensures 1924 // that the metadata is discarded if the instrumented global is discarded. 1925 void ModuleAddressSanitizer::SetComdatForGlobalMetadata( 1926 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) { 1927 Module &M = *G->getParent(); 1928 Comdat *C = G->getComdat(); 1929 if (!C) { 1930 if (!G->hasName()) { 1931 // If G is unnamed, it must be internal. Give it an artificial name 1932 // so we can put it in a comdat. 1933 assert(G->hasLocalLinkage()); 1934 G->setName(Twine(kAsanGenPrefix) + "_anon_global"); 1935 } 1936 1937 if (!InternalSuffix.empty() && G->hasLocalLinkage()) { 1938 std::string Name = std::string(G->getName()); 1939 Name += InternalSuffix; 1940 C = M.getOrInsertComdat(Name); 1941 } else { 1942 C = M.getOrInsertComdat(G->getName()); 1943 } 1944 1945 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. 
Also upgrade private 1946 // linkage to internal linkage so that a symbol table entry is emitted. This 1947 // is necessary in order to create the comdat group. 1948 if (TargetTriple.isOSBinFormatCOFF()) { 1949 C->setSelectionKind(Comdat::NoDeduplicate); 1950 if (G->hasPrivateLinkage()) 1951 G->setLinkage(GlobalValue::InternalLinkage); 1952 } 1953 G->setComdat(C); 1954 } 1955 1956 assert(G->hasComdat()); 1957 Metadata->setComdat(G->getComdat()); 1958 } 1959 1960 // Create a separate metadata global and put it in the appropriate ASan 1961 // global registration section. 1962 GlobalVariable * 1963 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer, 1964 StringRef OriginalName) { 1965 auto Linkage = TargetTriple.isOSBinFormatMachO() 1966 ? GlobalVariable::InternalLinkage 1967 : GlobalVariable::PrivateLinkage; 1968 GlobalVariable *Metadata = new GlobalVariable( 1969 M, Initializer->getType(), false, Linkage, Initializer, 1970 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName)); 1971 Metadata->setSection(getGlobalMetadataSection()); 1972 return Metadata; 1973 } 1974 1975 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) { 1976 AsanDtorFunction = Function::createWithDefaultAttr( 1977 FunctionType::get(Type::getVoidTy(*C), false), 1978 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M); 1979 AsanDtorFunction->addFnAttr(Attribute::NoUnwind); 1980 // Ensure Dtor cannot be discarded, even if in a comdat. 1981 appendToUsed(M, {AsanDtorFunction}); 1982 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); 1983 1984 return ReturnInst::Create(*C, AsanDtorBB); 1985 } 1986 1987 void ModuleAddressSanitizer::InstrumentGlobalsCOFF( 1988 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 1989 ArrayRef<Constant *> MetadataInitializers) { 1990 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 1991 auto &DL = M.getDataLayout(); 1992 1993 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); 1994 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 1995 Constant *Initializer = MetadataInitializers[i]; 1996 GlobalVariable *G = ExtendedGlobals[i]; 1997 GlobalVariable *Metadata = 1998 CreateMetadataGlobal(M, Initializer, G->getName()); 1999 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); 2000 Metadata->setMetadata(LLVMContext::MD_associated, MD); 2001 MetadataGlobals[i] = Metadata; 2002 2003 // The MSVC linker always inserts padding when linking incrementally. We 2004 // cope with that by aligning each struct to its size, which must be a power 2005 // of two. 2006 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType()); 2007 assert(isPowerOf2_32(SizeOfGlobalStruct) && 2008 "global metadata will not be padded appropriately"); 2009 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct)); 2010 2011 SetComdatForGlobalMetadata(G, Metadata, ""); 2012 } 2013 2014 // Update llvm.compiler.used, adding the new metadata globals. This is 2015 // needed so that during LTO these variables stay alive. 
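  // Note that, unlike the ELF and Mach-O paths below, no explicit registration
  // call is emitted here: on COFF all descriptors land in the ".ASAN$GL"
  // section (see getGlobalMetadataSection), and the runtime is expected to
  // find them by walking that section at startup.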
2016   if (!MetadataGlobals.empty())
2017     appendToCompilerUsed(M, MetadataGlobals);
2018 }
2019 
2020 void ModuleAddressSanitizer::InstrumentGlobalsELF(
2021     IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2022     ArrayRef<Constant *> MetadataInitializers,
2023     const std::string &UniqueModuleId) {
2024   assert(ExtendedGlobals.size() == MetadataInitializers.size());
2025 
2026   // Putting globals in a comdat changes the semantics and can potentially
2027   // cause false-negative ODR violations at link time. If ODR indicators are
2028   // used, we keep the comdat sections, as link-time ODR violations will be
2029   // detected on the ODR indicator symbols.
2030   bool UseComdatForGlobalsGC = UseOdrIndicator;
2031 
2032   SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2033   for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2034     GlobalVariable *G = ExtendedGlobals[i];
2035     GlobalVariable *Metadata =
2036         CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
2037     MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2038     Metadata->setMetadata(LLVMContext::MD_associated, MD);
2039     MetadataGlobals[i] = Metadata;
2040 
2041     if (UseComdatForGlobalsGC)
2042       SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2043   }
2044 
2045   // Update llvm.compiler.used, adding the new metadata globals. This is
2046   // needed so that during LTO these variables stay alive.
2047   if (!MetadataGlobals.empty())
2048     appendToCompilerUsed(M, MetadataGlobals);
2049 
2050   // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2051   // to look up the loaded image that contains it. Second, we can store in it
2052   // whether registration has already occurred, to prevent duplicate
2053   // registration.
2054   //
2055   // Common linkage ensures that there is only one global per shared library.
2056   GlobalVariable *RegisteredFlag = new GlobalVariable(
2057       M, IntptrTy, false, GlobalVariable::CommonLinkage,
2058       ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2059   RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2060 
2061   // Create start and stop symbols.
2062   GlobalVariable *StartELFMetadata = new GlobalVariable(
2063       M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2064       "__start_" + getGlobalMetadataSection());
2065   StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2066   GlobalVariable *StopELFMetadata = new GlobalVariable(
2067       M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2068       "__stop_" + getGlobalMetadataSection());
2069   StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2070 
2071   // Create a call to register the globals with the runtime.
2072   IRB.CreateCall(AsanRegisterElfGlobals,
2073                  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2074                   IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2075                   IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2076 
2077   // We also need to unregister globals at the end, e.g., when a shared library
2078   // gets closed.
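  // ELF linkers define __start_<sec> / __stop_<sec> symbols automatically for
  // sections whose names are valid C identifiers ("asan_globals" here, per
  // getGlobalMetadataSection), so the call above simply hands the runtime the
  // whole descriptor range together with the registered-flag word.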
2079 if (DestructorKind != AsanDtorKind::None) { 2080 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2081 IrbDtor.CreateCall(AsanUnregisterElfGlobals, 2082 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), 2083 IRB.CreatePointerCast(StartELFMetadata, IntptrTy), 2084 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); 2085 } 2086 } 2087 2088 void ModuleAddressSanitizer::InstrumentGlobalsMachO( 2089 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2090 ArrayRef<Constant *> MetadataInitializers) { 2091 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2092 2093 // On recent Mach-O platforms, use a structure which binds the liveness of 2094 // the global variable to the metadata struct. Keep the list of "Liveness" GV 2095 // created to be added to llvm.compiler.used 2096 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy); 2097 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size()); 2098 2099 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 2100 Constant *Initializer = MetadataInitializers[i]; 2101 GlobalVariable *G = ExtendedGlobals[i]; 2102 GlobalVariable *Metadata = 2103 CreateMetadataGlobal(M, Initializer, G->getName()); 2104 2105 // On recent Mach-O platforms, we emit the global metadata in a way that 2106 // allows the linker to properly strip dead globals. 2107 auto LivenessBinder = 2108 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u), 2109 ConstantExpr::getPointerCast(Metadata, IntptrTy)); 2110 GlobalVariable *Liveness = new GlobalVariable( 2111 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder, 2112 Twine("__asan_binder_") + G->getName()); 2113 Liveness->setSection("__DATA,__asan_liveness,regular,live_support"); 2114 LivenessGlobals[i] = Liveness; 2115 } 2116 2117 // Update llvm.compiler.used, adding the new liveness globals. This is 2118 // needed so that during LTO these variables stay alive. The alternative 2119 // would be to have the linker handling the LTO symbols, but libLTO 2120 // current API does not expose access to the section for each symbol. 2121 if (!LivenessGlobals.empty()) 2122 appendToCompilerUsed(M, LivenessGlobals); 2123 2124 // RegisteredFlag serves two purposes. First, we can pass it to dladdr() 2125 // to look up the loaded image that contains it. Second, we can store in it 2126 // whether registration has already occurred, to prevent duplicate 2127 // registration. 2128 // 2129 // common linkage ensures that there is only one global per shared library. 2130 GlobalVariable *RegisteredFlag = new GlobalVariable( 2131 M, IntptrTy, false, GlobalVariable::CommonLinkage, 2132 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName); 2133 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility); 2134 2135 IRB.CreateCall(AsanRegisterImageGlobals, 2136 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); 2137 2138 // We also need to unregister globals at the end, e.g., when a shared library 2139 // gets closed. 
2140 if (DestructorKind != AsanDtorKind::None) { 2141 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2142 IrbDtor.CreateCall(AsanUnregisterImageGlobals, 2143 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); 2144 } 2145 } 2146 2147 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray( 2148 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2149 ArrayRef<Constant *> MetadataInitializers) { 2150 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2151 unsigned N = ExtendedGlobals.size(); 2152 assert(N > 0); 2153 2154 // On platforms that don't have a custom metadata section, we emit an array 2155 // of global metadata structures. 2156 ArrayType *ArrayOfGlobalStructTy = 2157 ArrayType::get(MetadataInitializers[0]->getType(), N); 2158 auto AllGlobals = new GlobalVariable( 2159 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage, 2160 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), ""); 2161 if (Mapping.Scale > 3) 2162 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale)); 2163 2164 IRB.CreateCall(AsanRegisterGlobals, 2165 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 2166 ConstantInt::get(IntptrTy, N)}); 2167 2168 // We also need to unregister globals at the end, e.g., when a shared library 2169 // gets closed. 2170 if (DestructorKind != AsanDtorKind::None) { 2171 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2172 IrbDtor.CreateCall(AsanUnregisterGlobals, 2173 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 2174 ConstantInt::get(IntptrTy, N)}); 2175 } 2176 } 2177 2178 // This function replaces all global variables with new variables that have 2179 // trailing redzones. It also creates a function that poisons 2180 // redzones and inserts this function into llvm.global_ctors. 2181 // Sets *CtorComdat to true if the global registration code emitted into the 2182 // asan constructor is comdat-compatible. 2183 bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M, 2184 bool *CtorComdat) { 2185 *CtorComdat = false; 2186 2187 // Build set of globals that are aliased by some GA, where 2188 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable. 2189 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions; 2190 if (CompileKernel) { 2191 for (auto &GA : M.aliases()) { 2192 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA)) 2193 AliasedGlobalExclusions.insert(GV); 2194 } 2195 } 2196 2197 SmallVector<GlobalVariable *, 16> GlobalsToChange; 2198 for (auto &G : M.globals()) { 2199 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G)) 2200 GlobalsToChange.push_back(&G); 2201 } 2202 2203 size_t n = GlobalsToChange.size(); 2204 if (n == 0) { 2205 *CtorComdat = true; 2206 return false; 2207 } 2208 2209 auto &DL = M.getDataLayout(); 2210 2211 // A global is described by a structure 2212 // size_t beg; 2213 // size_t size; 2214 // size_t size_with_redzone; 2215 // const char *name; 2216 // const char *module_name; 2217 // size_t has_dynamic_init; 2218 // size_t padding_for_windows_msvc_incremental_link; 2219 // size_t odr_indicator; 2220 // We initialize an array of such structures and pass it to a run-time call. 
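  // For example, for "int g = 42;" and the minimum 32-byte redzone the
  // descriptor built below is roughly (field order as in the comment above):
  //   { &g, 4, 32, "g", <module name string>, 0, 0, <odr indicator or null> }
  // i.e. a 4-byte payload followed by a 28-byte right redzone.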
2221 StructType *GlobalStructTy = 2222 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, 2223 IntptrTy, IntptrTy, IntptrTy); 2224 SmallVector<GlobalVariable *, 16> NewGlobals(n); 2225 SmallVector<Constant *, 16> Initializers(n); 2226 2227 bool HasDynamicallyInitializedGlobals = false; 2228 2229 // We shouldn't merge same module names, as this string serves as unique 2230 // module ID in runtime. 2231 GlobalVariable *ModuleName = createPrivateGlobalForString( 2232 M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix); 2233 2234 for (size_t i = 0; i < n; i++) { 2235 GlobalVariable *G = GlobalsToChange[i]; 2236 2237 GlobalValue::SanitizerMetadata MD; 2238 if (G->hasSanitizerMetadata()) 2239 MD = G->getSanitizerMetadata(); 2240 2241 // TODO: Symbol names in the descriptor can be demangled by the runtime 2242 // library. This could save ~0.4% of VM size for a private large binary. 2243 std::string NameForGlobal = llvm::demangle(G->getName().str()); 2244 GlobalVariable *Name = 2245 createPrivateGlobalForString(M, NameForGlobal, 2246 /*AllowMerging*/ true, kAsanGenPrefix); 2247 2248 Type *Ty = G->getValueType(); 2249 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty); 2250 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes); 2251 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); 2252 2253 StructType *NewTy = StructType::get(Ty, RightRedZoneTy); 2254 Constant *NewInitializer = ConstantStruct::get( 2255 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy)); 2256 2257 // Create a new global variable with enough space for a redzone. 2258 GlobalValue::LinkageTypes Linkage = G->getLinkage(); 2259 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) 2260 Linkage = GlobalValue::InternalLinkage; 2261 GlobalVariable *NewGlobal = new GlobalVariable( 2262 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G, 2263 G->getThreadLocalMode(), G->getAddressSpace()); 2264 NewGlobal->copyAttributesFrom(G); 2265 NewGlobal->setComdat(G->getComdat()); 2266 NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal())); 2267 // Don't fold globals with redzones. ODR violation detector and redzone 2268 // poisoning implicitly creates a dependence on the global's address, so it 2269 // is no longer valid for it to be marked unnamed_addr. 2270 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None); 2271 2272 // Move null-terminated C strings to "__asan_cstring" section on Darwin. 2273 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() && 2274 G->isConstant()) { 2275 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer()); 2276 if (Seq && Seq->isCString()) 2277 NewGlobal->setSection("__TEXT,__asan_cstring,regular"); 2278 } 2279 2280 // Transfer the debug info and type metadata. The payload starts at offset 2281 // zero so we can copy the metadata over as is. 
2282 NewGlobal->copyMetadata(G, 0); 2283 2284 Value *Indices2[2]; 2285 Indices2[0] = IRB.getInt32(0); 2286 Indices2[1] = IRB.getInt32(0); 2287 2288 G->replaceAllUsesWith( 2289 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true)); 2290 NewGlobal->takeName(G); 2291 G->eraseFromParent(); 2292 NewGlobals[i] = NewGlobal; 2293 2294 Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy()); 2295 GlobalValue *InstrumentedGlobal = NewGlobal; 2296 2297 bool CanUsePrivateAliases = 2298 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() || 2299 TargetTriple.isOSBinFormatWasm(); 2300 if (CanUsePrivateAliases && UsePrivateAlias) { 2301 // Create local alias for NewGlobal to avoid crash on ODR between 2302 // instrumented and non-instrumented libraries. 2303 InstrumentedGlobal = 2304 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal); 2305 } 2306 2307 // ODR should not happen for local linkage. 2308 if (NewGlobal->hasLocalLinkage()) { 2309 ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), 2310 IRB.getInt8PtrTy()); 2311 } else if (UseOdrIndicator) { 2312 // With local aliases, we need to provide another externally visible 2313 // symbol __odr_asan_XXX to detect ODR violation. 2314 auto *ODRIndicatorSym = 2315 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage, 2316 Constant::getNullValue(IRB.getInt8Ty()), 2317 kODRGenPrefix + NameForGlobal, nullptr, 2318 NewGlobal->getThreadLocalMode()); 2319 2320 // Set meaningful attributes for indicator symbol. 2321 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility()); 2322 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass()); 2323 ODRIndicatorSym->setAlignment(Align(1)); 2324 ODRIndicator = ODRIndicatorSym; 2325 } 2326 2327 Constant *Initializer = ConstantStruct::get( 2328 GlobalStructTy, 2329 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy), 2330 ConstantInt::get(IntptrTy, SizeInBytes), 2331 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), 2332 ConstantExpr::getPointerCast(Name, IntptrTy), 2333 ConstantExpr::getPointerCast(ModuleName, IntptrTy), 2334 ConstantInt::get(IntptrTy, MD.IsDynInit), 2335 Constant::getNullValue(IntptrTy), 2336 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy)); 2337 2338 if (ClInitializers && MD.IsDynInit) 2339 HasDynamicallyInitializedGlobals = true; 2340 2341 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); 2342 2343 Initializers[i] = Initializer; 2344 } 2345 2346 // Add instrumented globals to llvm.compiler.used list to avoid LTO from 2347 // ConstantMerge'ing them. 2348 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList; 2349 for (size_t i = 0; i < n; i++) { 2350 GlobalVariable *G = NewGlobals[i]; 2351 if (G->getName().empty()) continue; 2352 GlobalsToAddToUsedList.push_back(G); 2353 } 2354 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList)); 2355 2356 std::string ELFUniqueModuleId = 2357 (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? 
getUniqueModuleId(&M) 2358 : ""; 2359 2360 if (!ELFUniqueModuleId.empty()) { 2361 InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId); 2362 *CtorComdat = true; 2363 } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) { 2364 InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers); 2365 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) { 2366 InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers); 2367 } else { 2368 InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers); 2369 } 2370 2371 // Create calls for poisoning before initializers run and unpoisoning after. 2372 if (HasDynamicallyInitializedGlobals) 2373 createInitializerPoisonCalls(M, ModuleName); 2374 2375 LLVM_DEBUG(dbgs() << M); 2376 return true; 2377 } 2378 2379 uint64_t 2380 ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const { 2381 constexpr uint64_t kMaxRZ = 1 << 18; 2382 const uint64_t MinRZ = getMinRedzoneSizeForGlobal(); 2383 2384 uint64_t RZ = 0; 2385 if (SizeInBytes <= MinRZ / 2) { 2386 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is 2387 // at least 32 bytes, optimize when SizeInBytes is less than or equal to 2388 // half of MinRZ. 2389 RZ = MinRZ - SizeInBytes; 2390 } else { 2391 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes. 2392 RZ = std::max(MinRZ, std::min(kMaxRZ, (SizeInBytes / MinRZ / 4) * MinRZ)); 2393 2394 // Round up to multiple of MinRZ. 2395 if (SizeInBytes % MinRZ) 2396 RZ += MinRZ - (SizeInBytes % MinRZ); 2397 } 2398 2399 assert((RZ + SizeInBytes) % MinRZ == 0); 2400 2401 return RZ; 2402 } 2403 2404 int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const { 2405 int LongSize = M.getDataLayout().getPointerSizeInBits(); 2406 bool isAndroid = Triple(M.getTargetTriple()).isAndroid(); 2407 int Version = 8; 2408 // 32-bit Android is one version ahead because of the switch to dynamic 2409 // shadow. 2410 Version += (LongSize == 32 && isAndroid); 2411 return Version; 2412 } 2413 2414 bool ModuleAddressSanitizer::instrumentModule(Module &M) { 2415 initializeCallbacks(M); 2416 2417 // Create a module constructor. A destructor is created lazily because not all 2418 // platforms, and not all modules need it. 2419 if (CompileKernel) { 2420 // The kernel always builds with its own runtime, and therefore does not 2421 // need the init and version check calls. 2422 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName); 2423 } else { 2424 std::string AsanVersion = std::to_string(GetAsanVersion(M)); 2425 std::string VersionCheckName = 2426 ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : ""; 2427 std::tie(AsanCtorFunction, std::ignore) = 2428 createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, 2429 kAsanInitName, /*InitArgTypes=*/{}, 2430 /*InitArgs=*/{}, VersionCheckName); 2431 } 2432 2433 bool CtorComdat = true; 2434 if (ClGlobals) { 2435 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator()); 2436 InstrumentGlobals(IRB, M, &CtorComdat); 2437 } 2438 2439 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple); 2440 2441 // Put the constructor and destructor in comdat if both 2442 // (1) global instrumentation is not TU-specific 2443 // (2) target is ELF. 
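  // For a typical userspace ELF module the constructor created above ends up
  // roughly as (the version-check call is present only when
  // ClInsertVersionCheck is set; global registration is whatever
  // InstrumentGlobals emitted):
  //   define internal void @asan.module_ctor() {
  //     call void @__asan_init()
  //     call void @<version check, kAsanVersionCheckNamePrefix + version>()
  //     <global registration code>
  //     ret void
  //   }
  // The branch below additionally puts the ctor/dtor into comdats so that the
  // copies emitted by different TUs can be deduplicated by the linker.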
2444 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) { 2445 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName)); 2446 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction); 2447 if (AsanDtorFunction) { 2448 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName)); 2449 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction); 2450 } 2451 } else { 2452 appendToGlobalCtors(M, AsanCtorFunction, Priority); 2453 if (AsanDtorFunction) 2454 appendToGlobalDtors(M, AsanDtorFunction, Priority); 2455 } 2456 2457 return true; 2458 } 2459 2460 void AddressSanitizer::initializeCallbacks(Module &M) { 2461 IRBuilder<> IRB(*C); 2462 // Create __asan_report* callbacks. 2463 // IsWrite, TypeSize and Exp are encoded in the function name. 2464 for (int Exp = 0; Exp < 2; Exp++) { 2465 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 2466 const std::string TypeStr = AccessIsWrite ? "store" : "load"; 2467 const std::string ExpStr = Exp ? "exp_" : ""; 2468 const std::string EndingStr = Recover ? "_noabort" : ""; 2469 2470 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy}; 2471 SmallVector<Type *, 2> Args1{1, IntptrTy}; 2472 if (Exp) { 2473 Type *ExpType = Type::getInt32Ty(*C); 2474 Args2.push_back(ExpType); 2475 Args1.push_back(ExpType); 2476 } 2477 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2478 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr, 2479 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2480 2481 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2482 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, 2483 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2484 2485 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 2486 AccessSizeIndex++) { 2487 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex); 2488 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2489 M.getOrInsertFunction( 2490 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr, 2491 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2492 2493 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2494 M.getOrInsertFunction( 2495 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, 2496 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2497 } 2498 } 2499 } 2500 2501 const std::string MemIntrinCallbackPrefix = 2502 (CompileKernel && !ClKasanMemIntrinCallbackPrefix) 2503 ? 
std::string("") 2504 : ClMemoryAccessCallbackPrefix; 2505 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove", 2506 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2507 IRB.getInt8PtrTy(), IntptrTy); 2508 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", 2509 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2510 IRB.getInt8PtrTy(), IntptrTy); 2511 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset", 2512 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2513 IRB.getInt32Ty(), IntptrTy); 2514 2515 AsanHandleNoReturnFunc = 2516 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy()); 2517 2518 AsanPtrCmpFunction = 2519 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy); 2520 AsanPtrSubFunction = 2521 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy); 2522 if (Mapping.InGlobal) 2523 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow", 2524 ArrayType::get(IRB.getInt8Ty(), 0)); 2525 2526 AMDGPUAddressShared = M.getOrInsertFunction( 2527 kAMDGPUAddressSharedName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2528 AMDGPUAddressPrivate = M.getOrInsertFunction( 2529 kAMDGPUAddressPrivateName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2530 } 2531 2532 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { 2533 // For each NSObject descendant having a +load method, this method is invoked 2534 // by the ObjC runtime before any of the static constructors is called. 2535 // Therefore we need to instrument such methods with a call to __asan_init 2536 // at the beginning in order to initialize our runtime before any access to 2537 // the shadow memory. 2538 // We cannot just ignore these methods, because they may call other 2539 // instrumented functions. 2540 if (F.getName().find(" load]") != std::string::npos) { 2541 FunctionCallee AsanInitFunction = 2542 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {}); 2543 IRBuilder<> IRB(&F.front(), F.front().begin()); 2544 IRB.CreateCall(AsanInitFunction, {}); 2545 return true; 2546 } 2547 return false; 2548 } 2549 2550 bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) { 2551 // Generate code only when dynamic addressing is needed. 2552 if (Mapping.Offset != kDynamicShadowSentinel) 2553 return false; 2554 2555 IRBuilder<> IRB(&F.front().front()); 2556 if (Mapping.InGlobal) { 2557 if (ClWithIfuncSuppressRemat) { 2558 // An empty inline asm with input reg == output reg. 2559 // An opaque pointer-to-int cast, basically. 2560 InlineAsm *Asm = InlineAsm::get( 2561 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false), 2562 StringRef(""), StringRef("=r,0"), 2563 /*hasSideEffects=*/false); 2564 LocalDynamicShadow = 2565 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow"); 2566 } else { 2567 LocalDynamicShadow = 2568 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow"); 2569 } 2570 } else { 2571 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( 2572 kAsanShadowMemoryDynamicAddress, IntptrTy); 2573 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress); 2574 } 2575 return true; 2576 } 2577 2578 void AddressSanitizer::markEscapedLocalAllocas(Function &F) { 2579 // Find the one possible call to llvm.localescape and pre-mark allocas passed 2580 // to it as uninteresting. This assumes we haven't started processing allocas 2581 // yet. This check is done up front because iterating the use list in 2582 // isInterestingAlloca would be algorithmically slower. 
2583 assert(ProcessedAllocas.empty() && "must process localescape before allocas"); 2584 2585 // Try to get the declaration of llvm.localescape. If it's not in the module, 2586 // we can exit early. 2587 if (!F.getParent()->getFunction("llvm.localescape")) return; 2588 2589 // Look for a call to llvm.localescape call in the entry block. It can't be in 2590 // any other block. 2591 for (Instruction &I : F.getEntryBlock()) { 2592 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I); 2593 if (II && II->getIntrinsicID() == Intrinsic::localescape) { 2594 // We found a call. Mark all the allocas passed in as uninteresting. 2595 for (Value *Arg : II->args()) { 2596 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); 2597 assert(AI && AI->isStaticAlloca() && 2598 "non-static alloca arg to localescape"); 2599 ProcessedAllocas[AI] = false; 2600 } 2601 break; 2602 } 2603 } 2604 } 2605 2606 bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) { 2607 bool ShouldInstrument = 2608 ClDebugMin < 0 || ClDebugMax < 0 || 2609 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax); 2610 Instrumented++; 2611 return !ShouldInstrument; 2612 } 2613 2614 bool AddressSanitizer::instrumentFunction(Function &F, 2615 const TargetLibraryInfo *TLI) { 2616 if (F.empty()) 2617 return false; 2618 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; 2619 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false; 2620 if (F.getName().startswith("__asan_")) return false; 2621 2622 bool FunctionModified = false; 2623 2624 // If needed, insert __asan_init before checking for SanitizeAddress attr. 2625 // This function needs to be called even if the function body is not 2626 // instrumented. 2627 if (maybeInsertAsanInitAtFunctionEntry(F)) 2628 FunctionModified = true; 2629 2630 // Leave if the function doesn't need instrumentation. 2631 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified; 2632 2633 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation)) 2634 return FunctionModified; 2635 2636 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); 2637 2638 initializeCallbacks(*F.getParent()); 2639 2640 FunctionStateRAII CleanupObj(this); 2641 2642 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F); 2643 2644 // We can't instrument allocas used with llvm.localescape. Only static allocas 2645 // can be passed to that intrinsic. 2646 markEscapedLocalAllocas(F); 2647 2648 // We want to instrument every address only once per basic block (unless there 2649 // are calls between uses). 2650 SmallPtrSet<Value *, 16> TempsToInstrument; 2651 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument; 2652 SmallVector<MemIntrinsic *, 16> IntrinToInstrument; 2653 SmallVector<Instruction *, 8> NoReturnCalls; 2654 SmallVector<BasicBlock *, 16> AllBlocks; 2655 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts; 2656 2657 // Fill the set of memory operations to instrument. 2658 for (auto &BB : F) { 2659 AllBlocks.push_back(&BB); 2660 TempsToInstrument.clear(); 2661 int NumInsnsPerBB = 0; 2662 for (auto &Inst : BB) { 2663 if (LooksLikeCodeInBug11395(&Inst)) return false; 2664 // Skip instructions inserted by another instrumentation. 
2665 if (Inst.hasMetadata(LLVMContext::MD_nosanitize)) 2666 continue; 2667 SmallVector<InterestingMemoryOperand, 1> InterestingOperands; 2668 getInterestingMemoryOperands(&Inst, InterestingOperands); 2669 2670 if (!InterestingOperands.empty()) { 2671 for (auto &Operand : InterestingOperands) { 2672 if (ClOpt && ClOptSameTemp) { 2673 Value *Ptr = Operand.getPtr(); 2674 // If we have a mask, skip instrumentation if we've already 2675 // instrumented the full object. But don't add to TempsToInstrument 2676 // because we might get another load/store with a different mask. 2677 if (Operand.MaybeMask) { 2678 if (TempsToInstrument.count(Ptr)) 2679 continue; // We've seen this (whole) temp in the current BB. 2680 } else { 2681 if (!TempsToInstrument.insert(Ptr).second) 2682 continue; // We've seen this temp in the current BB. 2683 } 2684 } 2685 OperandsToInstrument.push_back(Operand); 2686 NumInsnsPerBB++; 2687 } 2688 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) && 2689 isInterestingPointerComparison(&Inst)) || 2690 ((ClInvalidPointerPairs || ClInvalidPointerSub) && 2691 isInterestingPointerSubtraction(&Inst))) { 2692 PointerComparisonsOrSubtracts.push_back(&Inst); 2693 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) { 2694 // ok, take it. 2695 IntrinToInstrument.push_back(MI); 2696 NumInsnsPerBB++; 2697 } else { 2698 if (auto *CB = dyn_cast<CallBase>(&Inst)) { 2699 // A call inside BB. 2700 TempsToInstrument.clear(); 2701 if (CB->doesNotReturn()) 2702 NoReturnCalls.push_back(CB); 2703 } 2704 if (CallInst *CI = dyn_cast<CallInst>(&Inst)) 2705 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); 2706 } 2707 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; 2708 } 2709 } 2710 2711 bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 && 2712 OperandsToInstrument.size() + IntrinToInstrument.size() > 2713 (unsigned)ClInstrumentationWithCallsThreshold); 2714 const DataLayout &DL = F.getParent()->getDataLayout(); 2715 ObjectSizeOpts ObjSizeOpts; 2716 ObjSizeOpts.RoundToAlign = true; 2717 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts); 2718 2719 // Instrument. 2720 int NumInstrumented = 0; 2721 for (auto &Operand : OperandsToInstrument) { 2722 if (!suppressInstrumentationSiteForDebug(NumInstrumented)) 2723 instrumentMop(ObjSizeVis, Operand, UseCalls, 2724 F.getParent()->getDataLayout()); 2725 FunctionModified = true; 2726 } 2727 for (auto Inst : IntrinToInstrument) { 2728 if (!suppressInstrumentationSiteForDebug(NumInstrumented)) 2729 instrumentMemIntrinsic(Inst); 2730 FunctionModified = true; 2731 } 2732 2733 FunctionStackPoisoner FSP(F, *this); 2734 bool ChangedStack = FSP.runOnFunction(); 2735 2736 // We must unpoison the stack before NoReturn calls (throw, _exit, etc). 2737 // See e.g. https://github.com/google/sanitizers/issues/37 2738 for (auto CI : NoReturnCalls) { 2739 IRBuilder<> IRB(CI); 2740 IRB.CreateCall(AsanHandleNoReturnFunc, {}); 2741 } 2742 2743 for (auto Inst : PointerComparisonsOrSubtracts) { 2744 instrumentPointerComparisonOrSubtraction(Inst); 2745 FunctionModified = true; 2746 } 2747 2748 if (ChangedStack || !NoReturnCalls.empty()) 2749 FunctionModified = true; 2750 2751 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " " 2752 << F << "\n"); 2753 2754 return FunctionModified; 2755 } 2756 2757 // Workaround for bug 11395: we don't want to instrument stack in functions 2758 // with large assembly blobs (32-bit only), otherwise reg alloc may crash. 2759 // FIXME: remove once the bug 11395 is fixed. 
2760 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
2761   if (LongSize != 32) return false;
2762   CallInst *CI = dyn_cast<CallInst>(I);
2763   if (!CI || !CI->isInlineAsm()) return false;
2764   if (CI->arg_size() <= 5)
2765     return false;
2766   // We have inline assembly with quite a few arguments.
2767   return true;
2768 }
2769 
2770 void FunctionStackPoisoner::initializeCallbacks(Module &M) {
2771   IRBuilder<> IRB(*C);
2772   if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
2773       ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
2774     const char *MallocNameTemplate =
2775         ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
2776             ? kAsanStackMallocAlwaysNameTemplate
2777             : kAsanStackMallocNameTemplate;
2778     for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
2779       std::string Suffix = itostr(Index);
2780       AsanStackMallocFunc[Index] = M.getOrInsertFunction(
2781           MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
2782       AsanStackFreeFunc[Index] =
2783           M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
2784                                 IRB.getVoidTy(), IntptrTy, IntptrTy);
2785     }
2786   }
2787   if (ASan.UseAfterScope) {
2788     AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
2789         kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2790     AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
2791         kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2792   }
2793 
2794   for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
2795     std::ostringstream Name;
2796     Name << kAsanSetShadowPrefix;
2797     Name << std::setw(2) << std::setfill('0') << std::hex << Val;
2798     AsanSetShadowFunc[Val] =
2799         M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
2800   }
2801 
2802   AsanAllocaPoisonFunc = M.getOrInsertFunction(
2803       kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
2804   AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
2805       kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
2806 }
2807 
2808 void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
2809                                                ArrayRef<uint8_t> ShadowBytes,
2810                                                size_t Begin, size_t End,
2811                                                IRBuilder<> &IRB,
2812                                                Value *ShadowBase) {
2813   if (Begin >= End)
2814     return;
2815 
2816   const size_t LargestStoreSizeInBytes =
2817       std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
2818 
2819   const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();
2820 
2821   // Poison the given range in shadow using the largest store size, without
2822   // leading and trailing zeros in ShadowMask. Zeros never change, so they need
2823   // neither poisoning nor unpoisoning. Still, we don't mind if some of them
2824   // end up in the middle of a store.
2825   for (size_t i = Begin; i < End;) {
2826     if (!ShadowMask[i]) {
2827       assert(!ShadowBytes[i]);
2828       ++i;
2829       continue;
2830     }
2831 
2832     size_t StoreSizeInBytes = LargestStoreSizeInBytes;
2833     // Fit store size into the range.
2834     while (StoreSizeInBytes > End - i)
2835       StoreSizeInBytes /= 2;
2836 
2837     // Minimize store size by trimming trailing zeros.
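    // E.g. if only the first mask byte of an 8-byte candidate is non-zero, the
    // store degrades all the way down to a single byte.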
2838 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) { 2839 while (j <= StoreSizeInBytes / 2) 2840 StoreSizeInBytes /= 2; 2841 } 2842 2843 uint64_t Val = 0; 2844 for (size_t j = 0; j < StoreSizeInBytes; j++) { 2845 if (IsLittleEndian) 2846 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j); 2847 else 2848 Val = (Val << 8) | ShadowBytes[i + j]; 2849 } 2850 2851 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)); 2852 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val); 2853 IRB.CreateAlignedStore( 2854 Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 2855 Align(1)); 2856 2857 i += StoreSizeInBytes; 2858 } 2859 } 2860 2861 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask, 2862 ArrayRef<uint8_t> ShadowBytes, 2863 IRBuilder<> &IRB, Value *ShadowBase) { 2864 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase); 2865 } 2866 2867 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask, 2868 ArrayRef<uint8_t> ShadowBytes, 2869 size_t Begin, size_t End, 2870 IRBuilder<> &IRB, Value *ShadowBase) { 2871 assert(ShadowMask.size() == ShadowBytes.size()); 2872 size_t Done = Begin; 2873 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) { 2874 if (!ShadowMask[i]) { 2875 assert(!ShadowBytes[i]); 2876 continue; 2877 } 2878 uint8_t Val = ShadowBytes[i]; 2879 if (!AsanSetShadowFunc[Val]) 2880 continue; 2881 2882 // Skip same values. 2883 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) { 2884 } 2885 2886 if (j - i >= ClMaxInlinePoisoningSize) { 2887 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase); 2888 IRB.CreateCall(AsanSetShadowFunc[Val], 2889 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)), 2890 ConstantInt::get(IntptrTy, j - i)}); 2891 Done = j; 2892 } 2893 } 2894 2895 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase); 2896 } 2897 2898 // Fake stack allocator (asan_fake_stack.h) has 11 size classes 2899 // for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass 2900 static int StackMallocSizeClass(uint64_t LocalStackSize) { 2901 assert(LocalStackSize <= kMaxStackMallocSize); 2902 uint64_t MaxSize = kMinStackMallocSize; 2903 for (int i = 0;; i++, MaxSize *= 2) 2904 if (LocalStackSize <= MaxSize) return i; 2905 llvm_unreachable("impossible LocalStackSize"); 2906 } 2907 2908 void FunctionStackPoisoner::copyArgsPassedByValToAllocas() { 2909 Instruction *CopyInsertPoint = &F.front().front(); 2910 if (CopyInsertPoint == ASan.LocalDynamicShadow) { 2911 // Insert after the dynamic shadow location is determined 2912 CopyInsertPoint = CopyInsertPoint->getNextNode(); 2913 assert(CopyInsertPoint); 2914 } 2915 IRBuilder<> IRB(CopyInsertPoint); 2916 const DataLayout &DL = F.getParent()->getDataLayout(); 2917 for (Argument &Arg : F.args()) { 2918 if (Arg.hasByValAttr()) { 2919 Type *Ty = Arg.getParamByValType(); 2920 const Align Alignment = 2921 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty); 2922 2923 AllocaInst *AI = IRB.CreateAlloca( 2924 Ty, nullptr, 2925 (Arg.hasName() ? 
Arg.getName() : "Arg" + Twine(Arg.getArgNo())) + 2926 ".byval"); 2927 AI->setAlignment(Alignment); 2928 Arg.replaceAllUsesWith(AI); 2929 2930 uint64_t AllocSize = DL.getTypeAllocSize(Ty); 2931 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize); 2932 } 2933 } 2934 } 2935 2936 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond, 2937 Value *ValueIfTrue, 2938 Instruction *ThenTerm, 2939 Value *ValueIfFalse) { 2940 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2); 2941 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent(); 2942 PHI->addIncoming(ValueIfFalse, CondBlock); 2943 BasicBlock *ThenBlock = ThenTerm->getParent(); 2944 PHI->addIncoming(ValueIfTrue, ThenBlock); 2945 return PHI; 2946 } 2947 2948 Value *FunctionStackPoisoner::createAllocaForLayout( 2949 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) { 2950 AllocaInst *Alloca; 2951 if (Dynamic) { 2952 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(), 2953 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize), 2954 "MyAlloca"); 2955 } else { 2956 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize), 2957 nullptr, "MyAlloca"); 2958 assert(Alloca->isStaticAlloca()); 2959 } 2960 assert((ClRealignStack & (ClRealignStack - 1)) == 0); 2961 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack)); 2962 Alloca->setAlignment(Align(FrameAlignment)); 2963 return IRB.CreatePointerCast(Alloca, IntptrTy); 2964 } 2965 2966 void FunctionStackPoisoner::createDynamicAllocasInitStorage() { 2967 BasicBlock &FirstBB = *F.begin(); 2968 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin())); 2969 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr); 2970 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout); 2971 DynamicAllocaLayout->setAlignment(Align(32)); 2972 } 2973 2974 void FunctionStackPoisoner::processDynamicAllocas() { 2975 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) { 2976 assert(DynamicAllocaPoisonCallVec.empty()); 2977 return; 2978 } 2979 2980 // Insert poison calls for lifetime intrinsics for dynamic allocas. 2981 for (const auto &APC : DynamicAllocaPoisonCallVec) { 2982 assert(APC.InsBefore); 2983 assert(APC.AI); 2984 assert(ASan.isInterestingAlloca(*APC.AI)); 2985 assert(!APC.AI->isStaticAlloca()); 2986 2987 IRBuilder<> IRB(APC.InsBefore); 2988 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison); 2989 // Dynamic allocas will be unpoisoned unconditionally below in 2990 // unpoisonDynamicAllocas. 2991 // Flag that we need unpoison static allocas. 2992 } 2993 2994 // Handle dynamic allocas. 2995 createDynamicAllocasInitStorage(); 2996 for (auto &AI : DynamicAllocaVec) 2997 handleDynamicAllocaCall(AI); 2998 unpoisonDynamicAllocas(); 2999 } 3000 3001 /// Collect instructions in the entry block after \p InsBefore which initialize 3002 /// permanent storage for a function argument. These instructions must remain in 3003 /// the entry block so that uninitialized values do not appear in backtraces. An 3004 /// added benefit is that this conserves spill slots. This does not move stores 3005 /// before instrumented / "interesting" allocas. 
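/// A typical pattern this matches (illustrative IR, not taken from a test):
///   %x.addr = alloca i64, align 8
///   %conv = sext i32 %x to i64      ; cast of the argument
///   store i64 %conv, i64* %x.addr   ; initializes the argument's home slot
/// Both the cast and the store are collected so that processStaticAllocas can
/// move them up before the instrumented allocas in the entry block.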
3006 static void findStoresToUninstrumentedArgAllocas( 3007 AddressSanitizer &ASan, Instruction &InsBefore, 3008 SmallVectorImpl<Instruction *> &InitInsts) { 3009 Instruction *Start = InsBefore.getNextNonDebugInstruction(); 3010 for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) { 3011 // Argument initialization looks like: 3012 // 1) store <Argument>, <Alloca> OR 3013 // 2) <CastArgument> = cast <Argument> to ... 3014 // store <CastArgument> to <Alloca> 3015 // Do not consider any other kind of instruction. 3016 // 3017 // Note: This covers all known cases, but may not be exhaustive. An 3018 // alternative to pattern-matching stores is to DFS over all Argument uses: 3019 // this might be more general, but is probably much more complicated. 3020 if (isa<AllocaInst>(It) || isa<CastInst>(It)) 3021 continue; 3022 if (auto *Store = dyn_cast<StoreInst>(It)) { 3023 // The store destination must be an alloca that isn't interesting for 3024 // ASan to instrument. These are moved up before InsBefore, and they're 3025 // not interesting because allocas for arguments can be mem2reg'd. 3026 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand()); 3027 if (!Alloca || ASan.isInterestingAlloca(*Alloca)) 3028 continue; 3029 3030 Value *Val = Store->getValueOperand(); 3031 bool IsDirectArgInit = isa<Argument>(Val); 3032 bool IsArgInitViaCast = 3033 isa<CastInst>(Val) && 3034 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) && 3035 // Check that the cast appears directly before the store. Otherwise 3036 // moving the cast before InsBefore may break the IR. 3037 Val == It->getPrevNonDebugInstruction(); 3038 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast; 3039 if (!IsArgInit) 3040 continue; 3041 3042 if (IsArgInitViaCast) 3043 InitInsts.push_back(cast<Instruction>(Val)); 3044 InitInsts.push_back(Store); 3045 continue; 3046 } 3047 3048 // Do not reorder past unknown instructions: argument initialization should 3049 // only involve casts and stores. 3050 return; 3051 } 3052 } 3053 3054 void FunctionStackPoisoner::processStaticAllocas() { 3055 if (AllocaVec.empty()) { 3056 assert(StaticAllocaPoisonCallVec.empty()); 3057 return; 3058 } 3059 3060 int StackMallocIdx = -1; 3061 DebugLoc EntryDebugLocation; 3062 if (auto SP = F.getSubprogram()) 3063 EntryDebugLocation = 3064 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP); 3065 3066 Instruction *InsBefore = AllocaVec[0]; 3067 IRBuilder<> IRB(InsBefore); 3068 3069 // Make sure non-instrumented allocas stay in the entry block. Otherwise, 3070 // debug info is broken, because only entry-block allocas are treated as 3071 // regular stack slots. 3072 auto InsBeforeB = InsBefore->getParent(); 3073 assert(InsBeforeB == &F.getEntryBlock()); 3074 for (auto *AI : StaticAllocasToMoveUp) 3075 if (AI->getParent() == InsBeforeB) 3076 AI->moveBefore(InsBefore); 3077 3078 // Move stores of arguments into entry-block allocas as well. This prevents 3079 // extra stack slots from being generated (to house the argument values until 3080 // they can be stored into the allocas). This also prevents uninitialized 3081 // values from being shown in backtraces. 3082 SmallVector<Instruction *, 8> ArgInitInsts; 3083 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts); 3084 for (Instruction *ArgInitInst : ArgInitInsts) 3085 ArgInitInst->moveBefore(InsBefore); 3086 3087 // If we have a call to llvm.localescape, keep it in the entry block. 
3088 if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore); 3089 3090 SmallVector<ASanStackVariableDescription, 16> SVD; 3091 SVD.reserve(AllocaVec.size()); 3092 for (AllocaInst *AI : AllocaVec) { 3093 ASanStackVariableDescription D = {AI->getName().data(), 3094 ASan.getAllocaSizeInBytes(*AI), 3095 0, 3096 AI->getAlign().value(), 3097 AI, 3098 0, 3099 0}; 3100 SVD.push_back(D); 3101 } 3102 3103 // Minimal header size (left redzone) is 4 pointers, 3104 // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms. 3105 uint64_t Granularity = 1ULL << Mapping.Scale; 3106 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity); 3107 const ASanStackFrameLayout &L = 3108 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize); 3109 3110 // Build AllocaToSVDMap for ASanStackVariableDescription lookup. 3111 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap; 3112 for (auto &Desc : SVD) 3113 AllocaToSVDMap[Desc.AI] = &Desc; 3114 3115 // Update SVD with information from lifetime intrinsics. 3116 for (const auto &APC : StaticAllocaPoisonCallVec) { 3117 assert(APC.InsBefore); 3118 assert(APC.AI); 3119 assert(ASan.isInterestingAlloca(*APC.AI)); 3120 assert(APC.AI->isStaticAlloca()); 3121 3122 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI]; 3123 Desc.LifetimeSize = Desc.Size; 3124 if (const DILocation *FnLoc = EntryDebugLocation.get()) { 3125 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) { 3126 if (LifetimeLoc->getFile() == FnLoc->getFile()) 3127 if (unsigned Line = LifetimeLoc->getLine()) 3128 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line); 3129 } 3130 } 3131 } 3132 3133 auto DescriptionString = ComputeASanStackFrameDescription(SVD); 3134 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n"); 3135 uint64_t LocalStackSize = L.FrameSize; 3136 bool DoStackMalloc = 3137 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never && 3138 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize; 3139 bool DoDynamicAlloca = ClDynamicAllocaStack; 3140 // Don't do dynamic alloca or stack malloc if: 3141 // 1) There is inline asm: too often it makes assumptions on which registers 3142 // are available. 3143 // 2) There is a returns_twice call (typically setjmp), which is 3144 // optimization-hostile, and doesn't play well with introduced indirect 3145 // register-relative calculation of local variable addresses. 3146 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall; 3147 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall; 3148 3149 Value *StaticAlloca = 3150 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false); 3151 3152 Value *FakeStack; 3153 Value *LocalStackBase; 3154 Value *LocalStackBaseAlloca; 3155 uint8_t DIExprFlags = DIExpression::ApplyOffset; 3156 3157 if (DoStackMalloc) { 3158 LocalStackBaseAlloca = 3159 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base"); 3160 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) { 3161 // void *FakeStack = __asan_option_detect_stack_use_after_return 3162 // ? __asan_stack_malloc_N(LocalStackSize) 3163 // : nullptr; 3164 // void *LocalStackBase = (FakeStack) ? 
FakeStack :
3165       //                                 alloca(LocalStackSize);
3166       Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3167           kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3168       Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3169           IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3170           Constant::getNullValue(IRB.getInt32Ty()));
3171       Instruction *Term =
3172           SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3173       IRBuilder<> IRBIf(Term);
3174       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3175       assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3176       Value *FakeStackValue =
3177           IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3178                            ConstantInt::get(IntptrTy, LocalStackSize));
3179       IRB.SetInsertPoint(InsBefore);
3180       FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3181                             ConstantInt::get(IntptrTy, 0));
3182     } else {
3183       // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3184       // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3185       // void *LocalStackBase = (FakeStack) ? FakeStack :
3186       //                        alloca(LocalStackSize);
3187       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3188       FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3189                                  ConstantInt::get(IntptrTy, LocalStackSize));
3190     }
3191     Value *NoFakeStack =
3192         IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3193     Instruction *Term =
3194         SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3195     IRBuilder<> IRBIf(Term);
3196     Value *AllocaValue =
3197         DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3198 
3199     IRB.SetInsertPoint(InsBefore);
3200     LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3201     IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3202     DIExprFlags |= DIExpression::DerefBefore;
3203   } else {
3204     // void *FakeStack = nullptr;
3205     // void *LocalStackBase = alloca(LocalStackSize);
3206     FakeStack = ConstantInt::get(IntptrTy, 0);
3207     LocalStackBase =
3208         DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3209     LocalStackBaseAlloca = LocalStackBase;
3210   }
3211 
3212   // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3213   // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3214   // later passes and can result in dropped variable coverage in debug info.
3215   Value *LocalStackBaseAllocaPtr =
3216       isa<PtrToIntInst>(LocalStackBaseAlloca)
3217           ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3218           : LocalStackBaseAlloca;
3219   assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3220          "Variable descriptions relative to ASan stack base will be dropped");
3221 
3222   // Replace Alloca instructions with base+offset.
3223   for (const auto &Desc : SVD) {
3224     AllocaInst *AI = Desc.AI;
3225     replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3226                       Desc.Offset);
3227     Value *NewAllocaPtr = IRB.CreateIntToPtr(
3228         IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3229         AI->getType());
3230     AI->replaceAllUsesWith(NewAllocaPtr);
3231   }
3232 
3233   // The left-most redzone has enough space for at least 4 pointers.
3234   // Write the Magic value to redzone[0].
3235   Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3236   IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3237                   BasePlus0);
3238   // Write the frame description constant to redzone[1].
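  // (For reference, a sketch of the frame header written here: word 0 holds
  // kCurrentStackFrameMagic and is rewritten to kRetiredStackFrameMagic before
  // each return, word 1 holds a pointer to the frame description string, and
  // word 2 holds the PC of the function; each word is ASan.LongSize / 8 bytes.)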
3239 Value *BasePlus1 = IRB.CreateIntToPtr( 3240 IRB.CreateAdd(LocalStackBase, 3241 ConstantInt::get(IntptrTy, ASan.LongSize / 8)), 3242 IntptrPtrTy); 3243 GlobalVariable *StackDescriptionGlobal = 3244 createPrivateGlobalForString(*F.getParent(), DescriptionString, 3245 /*AllowMerging*/ true, kAsanGenPrefix); 3246 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy); 3247 IRB.CreateStore(Description, BasePlus1); 3248 // Write the PC to redzone[2]. 3249 Value *BasePlus2 = IRB.CreateIntToPtr( 3250 IRB.CreateAdd(LocalStackBase, 3251 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)), 3252 IntptrPtrTy); 3253 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2); 3254 3255 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L); 3256 3257 // Poison the stack red zones at the entry. 3258 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB); 3259 // As mask we must use most poisoned case: red zones and after scope. 3260 // As bytes we can use either the same or just red zones only. 3261 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase); 3262 3263 if (!StaticAllocaPoisonCallVec.empty()) { 3264 const auto &ShadowInScope = GetShadowBytes(SVD, L); 3265 3266 // Poison static allocas near lifetime intrinsics. 3267 for (const auto &APC : StaticAllocaPoisonCallVec) { 3268 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI]; 3269 assert(Desc.Offset % L.Granularity == 0); 3270 size_t Begin = Desc.Offset / L.Granularity; 3271 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity; 3272 3273 IRBuilder<> IRB(APC.InsBefore); 3274 copyToShadow(ShadowAfterScope, 3275 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End, 3276 IRB, ShadowBase); 3277 } 3278 } 3279 3280 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0); 3281 SmallVector<uint8_t, 64> ShadowAfterReturn; 3282 3283 // (Un)poison the stack before all ret instructions. 3284 for (Instruction *Ret : RetVec) { 3285 IRBuilder<> IRBRet(Ret); 3286 // Mark the current frame as retired. 3287 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic), 3288 BasePlus0); 3289 if (DoStackMalloc) { 3290 assert(StackMallocIdx >= 0); 3291 // if FakeStack != 0 // LocalStackBase == FakeStack 3292 // // In use-after-return mode, poison the whole stack frame. 
3293 // if StackMallocIdx <= 4 3294 // // For small sizes inline the whole thing: 3295 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize); 3296 // **SavedFlagPtr(FakeStack) = 0 3297 // else 3298 // __asan_stack_free_N(FakeStack, LocalStackSize) 3299 // else 3300 // <This is not a fake stack; unpoison the redzones> 3301 Value *Cmp = 3302 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy)); 3303 Instruction *ThenTerm, *ElseTerm; 3304 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm); 3305 3306 IRBuilder<> IRBPoison(ThenTerm); 3307 if (StackMallocIdx <= 4) { 3308 int ClassSize = kMinStackMallocSize << StackMallocIdx; 3309 ShadowAfterReturn.resize(ClassSize / L.Granularity, 3310 kAsanStackUseAfterReturnMagic); 3311 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison, 3312 ShadowBase); 3313 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd( 3314 FakeStack, 3315 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8)); 3316 Value *SavedFlagPtr = IRBPoison.CreateLoad( 3317 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy)); 3318 IRBPoison.CreateStore( 3319 Constant::getNullValue(IRBPoison.getInt8Ty()), 3320 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy())); 3321 } else { 3322 // For larger frames call __asan_stack_free_*. 3323 IRBPoison.CreateCall( 3324 AsanStackFreeFunc[StackMallocIdx], 3325 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)}); 3326 } 3327 3328 IRBuilder<> IRBElse(ElseTerm); 3329 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase); 3330 } else { 3331 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase); 3332 } 3333 } 3334 3335 // We are done. Remove the old unused alloca instructions. 3336 for (auto AI : AllocaVec) AI->eraseFromParent(); 3337 } 3338 3339 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, 3340 IRBuilder<> &IRB, bool DoPoison) { 3341 // For now just insert the call to ASan runtime. 3342 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); 3343 Value *SizeArg = ConstantInt::get(IntptrTy, Size); 3344 IRB.CreateCall( 3345 DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc, 3346 {AddrArg, SizeArg}); 3347 } 3348 3349 // Handling llvm.lifetime intrinsics for a given %alloca: 3350 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca. 3351 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect 3352 // invalid accesses) and unpoison it for llvm.lifetime.start (the memory 3353 // could be poisoned by previous llvm.lifetime.end instruction, as the 3354 // variable may go in and out of scope several times, e.g. in loops). 3355 // (3) if we poisoned at least one %alloca in a function, 3356 // unpoison the whole stack frame at function exit. 3357 void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) { 3358 IRBuilder<> IRB(AI); 3359 3360 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign()); 3361 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1; 3362 3363 Value *Zero = Constant::getNullValue(IntptrTy); 3364 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize); 3365 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask); 3366 3367 // Since we need to extend alloca with additional memory to locate 3368 // redzones, and OldSize is number of allocated blocks with 3369 // ElementSize size, get allocated memory size in bytes by 3370 // OldSize * ElementSize. 
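  // A worked example (illustrative only, assuming kAllocaRzSize == 32 and
  // Alignment == 32): for 'alloca i32, i64 10', OldSize = 10 * 4 = 40,
  // PartialSize = 40 & 31 = 8, Misalign = 32 - 8 = 24, PartialPadding = 24,
  // and NewSize = 40 + (32 + 32) + 24 = 128: a 32-byte left redzone, 40 bytes
  // of data, then 24 + 32 bytes of right redzone.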
3371   const unsigned ElementSize =
3372       F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3373   Value *OldSize =
3374       IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3375                     ConstantInt::get(IntptrTy, ElementSize));
3376 
3377   // PartialSize = OldSize % 32
3378   Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3379 
3380   // Misalign = kAllocaRzSize - PartialSize;
3381   Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3382 
3383   // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3384   Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3385   Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3386 
3387   // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3388   // Alignment is added to locate left redzone, PartialPadding for possible
3389   // partial redzone and kAllocaRzSize for right redzone respectively.
3390   Value *AdditionalChunkSize = IRB.CreateAdd(
3391       ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3392       PartialPadding);
3393 
3394   Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3395 
3396   // Insert a new alloca with the computed NewSize and Alignment.
3397   AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3398   NewAlloca->setAlignment(Alignment);
3399 
3400   // NewAddress = Address + Alignment
3401   Value *NewAddress =
3402       IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3403                     ConstantInt::get(IntptrTy, Alignment.value()));
3404 
3405   // Insert an __asan_alloca_poison call for the newly created alloca.
3406   IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
3407 
3408   // Store the last alloca's address in DynamicAllocaLayout. We'll need it
3409   // later to unpoison the dynamic allocas.
3410   IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3411 
3412   Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3413 
3414   // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
3415   AI->replaceAllUsesWith(NewAddressPtr);
3416 
3417   // We are done. Erase the old alloca from its parent.
3418   AI->eraseFromParent();
3419 }
3420 
3421 // isSafeAccess returns true if Addr is always inbounds with respect to its
3422 // base object. For example, it is a field access or an array access with
3423 // a constant inbounds index.
3424 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3425                                     Value *Addr, uint64_t TypeSize) const {
3426   SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
3427   if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
3428   uint64_t Size = SizeOffset.first.getZExtValue();
3429   int64_t Offset = SizeOffset.second.getSExtValue();
3430   // Three checks are required to ensure safety:
3431   // . Offset >= 0 (since the offset is given from the base ptr)
3432   // . Size >= Offset (unsigned)
3433   // . Size - Offset >= NeededSize (unsigned)
3434   return Offset >= 0 && Size >= uint64_t(Offset) &&
3435          Size - uint64_t(Offset) >= TypeSize / 8;
3436 }
3437 
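// An illustrative example of the checks above: a 4-byte load of the second
// field of a 'struct { int32_t a; int32_t b; }' object yields Size = 8,
// Offset = 4 and TypeSize = 32, so all three checks pass; an 8-byte load at
// the same address fails the last check (8 - 4 < 8) and is not considered
// trivially safe.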