//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//   http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <system_error>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel = ~(uint64_t)0;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSSimShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSSimShadowOffset64 = kDefaultShadowOffset64;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000;  // < 2G.
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
    "__asan_unregister_globals";
static const char *const kAsanRegisterImageGlobalsName =
    "__asan_register_image_globals";
static const char *const kAsanUnregisterImageGlobalsName =
    "__asan_unregister_image_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init";
static const char *const kAsanVersionCheckName =
    "__asan_version_mismatch_check_v8";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "__asan_gen_";
static const char *const kODRGenPrefix = "__odr_asan_gen_";
static const char *const kSanCovGenPrefix = "__sancov_gen_";
static const char *const kAsanSetShadowPrefix = "__asan_set_shadow_";
static const char *const kAsanPoisonStackMemoryName =
    "__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
    "__asan_unpoison_stack_memory";
static const char *const kAsanGlobalsRegisteredFlagName =
    "__asan_globals_registered";

static const char *const kAsanOptionDetectUseAfterReturn =
    "__asan_option_detect_stack_use_after_return";

static const char *const kAsanShadowMemoryDynamicAddress =
    "__asan_shadow_memory_dynamic_address";

static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const unsigned kAllocaRzSize = 32;

// Command-line flags.
static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));
static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));
static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));
static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
                                      cl::desc("Check stack-use-after-return"),
                                      cl::Hidden, cl::init(true));
static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));
static cl::opt<bool> ClExperimentalPoisoning(
    "asan-experimental-poisoning",
    cl::desc("Enable experimental red zones and scope poisoning"), cl::Hidden,
    cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));
static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));
static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));
static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented contains more than "
        "this number of memory accesses, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));
static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));
static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + offset
static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClMappingOffset(
    "asan-mapping-offset",
    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden,
    cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAliasForGlobals("asan-use-private-alias",
                                cl::desc("Use private aliases for global"
                                         " variables"),
                                cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseMachOGlobalsSection("asan-globals-live-support",
                             cl::desc("Use linker features to support dead "
                                      "code stripping of globals "
                                      "(Mach-O only)"),
                             cl::Hidden, cl::init(false));

// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));
static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {
/// Frontend-provided metadata for source location.
struct LocationMetadata {
  StringRef Filename;
  int LineNo;
  int ColumnNo;

  LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}

  bool empty() const { return Filename.empty(); }

  void parse(MDNode *MDN) {
    assert(MDN->getNumOperands() == 3);
    MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
    Filename = DIFilename->getString();
    LineNo =
        mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
    ColumnNo =
        mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
  }
};

/// Frontend-provided metadata for global variables.
class GlobalsMetadata {
public:
  struct Entry {
    Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {}
    LocationMetadata SourceLoc;
    StringRef Name;
    bool IsDynInit;
    bool IsBlacklisted;
  };

  GlobalsMetadata() : inited_(false) {}

  void reset() {
    inited_ = false;
    Entries.clear();
  }

  void init(Module &M) {
    assert(!inited_);
    inited_ = true;
    NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
    if (!Globals) return;
    for (auto MDN : Globals->operands()) {
      // Metadata node contains the global and the fields of "Entry".
      assert(MDN->getNumOperands() == 5);
      auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
      // The optimizer may optimize away a global entirely.
      if (!GV) continue;
      // We can already have an entry for GV if it was merged with another
      // global.
      Entry &E = Entries[GV];
      if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
        E.SourceLoc.parse(Loc);
      if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
        E.Name = Name->getString();
      ConstantInt *IsDynInit =
          mdconst::extract<ConstantInt>(MDN->getOperand(3));
      E.IsDynInit |= IsDynInit->isOne();
      ConstantInt *IsBlacklisted =
          mdconst::extract<ConstantInt>(MDN->getOperand(4));
      E.IsBlacklisted |= IsBlacklisted->isOne();
    }
  }

  /// Returns metadata entry for a given global.
  Entry get(GlobalVariable *G) const {
    auto Pos = Entries.find(G);
    return (Pos != Entries.end()) ? Pos->second : Entry();
  }

private:
  bool inited_;
  DenseMap<GlobalVariable *, Entry> Entries;
};

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
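/// For example, with the default Scale of 3 and the 64-bit default offset
/// kDefaultShadowOffset64 (1ULL << 44), the application address
/// 0x100000000000 maps to the shadow byte at
/// (0x100000000000 >> 3) + 0x100000000000 = 0x120000000000
/// (illustrative arithmetic only; the actual offset depends on the target).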
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
};

static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
                                       bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
                 TargetTriple.getArch() == llvm::Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == llvm::Triple::systemz;
  bool IsX86 = TargetTriple.getArch() == llvm::Triple::x86;
  bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
  bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
                  TargetTriple.getArch() == llvm::Triple::mipsel;
  bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
                  TargetTriple.getArch() == llvm::Triple::mips64el;
  bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64;
  bool IsWindows = TargetTriple.isOSWindows();

  ShadowMapping Mapping;

  if (LongSize == 32) {
    // Android is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsAndroid)
      Mapping.Offset = 0;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsIOS)
      // If we're targeting iOS and x86, the binary is built for iOS simulator.
      Mapping.Offset = IsX86 ? kIOSSimShadowOffset32 : kIOSShadowOffset32;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else {  // LongSize == 64
    if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = kSmallX86_64ShadowOffset;
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      // If we're targeting iOS and x86, the binary is built for iOS simulator.
      // We are using dynamic shadow offset on the 64-bit devices.
      Mapping.Offset =
          IsX86_64 ? kIOSSimShadowOffset64 : kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) if the offset
  // is a power of two, but on ppc64 we have to use add since the shadow
  // offset is not necessarily 1/8-th of the address space. On SystemZ,
  // we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
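  // For instance, the power-of-two default offsets (1ULL << 29, 1ULL << 44)
  // satisfy the check below and are OR-ed in, while kSmallX86_64ShadowOffset
  // (0x7FFF8000) is not a power of two, so x86-64 Linux uses ADD.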
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;

  return Mapping;
}

static size_t RedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
  explicit AddressSanitizer(bool CompileKernel = false, bool Recover = false,
                            bool UseAfterScope = false)
      : FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan),
        Recover(Recover || ClRecover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        LocalDynamicShadow(nullptr) {
    initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
  }
  StringRef getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
    uint64_t ArraySize = 1;
    if (AI.isArrayAllocation()) {
      const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
      assert(CI && "non-constant array size");
      ArraySize = CI->getZExtValue();
    }
    Type *Ty = AI.getAllocatedType();
    uint64_t SizeInBytes =
        AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
    return SizeInBytes * ArraySize;
  }
  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  /// If it is an interesting memory access, return the PointerOperand
  /// and set IsWrite/Alignment. Otherwise return nullptr.
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment);
  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
                     bool UseCalls, const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  void instrumentUnusualSizeOrAlignment(Instruction *I, Value *Addr,
                                        uint32_t TypeSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool runOnFunction(Function &F) override;
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);
  bool doInitialization(Module &M) override;
  bool doFinalization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid

  DominatorTree &getDominatorTree() const { return *DT; }

private:
  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;
    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }
    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  Type *IntptrTy;
  ShadowMapping Mapping;
  DominatorTree *DT;
  Function *AsanCtorFunction = nullptr;
  Function *AsanInitFunction = nullptr;
  Function *AsanHandleNoReturnFunc;
  Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
  // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
  Function *AsanErrorCallback[2][2][kNumberOfAccessSizes];
  Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
  // This array is indexed by AccessIsWrite and Experiment.
  Function *AsanErrorCallbackSized[2][2];
  Function *AsanMemoryAccessCallbackSized[2][2];
  Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
  InlineAsm *EmptyAsm;
  Value *LocalDynamicShadow;
  GlobalsMetadata GlobalsMD;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  friend struct FunctionStackPoisoner;
};

class AddressSanitizerModule : public ModulePass {
public:
  explicit AddressSanitizerModule(bool CompileKernel = false,
                                  bool Recover = false)
      : ModulePass(ID), CompileKernel(CompileKernel || ClEnableKasan),
        Recover(Recover || ClRecover) {}
  bool runOnModule(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid
  StringRef getPassName() const override { return "AddressSanitizerModule"; }

private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
  bool ShouldInstrumentGlobal(GlobalVariable *G);
  bool ShouldUseMachOGlobalsSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  size_t MinRedzoneSizeForGlobal() const {
    return RedzoneSizeForScale(Mapping.Scale);
  }

  GlobalsMetadata GlobalsMD;
  bool CompileKernel;
  bool Recover;
  Type *IntptrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  Function *AsanPoisonGlobals;
  Function *AsanUnpoisonGlobals;
  Function *AsanRegisterGlobals;
  Function *AsanUnregisterGlobals;
  Function *AsanRegisterImageGlobals;
  Function *AsanUnregisterImageGlobals;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallSetVector<AllocaInst *, 16> NonInstrumentedStaticAllocaVec;
  SmallVector<Instruction *, 8> RetVec;
  unsigned StackAlignment;

  Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  Function *AsanSetShadowFunc[0x100] = {};
  Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
  Function *AsanAllocaPoisonFunc, *AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
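  // These records are gathered from llvm.lifetime.start/end intrinsics in
  // visitIntrinsicInst below and later replayed when poisoning static and
  // dynamic allocas.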
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  // Maps Value to an AllocaInst from which the Value is originated.
  typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
  AllocaForValueMapTy AllocaForValue;

  bool HasNonEmptyInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  std::unique_ptr<CallInst> EmptyInlineAsm;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F),
        ASan(ASan),
        DIB(*F.getParent(), /*AllowUnresolved*/ false),
        C(ASan.C),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)),
        Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale),
        EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}

  bool runOnFunction() {
    if (!ClStack) return false;
    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      DEBUG(dbgs() << F);
    }
    return true;
  }

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoisons everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// \brief Collect all Ret instructions.
  void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }

  /// \brief Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// \brief Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset
    // intrinsic for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    IRB.CreateCall(AsanAllocasUnpoisonFunc,
                   {IRB.CreateLoad(DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
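  // Unpoisoning calls are emitted before every collected ret/resume/cleanupret
  // instruction and before every llvm.stackrestore call (see the loops below).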
  void unpoisonDynamicAllocas() {
    for (auto &Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (auto &StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // should replace this call with another one with changed parameters and
  // replace all its uses with the new address, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// \brief Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!ASan.isInterestingAlloca(AI)) {
      if (AI.isStaticAlloca()) NonInstrumentedStaticAllocaVec.insert(&AI);
      return;
    }

    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// \brief Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
    if (!AI || !ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallSite(CallSite CS) {
    Instruction *I = CS.getInstruction();
    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      HasNonEmptyInlineAsm |=
          CI->isInlineAsm() && !CI->isIdenticalTo(EmptyInlineAsm.get());
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  bool doesDominateAllExits(const Instruction *I) const {
    for (auto Ret : RetVec) {
      if (!ASan.getDominatorTree().dominates(I, Ret)) return false;
    }
    return true;
  }

  /// Finds alloca where the value comes from.
  AllocaInst *findAllocaForValue(Value *V);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // anonymous namespace

char AddressSanitizer::ID = 0;
INITIALIZE_PASS_BEGIN(
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel,
                                                       bool Recover,
                                                       bool UseAfterScope) {
  assert(!CompileKernel || Recover);
  return new AddressSanitizer(CompileKernel, Recover, UseAfterScope);
}

char AddressSanitizerModule::ID = 0;
INITIALIZE_PASS(
    AddressSanitizerModule, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
    "ModulePass",
    false, false)
ModulePass *llvm::createAddressSanitizerModulePass(bool CompileKernel,
                                                   bool Recover) {
  assert(!CompileKernel || Recover);
  return new AddressSanitizerModule(CompileKernel, Recover);
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

// \brief Create a constant for Str so that we can pass it to the run-time lib.
static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
                                                    bool AllowMerging) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  // We use private linkage for module-local strings. If they can be merged
  // with another one, we set the unnamed_addr attribute.
  GlobalVariable *GV =
      new GlobalVariable(M, StrConst->getType(), true,
                         GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
  if (AllowMerging) GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(1);  // Strings may not be merged w/o setting align 1.
  return GV;
}

/// \brief Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
                                                       LocationMetadata MD) {
  Constant *LocData[] = {
      createPrivateGlobalForString(M, MD.Filename, true),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
  };
  auto LocStruct = ConstantStruct::getAnon(LocData);
  auto GV = new GlobalVariable(M, LocStruct->getType(), true,
                               GlobalValue::PrivateLinkage, LocStruct,
                               kAsanGenPrefix);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  return GV;
}

/// \brief Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument asan globals.
  if (G->getName().startswith(kAsanGenPrefix) ||
      G->getName().startswith(kSanCovGenPrefix) ||
      G->getName().startswith(kODRGenPrefix))
    return true;

  // Do not instrument gcov counter arrays.
  if (G->getName() == "__llvm_gcov_ctr")
    return true;

  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        AsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);

  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca());

  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
}

/// If I is an interesting memory access, return the PointerOperand
/// and set IsWrite/Alignment. Otherwise return nullptr.
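/// Loads, stores, and atomic rmw/cmpxchg instructions are candidates;
/// accesses in non-zero address spaces and accesses to promotable allocas
/// are treated as not interesting (see the body below).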
Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                   bool *IsWrite,
                                                   uint64_t *TypeSize,
                                                   unsigned *Alignment) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (PtrOperand) {
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;
  }

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (ClSkipPromotableAllocas)
    if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
      return isInterestingAlloca(*AI) ? AI : nullptr;

  return PtrOperand;
}

static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational()) return false;
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub) return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}

bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization we don't
  // have to instrument it. However, if a global does not have an initializer
  // at all, we assume it has a dynamic initializer (in another TU).
  return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}

void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
    Instruction *I) {
  IRBuilder<> IRB(I);
  Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (Value *&i : Param) {
    if (i->getType()->isPointerTy())
      i = IRB.CreatePointerCast(i, IntptrTy);
  }
  IRB.CreateCall(F, Param);
}

void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     Instruction *I, bool UseCalls,
                                     const DataLayout &DL) {
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment);
  assert(Addr);

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, you set Exp to a non-zero value (a mask of
  // optimization experiments that want to remove instrumentation of this
  // instruction). If Exp is non-zero, this pass will emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1).
  // These calls make the runtime terminate the program in a special way (with
  // a different exit status). Then you run the new compiler on a buggy corpus,
  // collect the special terminations (ideally, you don't see them at all --
  // no false negatives) and make the decision on the optimization.
  uint32_t Exp = ClForceExperiment;

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  if (IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
  // if the data is properly aligned.
  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
       TypeSize == 128) &&
      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
    return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls,
                             Exp);
  instrumentUnusualSizeOrAlignment(I, Addr, TypeSize, IsWrite, nullptr,
                                   UseCalls, Exp);
}

Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                 Value *Addr, bool IsWrite,
                                                 size_t AccessSizeIndex,
                                                 Value *SizeArgument,
                                                 uint32_t Exp) {
  IRBuilder<> IRB(InsertBefore);
  Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
  CallInst *Call = nullptr;
  if (SizeArgument) {
    if (Exp == 0)
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
                            {Addr, SizeArgument});
    else
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
                            {Addr, SizeArgument, ExpVal});
  } else {
    if (Exp == 0)
      Call =
          IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
    else
      Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
                            {Addr, ExpVal});
  }

  // We don't do Call->setDoesNotReturn() because the BB already has
  // UnreachableInst at the end.
  // This EmptyAsm is required to avoid callback merge.
  IRB.CreateCall(EmptyAsm, {});
  return Call;
}

Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                           Value *ShadowValue,
                                           uint32_t TypeSize) {
  size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte =
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}

void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
                                         uint32_t Exp) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);

  if (UseCalls) {
    if (Exp == 0)
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
                     AddrLong);
    else
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
    return;
  }

  Type *ShadowTy =
      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *CmpVal = Constant::getNullValue(ShadowTy);
  Value *ShadowValue =
      IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));

  Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
  size_t Granularity = 1ULL << Mapping.Scale;
  TerminatorInst *CrashTerm = nullptr;

  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
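    // Roughly, the emitted sequence is: if the loaded shadow byte is non-zero,
    // branch to this slow-path block, which re-checks the last accessed byte
    // against the shadow value (see createSlowPathCmp) before reporting.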
    TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    if (Recover) {
      CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
    } else {
      BasicBlock *CrashBlock =
          BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
      CrashTerm = new UnreachableInst(*C, CrashBlock);
      BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
      ReplaceInstWithInst(CheckTerm, NewTerm);
    }
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
  }

  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
                                         AccessSizeIndex, SizeArgument, Exp);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}

// Instrument unusual size or unusual alignment.
// We cannot do it with a single check, so we do a 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
    Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite,
    Value *SizeArgument, bool UseCalls, uint32_t Exp) {
  IRBuilder<> IRB(I);
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    if (Exp == 0)
      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
                     {AddrLong, Size});
    else
      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
                     {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
  } else {
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
        Addr->getType());
    instrumentAddress(I, I, Addr, 8, IsWrite, Size, false, Exp);
    instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false, Exp);
  }
}

void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
                                                  GlobalValue *ModuleName) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(&GlobalInit.front(),
                  GlobalInit.front().getFirstInsertionPt());

  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit.getBasicBlockList())
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI);
}

void AddressSanitizerModule::createInitializerPoisonCalls(
    Module &M, GlobalValue *ModuleName) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");

  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP)) continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);

    // Must have a function or null ptr.
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() == kAsanModuleCtorName) continue;
      ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
      poisonOneInitializer(*F, ModuleName);
    }
  }
}

bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
  Type *Ty = G->getValueType();
  DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  if (GlobalsMD.get(G).IsBlacklisted) return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (GlobalWasGeneratedByCompiler(G)) return false;  // Our own globals.
  // Touch only those globals that will not be defined in other modules.
  // Don't handle ODR linkage types and COMDATs since other modules may be
  // built without ASan.
  if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
      G->getLinkage() != GlobalVariable::PrivateLinkage &&
      G->getLinkage() != GlobalVariable::InternalLinkage)
    return false;
  if (G->hasComdat()) return false;
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this Global if the alignment is large.
  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;

  if (G->hasSection()) {
    StringRef Section = G->getSection();

    // Globals from llvm.metadata aren't emitted; do not instrument them.
    if (Section == "llvm.metadata") return false;
    // Do not instrument globals from special LLVM sections.
    if (Section.find("__llvm") != StringRef::npos ||
        Section.find("__LLVM") != StringRef::npos)
      return false;

    // Do not instrument function pointers to initialization and termination
    // routines: the dynamic linker will not properly handle redzones.
    if (Section.startswith(".preinit_array") ||
        Section.startswith(".init_array") ||
        Section.startswith(".fini_array")) {
      return false;
    }

    // Callbacks put into the CRT initializer/terminator sections
    // should not be instrumented.
    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (Section.startswith(".CRT")) {
      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
      return false;
    }

    if (TargetTriple.isOSBinFormatMachO()) {
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      bool TAAParsed;
      std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
      assert(ErrorCode.empty() && "Invalid section specifier.");

      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      // them.
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
        DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
        return false;
      }
      // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
        return false;
      }
      // The linker merges the contents of cstring_literals and removes the
      // trailing zeroes.
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
        return false;
      }
    }
  }

  return true;
}

// On Mach-O platforms, we emit global metadata in a separate section of the
// binary in order to allow the linker to properly dead strip. This is only
// supported on recent versions of ld64.
bool AddressSanitizerModule::ShouldUseMachOGlobalsSection() const {
  if (!ClUseMachOGlobalsSection)
    return false;

  if (!TargetTriple.isOSBinFormatMachO())
    return false;

  if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
    return true;
  if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
    return true;
  if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
    return true;

  return false;
}

void AddressSanitizerModule::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);

  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr));
  AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnpoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr));
  AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);

  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnregisterGlobals = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
  AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);

  // Declare the functions that find globals in a shared object and then invoke
  // the (un)register function on them.
1453 AsanRegisterImageGlobals = checkSanitizerInterfaceFunction( 1454 M.getOrInsertFunction(kAsanRegisterImageGlobalsName, 1455 IRB.getVoidTy(), IntptrTy, nullptr)); 1456 AsanRegisterImageGlobals->setLinkage(Function::ExternalLinkage); 1457 1458 AsanUnregisterImageGlobals = checkSanitizerInterfaceFunction( 1459 M.getOrInsertFunction(kAsanUnregisterImageGlobalsName, 1460 IRB.getVoidTy(), IntptrTy, nullptr)); 1461 AsanUnregisterImageGlobals->setLinkage(Function::ExternalLinkage); 1462 } 1463 1464 // This function replaces all global variables with new variables that have 1465 // trailing redzones. It also creates a function that poisons 1466 // redzones and inserts this function into llvm.global_ctors. 1467 bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { 1468 GlobalsMD.init(M); 1469 1470 SmallVector<GlobalVariable *, 16> GlobalsToChange; 1471 1472 for (auto &G : M.globals()) { 1473 if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G); 1474 } 1475 1476 size_t n = GlobalsToChange.size(); 1477 if (n == 0) return false; 1478 1479 // A global is described by a structure 1480 // size_t beg; 1481 // size_t size; 1482 // size_t size_with_redzone; 1483 // const char *name; 1484 // const char *module_name; 1485 // size_t has_dynamic_init; 1486 // void *source_location; 1487 // size_t odr_indicator; 1488 // We initialize an array of such structures and pass it to a run-time call. 1489 StructType *GlobalStructTy = 1490 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, 1491 IntptrTy, IntptrTy, IntptrTy, nullptr); 1492 SmallVector<Constant *, 16> Initializers(n); 1493 1494 bool HasDynamicallyInitializedGlobals = false; 1495 1496 // We shouldn't merge same module names, as this string serves as unique 1497 // module ID in runtime. 1498 GlobalVariable *ModuleName = createPrivateGlobalForString( 1499 M, M.getModuleIdentifier(), /*AllowMerging*/ false); 1500 1501 auto &DL = M.getDataLayout(); 1502 for (size_t i = 0; i < n; i++) { 1503 static const uint64_t kMaxGlobalRedzone = 1 << 18; 1504 GlobalVariable *G = GlobalsToChange[i]; 1505 1506 auto MD = GlobalsMD.get(G); 1507 StringRef NameForGlobal = G->getName(); 1508 // Create string holding the global name (use global name from metadata 1509 // if it's available, otherwise just write the name of global variable). 1510 GlobalVariable *Name = createPrivateGlobalForString( 1511 M, MD.Name.empty() ? NameForGlobal : MD.Name, 1512 /*AllowMerging*/ true); 1513 1514 Type *Ty = G->getValueType(); 1515 uint64_t SizeInBytes = DL.getTypeAllocSize(Ty); 1516 uint64_t MinRZ = MinRedzoneSizeForGlobal(); 1517 // MinRZ <= RZ <= kMaxGlobalRedzone 1518 // and trying to make RZ to be ~ 1/4 of SizeInBytes. 1519 uint64_t RZ = std::max( 1520 MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ)); 1521 uint64_t RightRedzoneSize = RZ; 1522 // Round up to MinRZ 1523 if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ); 1524 assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0); 1525 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); 1526 1527 StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr); 1528 Constant *NewInitializer = 1529 ConstantStruct::get(NewTy, G->getInitializer(), 1530 Constant::getNullValue(RightRedZoneTy), nullptr); 1531 1532 // Create a new global variable with enough space for a redzone. 
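    // (Illustrative sizing example, assuming MinRZ == 32: for a 1000-byte
    // global, RZ = max(32, min(1 << 18, (1000 / 32 / 4) * 32)) == 224, and 24
    // more bytes round the payload up to a multiple of MinRZ, so
    // RightRedzoneSize == 248 and the new global occupies 1248 bytes.)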
1533 GlobalValue::LinkageTypes Linkage = G->getLinkage(); 1534 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) 1535 Linkage = GlobalValue::InternalLinkage; 1536 GlobalVariable *NewGlobal = 1537 new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer, 1538 "", G, G->getThreadLocalMode()); 1539 NewGlobal->copyAttributesFrom(G); 1540 NewGlobal->setAlignment(MinRZ); 1541 1542 // Transfer the debug info. The payload starts at offset zero so we can 1543 // copy the debug info over as is. 1544 SmallVector<DIGlobalVariable *, 1> GVs; 1545 G->getDebugInfo(GVs); 1546 for (auto *GV : GVs) 1547 NewGlobal->addDebugInfo(GV); 1548 1549 Value *Indices2[2]; 1550 Indices2[0] = IRB.getInt32(0); 1551 Indices2[1] = IRB.getInt32(0); 1552 1553 G->replaceAllUsesWith( 1554 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true)); 1555 NewGlobal->takeName(G); 1556 G->eraseFromParent(); 1557 1558 Constant *SourceLoc; 1559 if (!MD.SourceLoc.empty()) { 1560 auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc); 1561 SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy); 1562 } else { 1563 SourceLoc = ConstantInt::get(IntptrTy, 0); 1564 } 1565 1566 Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy()); 1567 GlobalValue *InstrumentedGlobal = NewGlobal; 1568 1569 bool CanUsePrivateAliases = 1570 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO(); 1571 if (CanUsePrivateAliases && ClUsePrivateAliasForGlobals) { 1572 // Create local alias for NewGlobal to avoid crash on ODR between 1573 // instrumented and non-instrumented libraries. 1574 auto *GA = GlobalAlias::create(GlobalValue::InternalLinkage, 1575 NameForGlobal + M.getName(), NewGlobal); 1576 1577 // With local aliases, we need to provide another externally visible 1578 // symbol __odr_asan_XXX to detect ODR violation. 1579 auto *ODRIndicatorSym = 1580 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage, 1581 Constant::getNullValue(IRB.getInt8Ty()), 1582 kODRGenPrefix + NameForGlobal, nullptr, 1583 NewGlobal->getThreadLocalMode()); 1584 1585 // Set meaningful attributes for indicator symbol. 1586 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility()); 1587 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass()); 1588 ODRIndicatorSym->setAlignment(1); 1589 ODRIndicator = ODRIndicatorSym; 1590 InstrumentedGlobal = GA; 1591 } 1592 1593 Initializers[i] = ConstantStruct::get( 1594 GlobalStructTy, 1595 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy), 1596 ConstantInt::get(IntptrTy, SizeInBytes), 1597 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), 1598 ConstantExpr::getPointerCast(Name, IntptrTy), 1599 ConstantExpr::getPointerCast(ModuleName, IntptrTy), 1600 ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, 1601 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy), nullptr); 1602 1603 if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true; 1604 1605 DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); 1606 } 1607 1608 1609 GlobalVariable *AllGlobals = nullptr; 1610 GlobalVariable *RegisteredFlag = nullptr; 1611 1612 // On recent Mach-O platforms, we emit the global metadata in a way that 1613 // allows the linker to properly strip dead globals. 1614 if (ShouldUseMachOGlobalsSection()) { 1615 // RegisteredFlag serves two purposes. First, we can pass it to dladdr() 1616 // to look up the loaded image that contains it. 
    // Second, we can store in it whether registration has already occurred,
    // to prevent duplicate registration.
    //
    // Common linkage allows us to coalesce needles defined in each object
    // file so that there's only one per shared library.
    RegisteredFlag = new GlobalVariable(
        M, IntptrTy, false, GlobalVariable::CommonLinkage,
        ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);

    // We also emit a structure which binds the liveness of the global
    // variable to the metadata struct.
    StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy, nullptr);

    // Keep the list of "Liveness" GVs created; they will be added to
    // llvm.compiler.used.
    SmallVector<Constant *, 16> LivenessGlobals;
    LivenessGlobals.reserve(n);

    for (size_t i = 0; i < n; i++) {
      GlobalVariable *Metadata = new GlobalVariable(
          M, GlobalStructTy, false, GlobalVariable::InternalLinkage,
          Initializers[i], "");
      Metadata->setSection("__DATA,__asan_globals,regular");
      Metadata->setAlignment(1); // don't leave padding in between

      auto LivenessBinder = ConstantStruct::get(
          LivenessTy, Initializers[i]->getAggregateElement(0u),
          ConstantExpr::getPointerCast(Metadata, IntptrTy), nullptr);

      // Recover the name of the variable this global is pointing to.
      StringRef GVName =
          Initializers[i]->getAggregateElement(0u)->getOperand(0)->getName();

      GlobalVariable *Liveness = new GlobalVariable(
          M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
          Twine("__asan_binder_") + GVName);
      Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
      LivenessGlobals.push_back(
          ConstantExpr::getBitCast(Liveness, IRB.getInt8PtrTy()));
    }

    if (!LivenessGlobals.empty()) {
      // Update llvm.compiler.used, adding the new liveness globals. This is
      // needed so that during LTO these variables stay alive. The alternative
      // would be to have the linker handle the LTO symbols, but libLTO's
      // current API does not expose access to the section for each symbol.
      if (GlobalVariable *LLVMUsed =
              M.getGlobalVariable("llvm.compiler.used")) {
        ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
        for (auto &V : Inits->operands())
          LivenessGlobals.push_back(cast<Constant>(&V));
        LLVMUsed->eraseFromParent();
      }
      llvm::ArrayType *ATy =
          llvm::ArrayType::get(IRB.getInt8PtrTy(), LivenessGlobals.size());
      auto *LLVMUsed = new llvm::GlobalVariable(
          M, ATy, false, llvm::GlobalValue::AppendingLinkage,
          llvm::ConstantArray::get(ATy, LivenessGlobals), "llvm.compiler.used");
      LLVMUsed->setSection("llvm.metadata");
    }
  } else {
    // On all other platforms, we just emit an array of global metadata
    // structures.
    ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
    AllGlobals = new GlobalVariable(
        M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
        ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
  }

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);

  // Create a call to register the globals with the runtime.
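  // Illustrative sketch of the resulting module ctor on a non-Mach-O target
  // (not emitted verbatim; the exact IR depends on target and options):
  //
  //   define internal void @asan.module_ctor() {
  //     call void @__asan_init()
  //     call void @__asan_version_mismatch_check_v8()
  //     call void @__asan_register_globals(i64 ptrtoint (... @0 to i64), i64 N)
  //     ret void
  //   }
  //
  // The matching asan.module_dtor created below undoes this via
  // __asan_unregister_globals (or __asan_unregister_image_globals on Mach-O).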
1692 if (ShouldUseMachOGlobalsSection()) { 1693 IRB.CreateCall(AsanRegisterImageGlobals, 1694 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); 1695 } else { 1696 IRB.CreateCall(AsanRegisterGlobals, 1697 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 1698 ConstantInt::get(IntptrTy, n)}); 1699 } 1700 1701 // We also need to unregister globals at the end, e.g., when a shared library 1702 // gets closed. 1703 Function *AsanDtorFunction = 1704 Function::Create(FunctionType::get(Type::getVoidTy(*C), false), 1705 GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); 1706 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); 1707 IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB)); 1708 1709 if (ShouldUseMachOGlobalsSection()) { 1710 IRB_Dtor.CreateCall(AsanUnregisterImageGlobals, 1711 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); 1712 } else { 1713 IRB_Dtor.CreateCall(AsanUnregisterGlobals, 1714 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 1715 ConstantInt::get(IntptrTy, n)}); 1716 } 1717 1718 appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority); 1719 1720 DEBUG(dbgs() << M); 1721 return true; 1722 } 1723 1724 bool AddressSanitizerModule::runOnModule(Module &M) { 1725 C = &(M.getContext()); 1726 int LongSize = M.getDataLayout().getPointerSizeInBits(); 1727 IntptrTy = Type::getIntNTy(*C, LongSize); 1728 TargetTriple = Triple(M.getTargetTriple()); 1729 Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel); 1730 initializeCallbacks(M); 1731 1732 bool Changed = false; 1733 1734 // TODO(glider): temporarily disabled globals instrumentation for KASan. 1735 if (ClGlobals && !CompileKernel) { 1736 Function *CtorFunc = M.getFunction(kAsanModuleCtorName); 1737 assert(CtorFunc); 1738 IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator()); 1739 Changed |= InstrumentGlobals(IRB, M); 1740 } 1741 1742 return Changed; 1743 } 1744 1745 void AddressSanitizer::initializeCallbacks(Module &M) { 1746 IRBuilder<> IRB(*C); 1747 // Create __asan_report* callbacks. 1748 // IsWrite, TypeSize and Exp are encoded in the function name. 1749 for (int Exp = 0; Exp < 2; Exp++) { 1750 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 1751 const std::string TypeStr = AccessIsWrite ? "store" : "load"; 1752 const std::string ExpStr = Exp ? "exp_" : ""; 1753 const std::string SuffixStr = CompileKernel ? "N" : "_n"; 1754 const std::string EndingStr = Recover ? "_noabort" : ""; 1755 Type *ExpType = Exp ? 
Type::getInt32Ty(*C) : nullptr; 1756 AsanErrorCallbackSized[AccessIsWrite][Exp] = 1757 checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1758 kAsanReportErrorTemplate + ExpStr + TypeStr + SuffixStr + EndingStr, 1759 IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr)); 1760 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = 1761 checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1762 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, 1763 IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr)); 1764 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 1765 AccessSizeIndex++) { 1766 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex); 1767 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = 1768 checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1769 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr, 1770 IRB.getVoidTy(), IntptrTy, ExpType, nullptr)); 1771 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = 1772 checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1773 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, 1774 IRB.getVoidTy(), IntptrTy, ExpType, nullptr)); 1775 } 1776 } 1777 } 1778 1779 const std::string MemIntrinCallbackPrefix = 1780 CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix; 1781 AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1782 MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(), 1783 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); 1784 AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1785 MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(), 1786 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); 1787 AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1788 MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(), 1789 IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr)); 1790 1791 AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction( 1792 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr)); 1793 1794 AsanPtrCmpFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1795 kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1796 AsanPtrSubFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction( 1797 kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1798 // We insert an empty inline asm after __asan_report* to avoid callback merge. 1799 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), 1800 StringRef(""), StringRef(""), 1801 /*hasSideEffects=*/true); 1802 } 1803 1804 // virtual 1805 bool AddressSanitizer::doInitialization(Module &M) { 1806 // Initialize the private fields. No one has accessed them before. 
1807 1808 GlobalsMD.init(M); 1809 1810 C = &(M.getContext()); 1811 LongSize = M.getDataLayout().getPointerSizeInBits(); 1812 IntptrTy = Type::getIntNTy(*C, LongSize); 1813 TargetTriple = Triple(M.getTargetTriple()); 1814 1815 if (!CompileKernel) { 1816 std::tie(AsanCtorFunction, AsanInitFunction) = 1817 createSanitizerCtorAndInitFunctions( 1818 M, kAsanModuleCtorName, kAsanInitName, 1819 /*InitArgTypes=*/{}, /*InitArgs=*/{}, kAsanVersionCheckName); 1820 appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority); 1821 } 1822 Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel); 1823 return true; 1824 } 1825 1826 bool AddressSanitizer::doFinalization(Module &M) { 1827 GlobalsMD.reset(); 1828 return false; 1829 } 1830 1831 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { 1832 // For each NSObject descendant having a +load method, this method is invoked 1833 // by the ObjC runtime before any of the static constructors is called. 1834 // Therefore we need to instrument such methods with a call to __asan_init 1835 // at the beginning in order to initialize our runtime before any access to 1836 // the shadow memory. 1837 // We cannot just ignore these methods, because they may call other 1838 // instrumented functions. 1839 if (F.getName().find(" load]") != std::string::npos) { 1840 IRBuilder<> IRB(&F.front(), F.front().begin()); 1841 IRB.CreateCall(AsanInitFunction, {}); 1842 return true; 1843 } 1844 return false; 1845 } 1846 1847 void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) { 1848 // Generate code only when dynamic addressing is needed. 1849 if (Mapping.Offset != kDynamicShadowSentinel) 1850 return; 1851 1852 IRBuilder<> IRB(&F.front().front()); 1853 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( 1854 kAsanShadowMemoryDynamicAddress, IntptrTy); 1855 LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress); 1856 } 1857 1858 void AddressSanitizer::markEscapedLocalAllocas(Function &F) { 1859 // Find the one possible call to llvm.localescape and pre-mark allocas passed 1860 // to it as uninteresting. This assumes we haven't started processing allocas 1861 // yet. This check is done up front because iterating the use list in 1862 // isInterestingAlloca would be algorithmically slower. 1863 assert(ProcessedAllocas.empty() && "must process localescape before allocas"); 1864 1865 // Try to get the declaration of llvm.localescape. If it's not in the module, 1866 // we can exit early. 1867 if (!F.getParent()->getFunction("llvm.localescape")) return; 1868 1869 // Look for a call to llvm.localescape call in the entry block. It can't be in 1870 // any other block. 1871 for (Instruction &I : F.getEntryBlock()) { 1872 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I); 1873 if (II && II->getIntrinsicID() == Intrinsic::localescape) { 1874 // We found a call. Mark all the allocas passed in as uninteresting. 
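      // Typical IR shape being matched here, e.g. as emitted for SEH code:
      //   %x = alloca i32
      //   call void (...) @llvm.localescape(i32* %x, ...)
      // Every alloca operand of that single call is excluded from
      // instrumentation below.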
1875 for (Value *Arg : II->arg_operands()) { 1876 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); 1877 assert(AI && AI->isStaticAlloca() && 1878 "non-static alloca arg to localescape"); 1879 ProcessedAllocas[AI] = false; 1880 } 1881 break; 1882 } 1883 } 1884 } 1885 1886 bool AddressSanitizer::runOnFunction(Function &F) { 1887 if (&F == AsanCtorFunction) return false; 1888 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; 1889 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false; 1890 if (F.getName().startswith("__asan_")) return false; 1891 1892 bool FunctionModified = false; 1893 1894 // If needed, insert __asan_init before checking for SanitizeAddress attr. 1895 // This function needs to be called even if the function body is not 1896 // instrumented. 1897 if (maybeInsertAsanInitAtFunctionEntry(F)) 1898 FunctionModified = true; 1899 1900 // Leave if the function doesn't need instrumentation. 1901 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified; 1902 1903 DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); 1904 1905 initializeCallbacks(*F.getParent()); 1906 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1907 1908 FunctionStateRAII CleanupObj(this); 1909 1910 maybeInsertDynamicShadowAtFunctionEntry(F); 1911 1912 // We can't instrument allocas used with llvm.localescape. Only static allocas 1913 // can be passed to that intrinsic. 1914 markEscapedLocalAllocas(F); 1915 1916 // We want to instrument every address only once per basic block (unless there 1917 // are calls between uses). 1918 SmallSet<Value *, 16> TempsToInstrument; 1919 SmallVector<Instruction *, 16> ToInstrument; 1920 SmallVector<Instruction *, 8> NoReturnCalls; 1921 SmallVector<BasicBlock *, 16> AllBlocks; 1922 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts; 1923 int NumAllocas = 0; 1924 bool IsWrite; 1925 unsigned Alignment; 1926 uint64_t TypeSize; 1927 const TargetLibraryInfo *TLI = 1928 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 1929 1930 // Fill the set of memory operations to instrument. 1931 for (auto &BB : F) { 1932 AllBlocks.push_back(&BB); 1933 TempsToInstrument.clear(); 1934 int NumInsnsPerBB = 0; 1935 for (auto &Inst : BB) { 1936 if (LooksLikeCodeInBug11395(&Inst)) return false; 1937 if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize, 1938 &Alignment)) { 1939 if (ClOpt && ClOptSameTemp) { 1940 if (!TempsToInstrument.insert(Addr).second) 1941 continue; // We've seen this temp in the current BB. 1942 } 1943 } else if (ClInvalidPointerPairs && 1944 isInterestingPointerComparisonOrSubtraction(&Inst)) { 1945 PointerComparisonsOrSubtracts.push_back(&Inst); 1946 continue; 1947 } else if (isa<MemIntrinsic>(Inst)) { 1948 // ok, take it. 1949 } else { 1950 if (isa<AllocaInst>(Inst)) NumAllocas++; 1951 CallSite CS(&Inst); 1952 if (CS) { 1953 // A call inside BB. 
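          // A call may write to memory, so addresses already checked in this
          // block cannot be assumed safe afterwards; drop the per-BB cache.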
1954 TempsToInstrument.clear(); 1955 if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction()); 1956 } 1957 if (CallInst *CI = dyn_cast<CallInst>(&Inst)) 1958 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); 1959 continue; 1960 } 1961 ToInstrument.push_back(&Inst); 1962 NumInsnsPerBB++; 1963 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; 1964 } 1965 } 1966 1967 bool UseCalls = 1968 CompileKernel || 1969 (ClInstrumentationWithCallsThreshold >= 0 && 1970 ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold); 1971 const DataLayout &DL = F.getParent()->getDataLayout(); 1972 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), 1973 /*RoundToAlign=*/true); 1974 1975 // Instrument. 1976 int NumInstrumented = 0; 1977 for (auto Inst : ToInstrument) { 1978 if (ClDebugMin < 0 || ClDebugMax < 0 || 1979 (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) { 1980 if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment)) 1981 instrumentMop(ObjSizeVis, Inst, UseCalls, 1982 F.getParent()->getDataLayout()); 1983 else 1984 instrumentMemIntrinsic(cast<MemIntrinsic>(Inst)); 1985 } 1986 NumInstrumented++; 1987 } 1988 1989 FunctionStackPoisoner FSP(F, *this); 1990 bool ChangedStack = FSP.runOnFunction(); 1991 1992 // We must unpoison the stack before every NoReturn call (throw, _exit, etc). 1993 // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37 1994 for (auto CI : NoReturnCalls) { 1995 IRBuilder<> IRB(CI); 1996 IRB.CreateCall(AsanHandleNoReturnFunc, {}); 1997 } 1998 1999 for (auto Inst : PointerComparisonsOrSubtracts) { 2000 instrumentPointerComparisonOrSubtraction(Inst); 2001 NumInstrumented++; 2002 } 2003 2004 if (NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty()) 2005 FunctionModified = true; 2006 2007 DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " " 2008 << F << "\n"); 2009 2010 return FunctionModified; 2011 } 2012 2013 // Workaround for bug 11395: we don't want to instrument stack in functions 2014 // with large assembly blobs (32-bit only), otherwise reg alloc may crash. 2015 // FIXME: remove once the bug 11395 is fixed. 2016 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { 2017 if (LongSize != 32) return false; 2018 CallInst *CI = dyn_cast<CallInst>(I); 2019 if (!CI || !CI->isInlineAsm()) return false; 2020 if (CI->getNumArgOperands() <= 5) return false; 2021 // We have inline assembly with quite a few arguments. 
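  // Conservatively treat this as the bug 11395 pattern; the caller
  // (runOnFunction) then returns early and skips instrumenting the function.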
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = checkSanitizerInterfaceFunction(
        M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
                              IntptrTy, nullptr));
    AsanStackFreeFunc[i] = checkSanitizerInterfaceFunction(
        M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                              IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
        M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
                              IntptrTy, IntptrTy, nullptr));
    AsanUnpoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
        M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
                              IntptrTy, IntptrTy, nullptr));
  }

  if (ClExperimentalPoisoning) {
    for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
      std::ostringstream Name;
      Name << kAsanSetShadowPrefix;
      Name << std::setw(2) << std::setfill('0') << std::hex << Val;
      AsanSetShadowFunc[Val] =
          checkSanitizerInterfaceFunction(M.getOrInsertFunction(
              Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
    }
  }

  AsanAllocaPoisonFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanAllocasUnpoisonFunc =
      checkSanitizerInterfaceFunction(M.getOrInsertFunction(
          kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
}

void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size, skipping
  // leading and trailing zeros in ShadowMask. Zeros never change, so they need
  // neither poisoning nor unpoisoning. Still, we don't mind if some of them
  // end up in the middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
2093 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) { 2094 while (j <= StoreSizeInBytes / 2) 2095 StoreSizeInBytes /= 2; 2096 } 2097 2098 uint64_t Val = 0; 2099 for (size_t j = 0; j < StoreSizeInBytes; j++) { 2100 if (IsLittleEndian) 2101 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j); 2102 else 2103 Val = (Val << 8) | ShadowBytes[i + j]; 2104 } 2105 2106 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)); 2107 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val); 2108 IRB.CreateAlignedStore( 2109 Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 1); 2110 2111 i += StoreSizeInBytes; 2112 } 2113 } 2114 2115 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask, 2116 ArrayRef<uint8_t> ShadowBytes, 2117 IRBuilder<> &IRB, Value *ShadowBase) { 2118 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase); 2119 } 2120 2121 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask, 2122 ArrayRef<uint8_t> ShadowBytes, 2123 size_t Begin, size_t End, 2124 IRBuilder<> &IRB, Value *ShadowBase) { 2125 assert(ShadowMask.size() == ShadowBytes.size()); 2126 size_t Done = Begin; 2127 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) { 2128 if (!ShadowMask[i]) { 2129 assert(!ShadowBytes[i]); 2130 continue; 2131 } 2132 uint8_t Val = ShadowBytes[i]; 2133 if (!AsanSetShadowFunc[Val]) 2134 continue; 2135 2136 // Skip same values. 2137 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) { 2138 } 2139 2140 if (j - i >= ClMaxInlinePoisoningSize) { 2141 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase); 2142 IRB.CreateCall(AsanSetShadowFunc[Val], 2143 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)), 2144 ConstantInt::get(IntptrTy, j - i)}); 2145 Done = j; 2146 } 2147 } 2148 2149 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase); 2150 } 2151 2152 // Fake stack allocator (asan_fake_stack.h) has 11 size classes 2153 // for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass 2154 static int StackMallocSizeClass(uint64_t LocalStackSize) { 2155 assert(LocalStackSize <= kMaxStackMallocSize); 2156 uint64_t MaxSize = kMinStackMallocSize; 2157 for (int i = 0;; i++, MaxSize *= 2) 2158 if (LocalStackSize <= MaxSize) return i; 2159 llvm_unreachable("impossible LocalStackSize"); 2160 } 2161 2162 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond, 2163 Value *ValueIfTrue, 2164 Instruction *ThenTerm, 2165 Value *ValueIfFalse) { 2166 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2); 2167 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent(); 2168 PHI->addIncoming(ValueIfFalse, CondBlock); 2169 BasicBlock *ThenBlock = ThenTerm->getParent(); 2170 PHI->addIncoming(ValueIfTrue, ThenBlock); 2171 return PHI; 2172 } 2173 2174 Value *FunctionStackPoisoner::createAllocaForLayout( 2175 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) { 2176 AllocaInst *Alloca; 2177 if (Dynamic) { 2178 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(), 2179 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize), 2180 "MyAlloca"); 2181 } else { 2182 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize), 2183 nullptr, "MyAlloca"); 2184 assert(Alloca->isStaticAlloca()); 2185 } 2186 assert((ClRealignStack & (ClRealignStack - 1)) == 0); 2187 size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack); 2188 Alloca->setAlignment(FrameAlignment); 2189 return IRB.CreatePointerCast(Alloca, IntptrTy); 2190 } 2191 2192 void 
FunctionStackPoisoner::createDynamicAllocasInitStorage() { 2193 BasicBlock &FirstBB = *F.begin(); 2194 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin())); 2195 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr); 2196 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout); 2197 DynamicAllocaLayout->setAlignment(32); 2198 } 2199 2200 void FunctionStackPoisoner::processDynamicAllocas() { 2201 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) { 2202 assert(DynamicAllocaPoisonCallVec.empty()); 2203 return; 2204 } 2205 2206 // Insert poison calls for lifetime intrinsics for dynamic allocas. 2207 for (const auto &APC : DynamicAllocaPoisonCallVec) { 2208 assert(APC.InsBefore); 2209 assert(APC.AI); 2210 assert(ASan.isInterestingAlloca(*APC.AI)); 2211 assert(!APC.AI->isStaticAlloca()); 2212 2213 IRBuilder<> IRB(APC.InsBefore); 2214 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison); 2215 // Dynamic allocas will be unpoisoned unconditionally below in 2216 // unpoisonDynamicAllocas. 2217 // Flag that we need unpoison static allocas. 2218 } 2219 2220 // Handle dynamic allocas. 2221 createDynamicAllocasInitStorage(); 2222 for (auto &AI : DynamicAllocaVec) 2223 handleDynamicAllocaCall(AI); 2224 unpoisonDynamicAllocas(); 2225 } 2226 2227 void FunctionStackPoisoner::processStaticAllocas() { 2228 if (AllocaVec.empty()) { 2229 assert(StaticAllocaPoisonCallVec.empty()); 2230 return; 2231 } 2232 2233 int StackMallocIdx = -1; 2234 DebugLoc EntryDebugLocation; 2235 if (auto SP = F.getSubprogram()) 2236 EntryDebugLocation = DebugLoc::get(SP->getScopeLine(), 0, SP); 2237 2238 Instruction *InsBefore = AllocaVec[0]; 2239 IRBuilder<> IRB(InsBefore); 2240 IRB.SetCurrentDebugLocation(EntryDebugLocation); 2241 2242 // Make sure non-instrumented allocas stay in the entry block. Otherwise, 2243 // debug info is broken, because only entry-block allocas are treated as 2244 // regular stack slots. 2245 auto InsBeforeB = InsBefore->getParent(); 2246 assert(InsBeforeB == &F.getEntryBlock()); 2247 for (BasicBlock::iterator I(InsBefore); I != InsBeforeB->end(); ++I) 2248 if (auto *AI = dyn_cast<AllocaInst>(I)) 2249 if (NonInstrumentedStaticAllocaVec.count(AI) > 0) 2250 AI->moveBefore(InsBefore); 2251 2252 // If we have a call to llvm.localescape, keep it in the entry block. 2253 if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore); 2254 2255 // Find static allocas with lifetime analysis. 2256 DenseMap<const AllocaInst *, const ASanStackVariableDescription *> 2257 AllocaToSVDMap; 2258 for (const auto &APC : StaticAllocaPoisonCallVec) { 2259 assert(APC.InsBefore); 2260 assert(APC.AI); 2261 assert(ASan.isInterestingAlloca(*APC.AI)); 2262 assert(APC.AI->isStaticAlloca()); 2263 2264 if (ClExperimentalPoisoning) { 2265 AllocaToSVDMap[APC.AI] = nullptr; 2266 } else { 2267 IRBuilder<> IRB(APC.InsBefore); 2268 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison); 2269 } 2270 } 2271 2272 SmallVector<ASanStackVariableDescription, 16> SVD; 2273 SVD.reserve(AllocaVec.size()); 2274 for (AllocaInst *AI : AllocaVec) { 2275 size_t UseAfterScopePoisonSize = 2276 AllocaToSVDMap.find(AI) != AllocaToSVDMap.end() 2277 ? ASan.getAllocaSizeInBytes(*AI) 2278 : 0; 2279 ASanStackVariableDescription D = {AI->getName().data(), 2280 ASan.getAllocaSizeInBytes(*AI), 2281 UseAfterScopePoisonSize, 2282 AI->getAlignment(), 2283 AI, 2284 0}; 2285 SVD.push_back(D); 2286 } 2287 // Minimal header size (left redzone) is 4 pointers, 2288 // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms. 
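  // LongSize is the pointer width in bits, so LongSize / 2 is exactly that
  // byte count: 64 / 2 == 32 and 32 / 2 == 16.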
2289 size_t MinHeaderSize = ASan.LongSize / 2; 2290 const ASanStackFrameLayout &L = 2291 ComputeASanStackFrameLayout(SVD, 1ULL << Mapping.Scale, MinHeaderSize); 2292 2293 DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n"); 2294 uint64_t LocalStackSize = L.FrameSize; 2295 bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel && 2296 LocalStackSize <= kMaxStackMallocSize; 2297 bool DoDynamicAlloca = ClDynamicAllocaStack; 2298 // Don't do dynamic alloca or stack malloc if: 2299 // 1) There is inline asm: too often it makes assumptions on which registers 2300 // are available. 2301 // 2) There is a returns_twice call (typically setjmp), which is 2302 // optimization-hostile, and doesn't play well with introduced indirect 2303 // register-relative calculation of local variable addresses. 2304 DoDynamicAlloca &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall; 2305 DoStackMalloc &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall; 2306 2307 Value *StaticAlloca = 2308 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false); 2309 2310 Value *FakeStack; 2311 Value *LocalStackBase; 2312 2313 if (DoStackMalloc) { 2314 // void *FakeStack = __asan_option_detect_stack_use_after_return 2315 // ? __asan_stack_malloc_N(LocalStackSize) 2316 // : nullptr; 2317 // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize); 2318 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal( 2319 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty()); 2320 Value *UseAfterReturnIsEnabled = 2321 IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUseAfterReturn), 2322 Constant::getNullValue(IRB.getInt32Ty())); 2323 Instruction *Term = 2324 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false); 2325 IRBuilder<> IRBIf(Term); 2326 IRBIf.SetCurrentDebugLocation(EntryDebugLocation); 2327 StackMallocIdx = StackMallocSizeClass(LocalStackSize); 2328 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass); 2329 Value *FakeStackValue = 2330 IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx], 2331 ConstantInt::get(IntptrTy, LocalStackSize)); 2332 IRB.SetInsertPoint(InsBefore); 2333 IRB.SetCurrentDebugLocation(EntryDebugLocation); 2334 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term, 2335 ConstantInt::get(IntptrTy, 0)); 2336 2337 Value *NoFakeStack = 2338 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy)); 2339 Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false); 2340 IRBIf.SetInsertPoint(Term); 2341 IRBIf.SetCurrentDebugLocation(EntryDebugLocation); 2342 Value *AllocaValue = 2343 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca; 2344 IRB.SetInsertPoint(InsBefore); 2345 IRB.SetCurrentDebugLocation(EntryDebugLocation); 2346 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack); 2347 } else { 2348 // void *FakeStack = nullptr; 2349 // void *LocalStackBase = alloca(LocalStackSize); 2350 FakeStack = ConstantInt::get(IntptrTy, 0); 2351 LocalStackBase = 2352 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca; 2353 } 2354 2355 // Replace Alloca instructions with base+offset. 
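  // LocalStackBase is the frame base chosen above (fake stack, dynamic alloca,
  // or static alloca); each variable now lives at LocalStackBase + Desc.Offset,
  // with offsets assigned by ComputeASanStackFrameLayout.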
2356 for (const auto &Desc : SVD) { 2357 AllocaInst *AI = Desc.AI; 2358 Value *NewAllocaPtr = IRB.CreateIntToPtr( 2359 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)), 2360 AI->getType()); 2361 replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true); 2362 AI->replaceAllUsesWith(NewAllocaPtr); 2363 } 2364 2365 // The left-most redzone has enough space for at least 4 pointers. 2366 // Write the Magic value to redzone[0]. 2367 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy); 2368 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic), 2369 BasePlus0); 2370 // Write the frame description constant to redzone[1]. 2371 Value *BasePlus1 = IRB.CreateIntToPtr( 2372 IRB.CreateAdd(LocalStackBase, 2373 ConstantInt::get(IntptrTy, ASan.LongSize / 8)), 2374 IntptrPtrTy); 2375 GlobalVariable *StackDescriptionGlobal = 2376 createPrivateGlobalForString(*F.getParent(), L.DescriptionString, 2377 /*AllowMerging*/ true); 2378 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy); 2379 IRB.CreateStore(Description, BasePlus1); 2380 // Write the PC to redzone[2]. 2381 Value *BasePlus2 = IRB.CreateIntToPtr( 2382 IRB.CreateAdd(LocalStackBase, 2383 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)), 2384 IntptrPtrTy); 2385 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2); 2386 2387 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L); 2388 2389 // Poison the stack red zones at the entry. 2390 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB); 2391 // As mask we must use most poisoned case: red zones and after scope. 2392 // As bytes we can use either the same or just red zones only. 2393 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase); 2394 2395 if (ClExperimentalPoisoning && !StaticAllocaPoisonCallVec.empty()) { 2396 // Complete AllocaToSVDMap 2397 for (const auto &Desc : SVD) { 2398 auto It = AllocaToSVDMap.find(Desc.AI); 2399 if (It != AllocaToSVDMap.end()) { 2400 It->second = &Desc; 2401 } 2402 } 2403 2404 const auto &ShadowInScope = GetShadowBytes(SVD, L); 2405 2406 // Poison static allocas near lifetime intrinsics. 2407 for (const auto &APC : StaticAllocaPoisonCallVec) { 2408 // Must be already set. 2409 assert(AllocaToSVDMap[APC.AI]); 2410 const auto &Desc = *AllocaToSVDMap[APC.AI]; 2411 assert(Desc.Offset % L.Granularity == 0); 2412 size_t Begin = Desc.Offset / L.Granularity; 2413 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity; 2414 2415 IRBuilder<> IRB(APC.InsBefore); 2416 copyToShadow(ShadowAfterScope, 2417 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End, 2418 IRB, ShadowBase); 2419 } 2420 } 2421 2422 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0); 2423 2424 auto UnpoisonStack = [&](IRBuilder<> &IRB) { 2425 // Do this always as poisonAlloca can be disabled with 2426 // detect_stack_use_after_scope=0. 2427 copyToShadow(ShadowAfterScope, ShadowClean, IRB, ShadowBase); 2428 if (!ClExperimentalPoisoning && !StaticAllocaPoisonCallVec.empty()) { 2429 // If we poisoned some allocas in llvm.lifetime analysis, 2430 // unpoison whole stack frame now. 2431 poisonAlloca(LocalStackBase, LocalStackSize, IRB, false); 2432 } 2433 }; 2434 2435 SmallVector<uint8_t, 64> ShadowAfterReturn; 2436 2437 // (Un)poison the stack before all ret instructions. 2438 for (auto Ret : RetVec) { 2439 IRBuilder<> IRBRet(Ret); 2440 // Mark the current frame as retired. 
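    // Overwriting kCurrentStackFrameMagic with kRetiredStackFrameMagic lets
    // the runtime distinguish a live frame from one whose function has
    // already returned.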
2441 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic), 2442 BasePlus0); 2443 if (DoStackMalloc) { 2444 assert(StackMallocIdx >= 0); 2445 // if FakeStack != 0 // LocalStackBase == FakeStack 2446 // // In use-after-return mode, poison the whole stack frame. 2447 // if StackMallocIdx <= 4 2448 // // For small sizes inline the whole thing: 2449 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize); 2450 // **SavedFlagPtr(FakeStack) = 0 2451 // else 2452 // __asan_stack_free_N(FakeStack, LocalStackSize) 2453 // else 2454 // <This is not a fake stack; unpoison the redzones> 2455 Value *Cmp = 2456 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy)); 2457 TerminatorInst *ThenTerm, *ElseTerm; 2458 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm); 2459 2460 IRBuilder<> IRBPoison(ThenTerm); 2461 if (StackMallocIdx <= 4) { 2462 int ClassSize = kMinStackMallocSize << StackMallocIdx; 2463 ShadowAfterReturn.resize(ClassSize / L.Granularity, 2464 kAsanStackUseAfterReturnMagic); 2465 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison, 2466 ShadowBase); 2467 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd( 2468 FakeStack, 2469 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8)); 2470 Value *SavedFlagPtr = IRBPoison.CreateLoad( 2471 IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy)); 2472 IRBPoison.CreateStore( 2473 Constant::getNullValue(IRBPoison.getInt8Ty()), 2474 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy())); 2475 } else { 2476 // For larger frames call __asan_stack_free_*. 2477 IRBPoison.CreateCall( 2478 AsanStackFreeFunc[StackMallocIdx], 2479 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)}); 2480 } 2481 2482 IRBuilder<> IRBElse(ElseTerm); 2483 UnpoisonStack(IRBElse); 2484 } else { 2485 UnpoisonStack(IRBRet); 2486 } 2487 } 2488 2489 // We are done. Remove the old unused alloca instructions. 2490 for (auto AI : AllocaVec) AI->eraseFromParent(); 2491 } 2492 2493 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, 2494 IRBuilder<> &IRB, bool DoPoison) { 2495 // For now just insert the call to ASan runtime. 2496 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); 2497 Value *SizeArg = ConstantInt::get(IntptrTy, Size); 2498 IRB.CreateCall( 2499 DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc, 2500 {AddrArg, SizeArg}); 2501 } 2502 2503 // Handling llvm.lifetime intrinsics for a given %alloca: 2504 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca. 2505 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect 2506 // invalid accesses) and unpoison it for llvm.lifetime.start (the memory 2507 // could be poisoned by previous llvm.lifetime.end instruction, as the 2508 // variable may go in and out of scope several times, e.g. in loops). 2509 // (3) if we poisoned at least one %alloca in a function, 2510 // unpoison the whole stack frame at function exit. 2511 2512 AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) { 2513 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) 2514 // We're interested only in allocas we can handle. 2515 return ASan.isInterestingAlloca(*AI) ? AI : nullptr; 2516 // See if we've already calculated (or started to calculate) alloca for a 2517 // given value. 2518 AllocaForValueMapTy::iterator I = AllocaForValue.find(V); 2519 if (I != AllocaForValue.end()) return I->second; 2520 // Store 0 while we're calculating alloca for value V to avoid 2521 // infinite recursion if the value references itself. 
  AllocaForValue[V] = nullptr;
  AllocaInst *Res = nullptr;
  if (CastInst *CI = dyn_cast<CastInst>(V))
    Res = findAllocaForValue(CI->getOperand(0));
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    for (Value *IncValue : PN->incoming_values()) {
      // Allow self-referencing phi-nodes.
      if (IncValue == PN) continue;
      AllocaInst *IncValueAI = findAllocaForValue(IncValue);
      // AI for incoming values should exist and should all be equal.
      if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
        return nullptr;
      Res = IncValueAI;
    }
  } else if (GetElementPtrInst *EP = dyn_cast<GetElementPtrInst>(V)) {
    Res = findAllocaForValue(EP->getPointerOperand());
  } else {
    DEBUG(dbgs() << "Alloca search canceled on unknown instruction: " << *V
                 << "\n");
  }
  if (Res) AllocaForValue[V] = Res;
  return Res;
}

void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
  IRBuilder<> IRB(AI);

  const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);

  // Since we need to extend the alloca with additional memory for redzones,
  // and OldSize is the number of allocated elements of ElementSize bytes each,
  // compute the allocated size in bytes as OldSize * ElementSize.
  const unsigned ElementSize =
      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize =
      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
                    ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % 32
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
  // Align is added to locate the left redzone, PartialPadding for a possible
  // partial redzone, and kAllocaRzSize for the right redzone respectively.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);

  // Insert a new alloca with the new NewSize and Align params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align);

  // NewAddress = Address + Align
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Align));

  // Insert the __asan_alloca_poison call for the newly created alloca.
  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});

  // Store the last alloca's address to DynamicAllocaLayout. We'll need it
  // when unpoisoning the dynamic allocas at function exit.
  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
2601 AI->replaceAllUsesWith(NewAddressPtr); 2602 2603 // We are done. Erase old alloca from parent. 2604 AI->eraseFromParent(); 2605 } 2606 2607 // isSafeAccess returns true if Addr is always inbounds with respect to its 2608 // base object. For example, it is a field access or an array access with 2609 // constant inbounds index. 2610 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, 2611 Value *Addr, uint64_t TypeSize) const { 2612 SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr); 2613 if (!ObjSizeVis.bothKnown(SizeOffset)) return false; 2614 uint64_t Size = SizeOffset.first.getZExtValue(); 2615 int64_t Offset = SizeOffset.second.getSExtValue(); 2616 // Three checks are required to ensure safety: 2617 // . Offset >= 0 (since the offset is given from the base ptr) 2618 // . Size >= Offset (unsigned) 2619 // . Size - Offset >= NeededSize (unsigned) 2620 return Offset >= 0 && Size >= uint64_t(Offset) && 2621 Size - uint64_t(Offset) >= TypeSize / 8; 2622 } 2623
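// Worked example (LP64, hypothetical "struct { long a; char buf[8]; } s"):
// a 1-byte load of s.buf[4] gives Size == 16, Offset == 12 and TypeSize == 8,
// so 12 >= 0, 16 >= 12 and 16 - 12 >= 1 all hold and isSafeAccess returns
// true.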