//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//  http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"
#include <algorithm>
#include <string>
#include <system_error>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000;  // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa8000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
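
// A shadow byte covers 2^Scale (by default 8) bytes of application memory:
//   Shadow(Mem) = (Mem >> Scale) + Offset.
// For illustration only: on Linux x86_64 (Scale = 3, Offset =
// kSmallX86_64ShadowOffset) the application address 0x600000000000 maps to
// the shadow address (0x600000000000 >> 3) + 0x7FFF8000 == 0xC007FFF8000.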
static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const int kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanReportLoadN = "__asan_report_load_n";
static const char *const kAsanReportStoreN = "__asan_report_store_n";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
    "__asan_unregister_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init_v3";
static const char *const kAsanCovModuleInitName = "__sanitizer_cov_module_init";
static const char *const kAsanCovName = "__sanitizer_cov";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "__asan_gen_";
static const char *const kAsanPoisonStackMemoryName =
    "__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
    "__asan_unpoison_stack_memory";

static const char *const kAsanOptionDetectUAR =
    "__asan_option_detect_stack_use_after_return";

#ifndef NDEBUG
static const int kAsanStackAfterReturnMagic = 0xf5;
#endif

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// Command-line flags.

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
       cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
       cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
       cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
       cl::desc("use instrumentation with slow path for all accesses"),
       cl::Hidden, cl::init(false));
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
       cl::init(10000),
       cl::desc("maximal number of instructions to instrument in any given BB"),
       cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack",
       cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
       cl::desc("Check return-after-free"), cl::Hidden, cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
       cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
static cl::opt<int> ClCoverage("asan-coverage",
       cl::desc("ASan coverage. 0: none, 1: entry block, 2: all blocks"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClCoverageBlockThreshold("asan-coverage-block-threshold",
       cl::desc("Add coverage instrumentation only to the entry block if there "
                "are more than this number of blocks."),
       cl::Hidden, cl::init(1500));
static cl::opt<bool> ClInitializers("asan-initialization-order",
       cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
       cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
       cl::Hidden, cl::init(false));
static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
       cl::desc("Realign stack to the value of this flag (power of two)"),
       cl::Hidden, cl::init(32));
static cl::opt<std::string> ClBlacklistFile("asan-blacklist",
       cl::desc("File containing the list of objects to ignore "
                "during instrumentation"), cl::Hidden);
static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
       cl::desc("If the function being instrumented contains more than "
                "this number of memory accesses, use callbacks instead of "
                "inline checks (-1 means never use callbacks)."),
       cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
       "asan-memory-access-callback-prefix",
       cl::desc("Prefix for memory access callbacks"), cl::Hidden,
       cl::init("__asan_"));

// This is an experimental feature that will allow choosing between
// instrumented and non-instrumented code at link-time.
// If this option is on, just before instrumenting a function we create its
// clone; if the function is not changed by asan the clone is deleted.
// If we end up with a clone, we put the instrumented function into a section
// called "ASAN" and the uninstrumented function into a section called
// "NOASAN".
//
// This is still a prototype, we need to figure out a way to keep two copies of
// a function so that the linker can easily choose one of them.
static cl::opt<bool> ClKeepUninstrumented("asan-keep-uninstrumented-functions",
       cl::desc("Keep uninstrumented copies of functions"),
       cl::Hidden, cl::init(false));

// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//   Shadow = (Mem >> scale) + offset
static cl::opt<int> ClMappingScale("asan-mapping-scale",
       cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
static cl::opt<bool> ClOpt("asan-opt",
       cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
       cl::desc("Instrument the same temp just once"), cl::Hidden,
       cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
       cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));

static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
       cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
       cl::Hidden, cl::init(false));

// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));
static cl::opt<std::string> ClDebugFunc("asan-debug-func",
                                        cl::Hidden, cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalArray,
          "Number of optimized accesses to global arrays");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");

namespace {
/// A set of dynamically initialized globals extracted from metadata.
class SetOfDynamicallyInitializedGlobals {
 public:
  void Init(Module &M) {
    // Clang generates metadata identifying all dynamically initialized
    // globals.
    NamedMDNode *DynamicGlobals =
        M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
    if (!DynamicGlobals)
      return;
    for (const auto MDN : DynamicGlobals->operands()) {
      assert(MDN->getNumOperands() == 1);
      Value *VG = MDN->getOperand(0);
      // The optimizer may optimize away a global entirely, in which case we
      // cannot instrument access to it.
      if (!VG)
        continue;
      DynInitGlobals.insert(cast<GlobalVariable>(VG));
    }
  }
  bool Contains(GlobalVariable *G) { return DynInitGlobals.count(G) != 0; }
 private:
  SmallSet<GlobalValue*, 32> DynInitGlobals;
};

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
};

static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
  llvm::Triple TargetTriple(M.getTargetTriple());
  bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
  bool IsIOS = TargetTriple.getOS() == llvm::Triple::IOS;
  bool IsFreeBSD = TargetTriple.getOS() == llvm::Triple::FreeBSD;
  bool IsLinux = TargetTriple.getOS() == llvm::Triple::Linux;
  bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
                 TargetTriple.getArch() == llvm::Triple::ppc64le;
  bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
  bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
                  TargetTriple.getArch() == llvm::Triple::mipsel;

  ShadowMapping Mapping;

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = 0;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kIOSShadowOffset32;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else {  // LongSize == 64
    if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset64;
    else if (IsLinux && IsX86_64)
      Mapping.Offset = kSmallX86_64ShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale) {
    Mapping.Scale = ClMappingScale;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) when the
  // offset is a power of two, but on ppc64 we have to use add since the
  // shadow offset is not necessarily 1/8-th of the address space.
  Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));

  return Mapping;
}

static size_t RedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
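// E.g. RedzoneSizeForScale(3) == 32 (the default), RedzoneSizeForScale(6) ==
// 64, and RedzoneSizeForScale(7) == 128.
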
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
  AddressSanitizer() : FunctionPass(ID) {}
  const char *getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }
  void instrumentMop(Instruction *I, bool UseCalls);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool runOnFunction(Function &F) override;
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid

 private:
  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool InjectCoverage(Function &F, const ArrayRef<BasicBlock*> AllBlocks);
  void InjectCoverageAtBlock(Function &F, BasicBlock &BB);

  LLVMContext *C;
  const DataLayout *DL;
  int LongSize;
  Type *IntptrTy;
  ShadowMapping Mapping;
  Function *AsanCtorFunction;
  Function *AsanInitFunction;
  Function *AsanHandleNoReturnFunc;
  Function *AsanCovFunction;
  Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
  // This array is indexed by AccessIsWrite and log2(AccessSize).
  Function *AsanErrorCallback[2][kNumberOfAccessSizes];
  Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
  // This array is indexed by AccessIsWrite.
  Function *AsanErrorCallbackSized[2],
           *AsanMemoryAccessCallbackSized[2];
  Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
  InlineAsm *EmptyAsm;
  SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;

  friend struct FunctionStackPoisoner;
};

class AddressSanitizerModule : public ModulePass {
 public:
  AddressSanitizerModule(StringRef BlacklistFile = StringRef())
      : ModulePass(ID), BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
                                                            : BlacklistFile) {}
  bool runOnModule(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid
  const char *getPassName() const override {
    return "AddressSanitizerModule";
  }

 private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
  bool ShouldInstrumentGlobal(GlobalVariable *G);
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  size_t MinRedzoneSizeForGlobal() const {
    return RedzoneSizeForScale(Mapping.Scale);
  }

  SmallString<64> BlacklistFile;

  std::unique_ptr<SpecialCaseList> BL;
  SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
  Type *IntptrTy;
  LLVMContext *C;
  const DataLayout *DL;
  ShadowMapping Mapping;
  Function *AsanPoisonGlobals;
  Function *AsanUnpoisonGlobals;
  Function *AsanRegisterGlobals;
  Function *AsanUnregisterGlobals;
  Function *AsanCovModuleInit;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception, most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst*, 16> AllocaVec;
  SmallVector<Instruction*, 8> RetVec;
  unsigned StackAlignment;

  Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
           *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;

  // Stores the location and arguments of a poisoning/unpoisoning call for an
  // alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;

  // Maps a Value to the AllocaInst from which the Value originates.
  typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy;
  AllocaForValueMapTy AllocaForValue;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C),
        IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
        Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale) {}

  bool runOnFunction() {
    if (!ClStack) return false;
    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    if (AllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    poisonStack();

    if (ClDebugStack) {
      DEBUG(dbgs() << F);
    }
    return true;
  }

  // Finds all static Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoisons everything back before the function returns.
  void poisonStack();

  // ----------------------- Visitors.
  /// \brief Collect all Ret instructions.
  void visitReturnInst(ReturnInst &RI) {
    RetVec.push_back(&RI);
  }

  /// \brief Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!isInterestingAlloca(AI)) return;

    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    AllocaVec.push_back(&AI);
  }

  /// \brief Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!ClCheckLifetime) return;
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID != Intrinsic::lifetime_start &&
        ID != Intrinsic::lifetime_end)
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
    if (!AI) return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    AllocaPoisonCallVec.push_back(APC);
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(AllocaInst &AI) const {
    return (!AI.isArrayAllocation() && AI.isStaticAlloca() &&
            AI.getAllocatedType()->isSized() &&
            // alloca() may be called with 0 size, ignore it.
            getAllocaSizeInBytes(&AI) > 0);
  }

  uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
    Type *Ty = AI->getAllocatedType();
    uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
    return SizeInBytes;
  }
  /// Finds alloca where the value comes from.
  AllocaInst *findAllocaForValue(Value *V);
  void poisonRedZones(const ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
                      Value *ShadowBase, bool DoPoison);
  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase,
                                          int Size);
};

}  // namespace

char AddressSanitizer::ID = 0;
INITIALIZE_PASS(AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
    false, false)
FunctionPass *llvm::createAddressSanitizerFunctionPass() {
  return new AddressSanitizer();
}

char AddressSanitizerModule::ID = 0;
INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
    "ModulePass", false, false)
ModulePass *llvm::createAddressSanitizerModulePass(StringRef BlacklistFile) {
  return new AddressSanitizerModule(BlacklistFile);
}

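// Map an access size in bits to an index into the callback arrays:
// 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4 (i.e. log2 of the access size
// in bytes).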
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

// \brief Create a constant for Str so that we can pass it to the run-time lib.
static GlobalVariable *createPrivateGlobalForString(
    Module &M, StringRef Str, bool AllowMerging) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  // We use private linkage for module-local strings. If they can be merged
  // with another one, we set the unnamed_addr attribute.
  GlobalVariable *GV =
      new GlobalVariable(M, StrConst->getType(), true,
                         GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
  if (AllowMerging)
    GV->setUnnamedAddr(true);
  GV->setAlignment(1);  // Strings may not be merged w/o setting align 1.
  return GV;
}

static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
  return G->getName().find(kAsanGenPrefix) == 0;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0)
    return Shadow;
  // (Shadow >> scale) | offset
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
  else
    return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
}

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall3(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall3(
        AsanMemset,
        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
  }
  MI->eraseFromParent();
}

// If I is an interesting memory access, return the PointerOperand
// and set IsWrite/Alignment. Otherwise return nullptr.
static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                        unsigned *Alignment) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *Alignment = LI->getAlignment();
    return LI->getPointerOperand();
  }
  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *Alignment = SI->getAlignment();
    return SI->getPointerOperand();
  }
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *Alignment = 0;
    return RMW->getPointerOperand();
  }
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *Alignment = 0;
    return XCHG->getPointerOperand();
  }
  return nullptr;
}

static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational())
      return false;
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub)
      return false;
  } else {
    return false;
  }
  if (!isPointerOperand(I->getOperand(0)) ||
      !isPointerOperand(I->getOperand(1)))
    return false;
  return true;
}

bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization we don't
  // have to instrument it. However, if a global does not have an initializer
  // at all, we assume it has a dynamic initializer (in another TU).
  return G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G);
}

void
AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
  IRBuilder<> IRB(I);
  Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (int i = 0; i < 2; i++) {
    if (Param[i]->getType()->isPointerTy())
      Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
  }
  IRB.CreateCall2(F, Param[0], Param[1]);
}

void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
  bool IsWrite = false;
  unsigned Alignment = 0;
  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
  assert(Addr);
  if (ClOpt && ClOptGlobals) {
    if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
      // If initialization order checking is disabled, a simple access to a
      // dynamically initialized global is always valid.
      if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
        NumOptimizedAccessesToGlobalVar++;
        return;
      }
    }
    ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
    if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
      if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
        if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
          NumOptimizedAccessesToGlobalArray++;
          return;
        }
      }
    }
  }

  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();

  assert(OrigTy->isSized());
  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);

  assert((TypeSize % 8) == 0);

  if (IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
  // if the data is properly aligned.
  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
       TypeSize == 128) &&
      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
    return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
  // Instrument unusual size or unusual alignment.
  // We cannot do it with a single check, so we do a 1-byte check for the first
  // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
  // to report the actual access size.
  IRBuilder<> IRB(I);
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
  } else {
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
        OrigPtrTy);
    instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
    instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
  }
}

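// Roughly, the inline check emitted by instrumentAddress() further below
// looks like this (pseudo-code; Granularity is 1 << Mapping.Scale, 8 by
// default, and the shadow value k is treated as a signed byte):
//   int8_t k = *(int8_t *)MemToShadow(Addr);
//   if (k != 0 &&
//       (AccessSize >= Granularity ||                       // whole granule
//        (Addr & (Granularity - 1)) + AccessSize - 1 >= k))  // partial granule
//     __asan_report_{load,store}N(Addr);                    // never returns
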
// Validate the result of Module::getOrInsertFunction called for an interface
// function of AddressSanitizer. If the instrumented module defines a function
// with the same name, their prototypes must match, otherwise
// getOrInsertFunction returns a bitcast.
static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
  FuncOrBitcast->dump();
  report_fatal_error("trying to redefine an AddressSanitizer "
                     "interface function");
}

Instruction *AddressSanitizer::generateCrashCode(
    Instruction *InsertBefore, Value *Addr,
    bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) {
  IRBuilder<> IRB(InsertBefore);
  CallInst *Call = SizeArgument
      ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
      : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);

  // We don't do Call->setDoesNotReturn() because the BB already has
  // UnreachableInst at the end.
  // This EmptyAsm is required to avoid callback merge.
  IRB.CreateCall(EmptyAsm);
  return Call;
}

Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                           Value *ShadowValue,
                                           uint32_t TypeSize) {
  size_t Granularity = 1 << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte = IRB.CreateAnd(
      AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte = IRB.CreateIntCast(
      LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}

void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);

  if (UseCalls) {
    IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                   AddrLong);
    return;
  }

  Type *ShadowTy = IntegerType::get(
      *C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *CmpVal = Constant::getNullValue(ShadowTy);
  Value *ShadowValue = IRB.CreateLoad(
      IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));

  Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
  size_t Granularity = 1 << Mapping.Scale;
  TerminatorInst *CrashTerm = nullptr;

  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    TerminatorInst *CheckTerm =
        SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
    assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    BasicBlock *CrashBlock =
        BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
    CrashTerm = new UnreachableInst(*C, CrashBlock);
    BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
    ReplaceInstWithInst(CheckTerm, NewTerm);
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
  }

  Instruction *Crash = generateCrashCode(
      CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}

void
AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
                                             GlobalValue *ModuleName) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());

  // Add a call to poison all external globals before the given function
  // starts.
  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit.getBasicBlockList())
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI);
}

void AddressSanitizerModule::createInitializerPoisonCalls(
    Module &M, GlobalValue *ModuleName) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");

  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP))
      continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);

    // Must have a function or null ptr.
    // (CS->getOperand(0) is the init priority.)
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() != kAsanModuleCtorName)
        poisonOneInitializer(*F, ModuleName);
    }
  }
}

bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
  Type *Ty = cast<PointerType>(G->getType())->getElementType();
  DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  if (BL->isIn(*G)) return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (GlobalWasGeneratedByAsan(G)) return false;  // Our own global.
  // Touch only those globals that will not be defined in other modules.
  // Don't handle ODR linkage types since other modules may be built w/o asan.
  if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
      G->getLinkage() != GlobalVariable::PrivateLinkage &&
      G->getLinkage() != GlobalVariable::InternalLinkage)
    return false;
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal())
    return false;
  // For now, just ignore this Global if the alignment is large.
  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;

  // Ignore all the globals with the names starting with "\01L_OBJC_".
  // Many of those are put into the .cstring section. The linker compresses
  // that section by removing the spare \0s after the string terminator, so
  // our redzones get broken.
  if ((G->getName().find("\01L_OBJC_") == 0) ||
      (G->getName().find("\01l_OBJC_") == 0)) {
    DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G << "\n");
    return false;
  }

  if (G->hasSection()) {
    StringRef Section(G->getSection());
    // Ignore the globals from the __OBJC section. The ObjC runtime assumes
    // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
    // them.
    if (Section.startswith("__OBJC,") ||
        Section.startswith("__DATA, __objc_")) {
      DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
      return false;
    }
    // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
    // Constant CFString instances are compiled in the following way:
    //  -- the string buffer is emitted into
    //     __TEXT,__cstring,cstring_literals
    //  -- the constant NSConstantString structure referencing that buffer
    //     is placed into __DATA,__cfstring
    // Therefore there's no point in placing redzones into __DATA,__cfstring.
    // Moreover, it causes the linker to crash on OS X 10.7
    if (Section.startswith("__DATA,__cfstring")) {
      DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
      return false;
    }
    // The linker merges the contents of cstring_literals and removes the
    // trailing zeroes.
    if (Section.startswith("__TEXT,__cstring,cstring_literals")) {
      DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
      return false;
    }

    // Callbacks put into the CRT initializer/terminator sections
    // should not be instrumented.
    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (Section.startswith(".CRT")) {
      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
      return false;
    }

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
  }

  return true;
}

void AddressSanitizerModule::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, NULL));
  AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL));
  AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(),
      IntptrTy, IntptrTy, NULL));
  AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnregisterGlobalsName,
      IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
  AsanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanCovModuleInitName,
      IRB.getVoidTy(), IntptrTy, NULL));
  AsanCovModuleInit->setLinkage(Function::ExternalLinkage);
}

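// For illustration only: with the default MinRZ of 32 bytes, the redzone
// sizing in InstrumentGlobals() below gives a 40-byte global a 56-byte right
// redzone (RZ = 32 plus 24 bytes of padding up to the next 32-byte boundary,
// 96 bytes total), and a 4096-byte global a 1024-byte redzone (~1/4 of the
// global's size).
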
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
  DynamicallyInitializedGlobals.Init(M);

  SmallVector<GlobalVariable *, 16> GlobalsToChange;

  for (auto &G : M.globals()) {
    if (ShouldInstrumentGlobal(&G))
      GlobalsToChange.push_back(&G);
  }

  size_t n = GlobalsToChange.size();
  if (n == 0) return false;

  // A global is described by a structure
  //   size_t beg;
  //   size_t size;
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy,
                                               IntptrTy, IntptrTy,
                                               IntptrTy, IntptrTy, NULL);
  SmallVector<Constant *, 16> Initializers(n);

  bool HasDynamicallyInitializedGlobals = false;

  // We shouldn't merge same module names, as this string serves as a unique
  // module ID at runtime.
  GlobalVariable *ModuleName = createPrivateGlobalForString(
      M, M.getModuleIdentifier(), /*AllowMerging*/false);

  for (size_t i = 0; i < n; i++) {
    static const uint64_t kMaxGlobalRedzone = 1 << 18;
    GlobalVariable *G = GlobalsToChange[i];
    PointerType *PtrTy = cast<PointerType>(G->getType());
    Type *Ty = PtrTy->getElementType();
    uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
    uint64_t MinRZ = MinRedzoneSizeForGlobal();
    // MinRZ <= RZ <= kMaxGlobalRedzone,
    // and we try to make RZ ~ 1/4 of SizeInBytes.
    uint64_t RZ = std::max(MinRZ,
                           std::min(kMaxGlobalRedzone,
                                    (SizeInBytes / MinRZ / 4) * MinRZ));
    uint64_t RightRedzoneSize = RZ;
    // Round up to MinRZ.
    if (SizeInBytes % MinRZ)
      RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
    assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
    // Determine whether this global should be poisoned in initialization.
    bool GlobalHasDynamicInitializer =
        DynamicallyInitializedGlobals.Contains(G);

    StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
    Constant *NewInitializer = ConstantStruct::get(
        NewTy, G->getInitializer(),
        Constant::getNullValue(RightRedZoneTy), NULL);

    GlobalVariable *Name =
        createPrivateGlobalForString(M, G->getName(), /*AllowMerging*/true);

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
      Linkage = GlobalValue::InternalLinkage;
    GlobalVariable *NewGlobal = new GlobalVariable(
        M, NewTy, G->isConstant(), Linkage,
        NewInitializer, "", G, G->getThreadLocalMode());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setAlignment(MinRZ);

    Value *Indices2[2];
    Indices2[0] = IRB.getInt32(0);
    Indices2[1] = IRB.getInt32(0);

    G->replaceAllUsesWith(
        ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
    NewGlobal->takeName(G);
    G->eraseFromParent();

    Initializers[i] = ConstantStruct::get(
        GlobalStructTy,
        ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(ModuleName, IntptrTy),
        ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer),
        NULL);

    // Populate the first and last globals declared in this TU.
    if (ClInitializers && GlobalHasDynamicInitializer)
      HasDynamicallyInitializedGlobals = true;

    DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
  }

  ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
  GlobalVariable *AllGlobals = new GlobalVariable(
      M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
      ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);
  IRB.CreateCall2(AsanRegisterGlobals,
                  IRB.CreatePointerCast(AllGlobals, IntptrTy),
                  ConstantInt::get(IntptrTy, n));

  // We also need to unregister globals at the end, e.g. when a shared library
  // gets closed.
  Function *AsanDtorFunction = Function::Create(
      FunctionType::get(Type::getVoidTy(*C), false),
      GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
  IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
  IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
                       IRB.CreatePointerCast(AllGlobals, IntptrTy),
                       ConstantInt::get(IntptrTy, n));
  appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);

  DEBUG(dbgs() << M);
  return true;
}

bool AddressSanitizerModule::runOnModule(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    return false;
  DL = &DLP->getDataLayout();
  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
  C = &(M.getContext());
  int LongSize = DL->getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);
  Mapping = getShadowMapping(M, LongSize);
  initializeCallbacks(M);

  bool Changed = false;

  Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
  assert(CtorFunc);
  IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());

  if (ClCoverage > 0) {
    Function *CovFunc = M.getFunction(kAsanCovName);
    int nCov = CovFunc ? CovFunc->getNumUses() : 0;
    IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
    Changed = true;
  }

  if (ClGlobals && !BL->isIn(M)) Changed |= InstrumentGlobals(IRB, M);

  return Changed;
}

void AddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      // IsWrite and TypeSize are encoded in the function name.
      std::string Suffix =
          (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
      AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
          checkInterfaceFunction(
              M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
                                    IRB.getVoidTy(), IntptrTy, NULL));
      AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          checkInterfaceFunction(
              M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
                                    IRB.getVoidTy(), IntptrTy, NULL));
    }
  }
  AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));

  AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
                            IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
                            IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));

  AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
  AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
  AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, NULL));

  AsanHandleNoReturnFunc = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
  AsanCovFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanCovName, IRB.getVoidTy(), NULL));
  AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  // We insert an empty inline asm after __asan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}

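// With the default -asan-memory-access-callback-prefix of "__asan_", the
// names declared above resolve to, e.g., __asan_load4 / __asan_store4 for the
// call-based checks and __asan_report_load4 / __asan_report_store4 for the
// error reporting callbacks (AccessIsWrite == 1, AccessSizeIndex == 2).
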
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
  // Initialize the private fields. No one has accessed them before.
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  DynamicallyInitializedGlobals.Init(M);

  C = &(M.getContext());
  LongSize = DL->getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);

  AsanCtorFunction = Function::Create(
      FunctionType::get(Type::getVoidTy(*C), false),
      GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
  BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
  // call __asan_init in the module ctor.
  IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
  AsanInitFunction = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
  AsanInitFunction->setLinkage(Function::ExternalLinkage);
  IRB.CreateCall(AsanInitFunction);

  Mapping = getShadowMapping(M, LongSize);

  appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
  return true;
}

bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __asan_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    IRBuilder<> IRB(F.begin()->begin());
    IRB.CreateCall(AsanInitFunction);
    return true;
  }
  return false;
}

void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
  BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
  // Skip static allocas at the top of the entry block so they don't become
  // dynamic when we split the block. If we used our optimized stack layout,
  // then there will only be one alloca and it will come first.
  for (; IP != BE; ++IP) {
    AllocaInst *AI = dyn_cast<AllocaInst>(IP);
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  DebugLoc EntryLoc = IP->getDebugLoc().getFnDebugLoc(*C);
  IRBuilder<> IRB(IP);
  IRB.SetCurrentDebugLocation(EntryLoc);
  Type *Int8Ty = IRB.getInt8Ty();
  GlobalVariable *Guard = new GlobalVariable(
      *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
      Constant::getNullValue(Int8Ty), "__asan_gen_cov_" + F.getName());
  LoadInst *Load = IRB.CreateLoad(Guard);
  Load->setAtomic(Monotonic);
  Load->setAlignment(1);
  Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
  Instruction *Ins = SplitBlockAndInsertIfThen(
      Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
  IRB.SetInsertPoint(Ins);
  IRB.SetCurrentDebugLocation(EntryLoc);
  // We pass &F to __sanitizer_cov. We could avoid this and rely on
  // GET_CALLER_PC, but having the PC of the first instruction is just nice.
  IRB.CreateCall(AsanCovFunction);
  StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
  Store->setAtomic(Monotonic);
  Store->setAlignment(1);
}

// Poor man's coverage that works with ASan.
// We create a Guard boolean variable with the same linkage
// as the function and inject this code into the entry block (-asan-coverage=1)
// or all blocks (-asan-coverage=2):
// if (!*Guard) {
//    __sanitizer_cov(&F);
//    *Guard = 1;
// }
// The accesses to Guard are atomic. The rest of the logic is
// in __sanitizer_cov (it's fine to call it more than once).
//
// This coverage implementation provides very limited data:
// it only tells if a given function (block) was ever executed.
// No counters, no per-edge data.
// But for many use cases this is what we need and the added slowdown
// is negligible. This simple implementation will probably be obsoleted
// by the upcoming Clang-based coverage implementation.
// By having it here and now we hope to
//  a) get the functionality to users earlier and
//  b) collect usage statistics to help improve Clang coverage design.
bool AddressSanitizer::InjectCoverage(Function &F,
                                      const ArrayRef<BasicBlock *> AllBlocks) {
  if (!ClCoverage) return false;

  if (ClCoverage == 1 ||
      (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
    InjectCoverageAtBlock(F, F.getEntryBlock());
  } else {
    for (auto BB : AllBlocks)
      InjectCoverageAtBlock(F, *BB);
  }
  return true;
}

bool AddressSanitizer::runOnFunction(Function &F) {
  if (&F == AsanCtorFunction) return false;
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
  initializeCallbacks(*F.getParent());

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  maybeInsertAsanInitAtFunctionEntry(F);

  if (!F.hasFnAttribute(Attribute::SanitizeAddress))
    return false;

  if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
    return false;

  // We want to instrument every address only once per basic block (unless
  // there are calls between uses).
  SmallSet<Value*, 16> TempsToInstrument;
  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<Instruction*, 8> NoReturnCalls;
  SmallVector<BasicBlock*, 16> AllBlocks;
  SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
  int NumAllocas = 0;
  bool IsWrite;
  unsigned Alignment;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      if (Value *Addr =
              isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
        if (ClOpt && ClOptSameTemp) {
          if (!TempsToInstrument.insert(Addr))
            continue;  // We've seen this temp in the current BB.
        }
      } else if (ClInvalidPointerPairs &&
                 isInterestingPointerComparisonOrSubtraction(&Inst)) {
        PointerComparisonsOrSubtracts.push_back(&Inst);
        continue;
      } else if (isa<MemIntrinsic>(Inst)) {
        // ok, take it.
      } else {
        if (isa<AllocaInst>(Inst))
          NumAllocas++;
        CallSite CS(&Inst);
        if (CS) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CS.doesNotReturn())
            NoReturnCalls.push_back(CS.getInstruction());
        }
        continue;
      }
      ToInstrument.push_back(&Inst);
      NumInsnsPerBB++;
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
        break;
    }
  }

  Function *UninstrumentedDuplicate = nullptr;
  bool LikelyToInstrument =
      !NoReturnCalls.empty() || !ToInstrument.empty() || (NumAllocas > 0);
  if (ClKeepUninstrumented && LikelyToInstrument) {
    ValueToValueMapTy VMap;
    UninstrumentedDuplicate = CloneFunction(&F, VMap, false);
    UninstrumentedDuplicate->removeFnAttr(Attribute::SanitizeAddress);
    UninstrumentedDuplicate->setName("NOASAN_" + F.getName());
    F.getParent()->getFunctionList().push_back(UninstrumentedDuplicate);
  }

  bool UseCalls = false;
  if (ClInstrumentationWithCallsThreshold >= 0 &&
      ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
    UseCalls = true;

  // Instrument.
  int NumInstrumented = 0;
  for (auto Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
        instrumentMop(Inst, UseCalls);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc);
  }

  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    NumInstrumented++;
  }

  bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();

  if (InjectCoverage(F, AllBlocks))
    res = true;

  DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");

  if (ClKeepUninstrumented) {
    if (!res) {
      // No instrumentation is done, no need for the duplicate.
      if (UninstrumentedDuplicate)
        UninstrumentedDuplicate->eraseFromParent();
    } else {
      // The function was instrumented. We must have the duplicate.
      assert(UninstrumentedDuplicate);
      UninstrumentedDuplicate->setSection("NOASAN");
      assert(!F.hasSection());
      F.setSection("ASAN");
    }
  }

  return res;
}

// Workaround for bug 11395: we don't want to instrument the stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.

// Workaround for bug 11395: we don't want to instrument the stack in functions
// with large inline assembly blobs (32-bit only), otherwise the register
// allocator may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = checkInterfaceFunction(
        M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
                              IntptrTy, IntptrTy, NULL));
    AsanStackFreeFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
        kAsanStackFreeNameTemplate + Suffix, IRB.getVoidTy(), IntptrTy,
        IntptrTy, IntptrTy, NULL));
  }
  AsanPoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
}

void
FunctionStackPoisoner::poisonRedZones(const ArrayRef<uint8_t> ShadowBytes,
                                      IRBuilder<> &IRB, Value *ShadowBase,
                                      bool DoPoison) {
  size_t n = ShadowBytes.size();
  size_t i = 0;
  // We need to (un)poison n bytes of stack shadow. Poison as many as we can
  // using 64-bit stores (if we are on a 64-bit arch), then poison the rest
  // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
  for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
       LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
    for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
      uint64_t Val = 0;
      for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
        if (ASan.DL->isLittleEndian())
          Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
        else
          Val = (Val << 8) | ShadowBytes[i + j];
      }
      if (!Val) continue;
      Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
      Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
      Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
      IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
    }
  }
}
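
// Worked example of the store-coalescing loops above (the shadow byte values
// are made up for illustration): given
//   ShadowBytes = {0xf1, 0xf1, 0xf1, 0xf1, 0x00, 0x00, 0xf3, 0xf3}
// on a little-endian 64-bit target all 8 bytes are combined into a single
// 64-bit store of 0xf3f30000f1f1f1f1 (or of 0 when DoPoison is false); on a
// big-endian target the same bytes combine to 0xf1f1f1f10000f3f3. Chunks whose
// combined value is 0 are skipped entirely, since the corresponding shadow was
// never poisoned in the first place.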

// Fake stack allocator (asan_fake_stack.h) has 11 size classes, one for each
// power of 2 between kMinStackMallocSize and kMaxStackMallocSize; the class
// index ranges from 0 to kMaxAsanStackMallocSizeClass.
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0; ; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize)
      return i;
  llvm_unreachable("impossible LocalStackSize");
}
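
// Worked example, derived from the loop above: frame sizes up to
// kMinStackMallocSize (64 bytes) map to class 0, 65..128 bytes to class 1,
// 129..256 bytes to class 2, and so on; kMaxStackMallocSize (the largest
// allowed size) maps to class 10 == kMaxAsanStackMallocSizeClass.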

// Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic.
// We cannot use the memset intrinsic here because it may end up calling the
// actual memset. Size is a multiple of 8.
// Currently this generates 8-byte stores on x86_64; it may be better to
// generate wider stores.
void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
    IRBuilder<> &IRB, Value *ShadowBase, int Size) {
  assert(!(Size % 8));
  assert(kAsanStackAfterReturnMagic == 0xf5);
  for (int i = 0; i < Size; i += 8) {
    Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
                    IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
  }
}

static DebugLoc getFunctionEntryDebugLocation(Function &F) {
  for (const auto &Inst : F.getEntryBlock())
    if (!isa<AllocaInst>(Inst))
      return Inst.getDebugLoc();
  return DebugLoc();
}

void FunctionStackPoisoner::poisonStack() {
  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);

  assert(AllocaVec.size() > 0);
  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);
  IRB.SetCurrentDebugLocation(EntryDebugLocation);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = { AI->getName().data(),
                                       getAllocaSizeInBytes(AI),
                                       AI->getAlignment(), AI, 0};
    SVD.push_back(D);
  }
  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t MinHeaderSize = ASan.LongSize / 2;
  ASanStackFrameLayout L;
  ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
  DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc =
      ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;

  Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
  AllocaInst *MyAlloca =
      new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
  MyAlloca->setDebugLoc(EntryDebugLocation);
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  MyAlloca->setAlignment(FrameAlignment);
  assert(MyAlloca->isStaticAlloca());
  Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
  Value *LocalStackBase = OrigStackBase;

  if (DoStackMalloc) {
    // LocalStackBase = OrigStackBase
    // if (__asan_option_detect_stack_use_after_return)
    //   LocalStackBase = __asan_stack_malloc_N(LocalStackSize, OrigStackBase);
    StackMallocIdx = StackMallocSizeClass(LocalStackSize);
    assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
    Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
        kAsanOptionDetectUAR, IRB.getInt32Ty());
    Value *Cmp = IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
                                  Constant::getNullValue(IRB.getInt32Ty()));
    Instruction *Term = SplitBlockAndInsertIfThen(Cmp, InsBefore, false);
    BasicBlock *CmpBlock = cast<Instruction>(Cmp)->getParent();
    IRBuilder<> IRBIf(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    LocalStackBase = IRBIf.CreateCall2(
        AsanStackMallocFunc[StackMallocIdx],
        ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
    BasicBlock *SetBlock = cast<Instruction>(LocalStackBase)->getParent();
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    PHINode *Phi = IRB.CreatePHI(IntptrTy, 2);
    Phi->addIncoming(OrigStackBase, CmpBlock);
    Phi->addIncoming(LocalStackBase, SetBlock);
    LocalStackBase = Phi;
  }

  // Insert poison/unpoison calls that correspond to the llvm.lifetime
  // intrinsics collected for each alloca.
  bool HavePoisonedAllocas = false;
  for (const auto &APC : AllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    HavePoisonedAllocas |= APC.DoPoison;
  }

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
                                   /*AllowMerging*/true);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
                                             IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
                                                     2 * ASan.LongSize/8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
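
  // To recap the stores above, the start of the frame (the left redzone) now
  // looks like this, with PtrSize = ASan.LongSize / 8:
  //   LocalStackBase + 0*PtrSize : kCurrentStackFrameMagic
  //   LocalStackBase + 1*PtrSize : pointer to the frame description string
  //   LocalStackBase + 2*PtrSize : PC of the function (address of F)
  // The rest of the left redzone is not written by this function.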

  // Poison the stack redzones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);

  // (Un)poison the stack before all ret instructions.
  for (auto Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if LocalStackBase != OrigStackBase:
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(LocalStackBase) = 0
      //     else
      //         __asan_stack_free_N(LocalStackBase, LocalStackSize,
      //                             OrigStackBase)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp = IRBRet.CreateICmpNE(LocalStackBase, OrigStackBase);
      TerminatorInst *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
                                           ClassSize >> Mapping.Scale);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            LocalStackBase,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall3(AsanStackFreeFunc[StackMallocIdx], LocalStackBase,
                              ConstantInt::get(IntptrTy, LocalStackSize),
                              OrigStackBase);
      }

      IRBuilder<> IRBElse(ElseTerm);
      poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
    } else if (HavePoisonedAllocas) {
      // If we poisoned some allocas via llvm.lifetime intrinsics,
      // unpoison the whole stack frame now.
      assert(LocalStackBase == OrigStackBase);
      poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
    } else {
      poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec)
    AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to the ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
                           : AsanUnpoisonStackMemoryFunc,
                  AddrArg, SizeArg);
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
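
// Illustrative IR pattern (hypothetical, not from the source) that the steps
// above act on:
//
//   %buf = alloca [64 x i8]
//   %p   = bitcast [64 x i8]* %buf to i8*
//   call void @llvm.lifetime.start(i64 64, i8* %p)
//   ...
//   call void @llvm.lifetime.end(i64 64, i8* %p)
//
// findAllocaForValue (below) walks from %p back through the bitcast to %buf;
// because the size is a constant 64, per steps (1)-(2) the lifetime.start
// marker gets an unpoison call and the lifetime.end marker gets a poison call.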

AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
    // We're interested only in allocas we can handle.
    return isInterestingAlloca(*AI) ? AI : nullptr;
  // See if we've already calculated (or started to calculate) the alloca for
  // this value.
  AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
  if (I != AllocaForValue.end())
    return I->second;
  // Store nullptr while we're calculating the alloca for value V to avoid
  // infinite recursion if the value references itself.
  AllocaForValue[V] = nullptr;
  AllocaInst *Res = nullptr;
  if (CastInst *CI = dyn_cast<CastInst>(V))
    Res = findAllocaForValue(CI->getOperand(0));
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *IncValue = PN->getIncomingValue(i);
      // Allow self-referencing phi-nodes.
      if (IncValue == PN) continue;
      AllocaInst *IncValueAI = findAllocaForValue(IncValue);
      // The allocas for all incoming values must exist and must all be equal.
      if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
        return nullptr;
      Res = IncValueAI;
    }
  }
  if (Res)
    AllocaForValue[V] = Res;
  return Res;
}
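
// Illustrative example (hypothetical IR) of the phi handling above: both
// incoming values resolve, through casts, to the same alloca, so the phi
// itself maps to that alloca. If they resolved to different allocas (or to
// none), findAllocaForValue would return nullptr.
//
//   entry:
//     %a = alloca i32
//     ...
//   bb1:
//     %p1 = bitcast i32* %a to i8*
//     br label %merge
//   bb2:
//     %p2 = bitcast i32* %a to i8*
//     br label %merge
//   merge:
//     %p = phi i8* [ %p1, %bb1 ], [ %p2, %bb2 ]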