1 //===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file is a part of AddressSanitizer, an address sanity checker. 11 // Details of the algorithm: 12 // http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "llvm/Transforms/Instrumentation.h" 17 #include "llvm/ADT/ArrayRef.h" 18 #include "llvm/ADT/DenseMap.h" 19 #include "llvm/ADT/DenseSet.h" 20 #include "llvm/ADT/DepthFirstIterator.h" 21 #include "llvm/ADT/SmallSet.h" 22 #include "llvm/ADT/SmallString.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/IR/CallSite.h" 28 #include "llvm/IR/DIBuilder.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/Dominators.h" 31 #include "llvm/IR/Function.h" 32 #include "llvm/IR/IRBuilder.h" 33 #include "llvm/IR/InlineAsm.h" 34 #include "llvm/IR/InstVisitor.h" 35 #include "llvm/IR/IntrinsicInst.h" 36 #include "llvm/IR/LLVMContext.h" 37 #include "llvm/IR/MDBuilder.h" 38 #include "llvm/IR/Module.h" 39 #include "llvm/IR/Type.h" 40 #include "llvm/MC/MCSectionMachO.h" 41 #include "llvm/Support/CommandLine.h" 42 #include "llvm/Support/DataTypes.h" 43 #include "llvm/Support/Debug.h" 44 #include "llvm/Support/Endian.h" 45 #include "llvm/Support/SwapByteOrder.h" 46 #include "llvm/Transforms/Scalar.h" 47 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" 48 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 49 #include "llvm/Transforms/Utils/Cloning.h" 50 #include "llvm/Transforms/Utils/Local.h" 51 #include "llvm/Transforms/Utils/ModuleUtils.h" 52 #include <algorithm> 53 #include <string> 54 #include <system_error> 55 56 using namespace llvm; 57 58 #define DEBUG_TYPE "asan" 59 60 static const uint64_t kDefaultShadowScale = 3; 61 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29; 62 static const uint64_t kIOSShadowOffset32 = 1ULL << 30; 63 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44; 64 static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G. 
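// Each of these shadow offsets is the per-platform Offset term of the mapping
// Shadow = (Mem >> Scale) + Offset; getShadowMapping() below picks one of them
// (and ORs it in instead of adding when the offset is a power of two).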
65 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41; 66 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000; 67 static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37; 68 static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36; 69 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30; 70 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46; 71 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28; 72 73 static const size_t kMinStackMallocSize = 1 << 6; // 64B 74 static const size_t kMaxStackMallocSize = 1 << 16; // 64K 75 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3; 76 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E; 77 78 static const char *const kAsanModuleCtorName = "asan.module_ctor"; 79 static const char *const kAsanModuleDtorName = "asan.module_dtor"; 80 static const uint64_t kAsanCtorAndDtorPriority = 1; 81 static const char *const kAsanReportErrorTemplate = "__asan_report_"; 82 static const char *const kAsanReportLoadN = "__asan_report_load_n"; 83 static const char *const kAsanReportStoreN = "__asan_report_store_n"; 84 static const char *const kAsanRegisterGlobalsName = "__asan_register_globals"; 85 static const char *const kAsanUnregisterGlobalsName = 86 "__asan_unregister_globals"; 87 static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init"; 88 static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init"; 89 static const char *const kAsanInitName = "__asan_init_v5"; 90 static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp"; 91 static const char *const kAsanPtrSub = "__sanitizer_ptr_sub"; 92 static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return"; 93 static const int kMaxAsanStackMallocSizeClass = 10; 94 static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_"; 95 static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_"; 96 static const char *const kAsanGenPrefix = "__asan_gen_"; 97 static const char *const kSanCovGenPrefix = "__sancov_gen_"; 98 static const char *const kAsanPoisonStackMemoryName = 99 "__asan_poison_stack_memory"; 100 static const char *const kAsanUnpoisonStackMemoryName = 101 "__asan_unpoison_stack_memory"; 102 103 static const char *const kAsanOptionDetectUAR = 104 "__asan_option_detect_stack_use_after_return"; 105 106 #ifndef NDEBUG 107 static const int kAsanStackAfterReturnMagic = 0xf5; 108 #endif 109 110 // Accesses sizes are powers of two: 1, 2, 4, 8, 16. 111 static const size_t kNumberOfAccessSizes = 5; 112 113 static const unsigned kAllocaRzSize = 32; 114 static const unsigned kAsanAllocaLeftMagic = 0xcacacacaU; 115 static const unsigned kAsanAllocaRightMagic = 0xcbcbcbcbU; 116 static const unsigned kAsanAllocaPartialVal1 = 0xcbcbcb00U; 117 static const unsigned kAsanAllocaPartialVal2 = 0x000000cbU; 118 119 // Command-line flags. 120 121 // This flag may need to be replaced with -f[no-]asan-reads. 
122 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads", 123 cl::desc("instrument read instructions"), cl::Hidden, cl::init(true)); 124 static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes", 125 cl::desc("instrument write instructions"), cl::Hidden, cl::init(true)); 126 static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics", 127 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), 128 cl::Hidden, cl::init(true)); 129 static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path", 130 cl::desc("use instrumentation with slow path for all accesses"), 131 cl::Hidden, cl::init(false)); 132 // This flag limits the number of instructions to be instrumented 133 // in any given BB. Normally, this should be set to unlimited (INT_MAX), 134 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary 135 // set it to 10000. 136 static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", 137 cl::init(10000), 138 cl::desc("maximal number of instructions to instrument in any given BB"), 139 cl::Hidden); 140 // This flag may need to be replaced with -f[no]asan-stack. 141 static cl::opt<bool> ClStack("asan-stack", 142 cl::desc("Handle stack memory"), cl::Hidden, cl::init(true)); 143 static cl::opt<bool> ClUseAfterReturn("asan-use-after-return", 144 cl::desc("Check return-after-free"), cl::Hidden, cl::init(true)); 145 // This flag may need to be replaced with -f[no]asan-globals. 146 static cl::opt<bool> ClGlobals("asan-globals", 147 cl::desc("Handle global objects"), cl::Hidden, cl::init(true)); 148 static cl::opt<bool> ClInitializers("asan-initialization-order", 149 cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true)); 150 static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", 151 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), 152 cl::Hidden, cl::init(false)); 153 static cl::opt<unsigned> ClRealignStack("asan-realign-stack", 154 cl::desc("Realign stack to the value of this flag (power of two)"), 155 cl::Hidden, cl::init(32)); 156 static cl::opt<int> ClInstrumentationWithCallsThreshold( 157 "asan-instrumentation-with-call-threshold", 158 cl::desc("If the function being instrumented contains more than " 159 "this number of memory accesses, use callbacks instead of " 160 "inline checks (-1 means never use callbacks)."), 161 cl::Hidden, cl::init(7000)); 162 static cl::opt<std::string> ClMemoryAccessCallbackPrefix( 163 "asan-memory-access-callback-prefix", 164 cl::desc("Prefix for memory access callbacks"), cl::Hidden, 165 cl::init("__asan_")); 166 static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas", 167 cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(false)); 168 169 // These flags allow to change the shadow mapping. 170 // The shadow mapping looks like 171 // Shadow = (Mem >> scale) + (1 << offset_log) 172 static cl::opt<int> ClMappingScale("asan-mapping-scale", 173 cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0)); 174 175 // Optimization flags. Not user visible, used mostly for testing 176 // and benchmarking the tool. 
177 static cl::opt<bool> ClOpt("asan-opt", 178 cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true)); 179 static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp", 180 cl::desc("Instrument the same temp just once"), cl::Hidden, 181 cl::init(true)); 182 static cl::opt<bool> ClOptGlobals("asan-opt-globals", 183 cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true)); 184 185 static cl::opt<bool> ClCheckLifetime("asan-check-lifetime", 186 cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), 187 cl::Hidden, cl::init(false)); 188 189 static cl::opt<bool> ClDynamicAllocaStack( 190 "asan-stack-dynamic-alloca", 191 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, 192 cl::init(true)); 193 194 // Debug flags. 195 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, 196 cl::init(0)); 197 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"), 198 cl::Hidden, cl::init(0)); 199 static cl::opt<std::string> ClDebugFunc("asan-debug-func", 200 cl::Hidden, cl::desc("Debug func")); 201 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), 202 cl::Hidden, cl::init(-1)); 203 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"), 204 cl::Hidden, cl::init(-1)); 205 206 STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); 207 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); 208 STATISTIC(NumInstrumentedDynamicAllocas, 209 "Number of instrumented dynamic allocas"); 210 STATISTIC(NumOptimizedAccessesToGlobalArray, 211 "Number of optimized accesses to global arrays"); 212 STATISTIC(NumOptimizedAccessesToGlobalVar, 213 "Number of optimized accesses to global vars"); 214 215 namespace { 216 /// Frontend-provided metadata for source location. 217 struct LocationMetadata { 218 StringRef Filename; 219 int LineNo; 220 int ColumnNo; 221 222 LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {} 223 224 bool empty() const { return Filename.empty(); } 225 226 void parse(MDNode *MDN) { 227 assert(MDN->getNumOperands() == 3); 228 MDString *MDFilename = cast<MDString>(MDN->getOperand(0)); 229 Filename = MDFilename->getString(); 230 LineNo = 231 mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue(); 232 ColumnNo = 233 mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue(); 234 } 235 }; 236 237 /// Frontend-provided metadata for global variables. 238 class GlobalsMetadata { 239 public: 240 struct Entry { 241 Entry() 242 : SourceLoc(), Name(), IsDynInit(false), 243 IsBlacklisted(false) {} 244 LocationMetadata SourceLoc; 245 StringRef Name; 246 bool IsDynInit; 247 bool IsBlacklisted; 248 }; 249 250 GlobalsMetadata() : inited_(false) {} 251 252 void init(Module& M) { 253 assert(!inited_); 254 inited_ = true; 255 NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals"); 256 if (!Globals) 257 return; 258 for (auto MDN : Globals->operands()) { 259 // Metadata node contains the global and the fields of "Entry". 260 assert(MDN->getNumOperands() == 5); 261 auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0)); 262 // The optimizer may optimize away a global entirely. 263 if (!GV) 264 continue; 265 // We can already have an entry for GV if it was merged with another 266 // global. 
267 Entry &E = Entries[GV]; 268 if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1))) 269 E.SourceLoc.parse(Loc); 270 if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2))) 271 E.Name = Name->getString(); 272 ConstantInt *IsDynInit = 273 mdconst::extract<ConstantInt>(MDN->getOperand(3)); 274 E.IsDynInit |= IsDynInit->isOne(); 275 ConstantInt *IsBlacklisted = 276 mdconst::extract<ConstantInt>(MDN->getOperand(4)); 277 E.IsBlacklisted |= IsBlacklisted->isOne(); 278 } 279 } 280 281 /// Returns metadata entry for a given global. 282 Entry get(GlobalVariable *G) const { 283 auto Pos = Entries.find(G); 284 return (Pos != Entries.end()) ? Pos->second : Entry(); 285 } 286 287 private: 288 bool inited_; 289 DenseMap<GlobalVariable*, Entry> Entries; 290 }; 291 292 /// This struct defines the shadow mapping using the rule: 293 /// shadow = (mem >> Scale) ADD-or-OR Offset. 294 struct ShadowMapping { 295 int Scale; 296 uint64_t Offset; 297 bool OrShadowOffset; 298 }; 299 300 static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) { 301 bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android; 302 bool IsIOS = TargetTriple.isiOS(); 303 bool IsFreeBSD = TargetTriple.isOSFreeBSD(); 304 bool IsLinux = TargetTriple.isOSLinux(); 305 bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 || 306 TargetTriple.getArch() == llvm::Triple::ppc64le; 307 bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64; 308 bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips || 309 TargetTriple.getArch() == llvm::Triple::mipsel; 310 bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 || 311 TargetTriple.getArch() == llvm::Triple::mips64el; 312 bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64; 313 bool IsWindows = TargetTriple.isOSWindows(); 314 315 ShadowMapping Mapping; 316 317 if (LongSize == 32) { 318 if (IsAndroid) 319 Mapping.Offset = 0; 320 else if (IsMIPS32) 321 Mapping.Offset = kMIPS32_ShadowOffset32; 322 else if (IsFreeBSD) 323 Mapping.Offset = kFreeBSD_ShadowOffset32; 324 else if (IsIOS) 325 Mapping.Offset = kIOSShadowOffset32; 326 else if (IsWindows) 327 Mapping.Offset = kWindowsShadowOffset32; 328 else 329 Mapping.Offset = kDefaultShadowOffset32; 330 } else { // LongSize == 64 331 if (IsPPC64) 332 Mapping.Offset = kPPC64_ShadowOffset64; 333 else if (IsFreeBSD) 334 Mapping.Offset = kFreeBSD_ShadowOffset64; 335 else if (IsLinux && IsX86_64) 336 Mapping.Offset = kSmallX86_64ShadowOffset; 337 else if (IsMIPS64) 338 Mapping.Offset = kMIPS64_ShadowOffset64; 339 else if (IsAArch64) 340 Mapping.Offset = kAArch64_ShadowOffset64; 341 else 342 Mapping.Offset = kDefaultShadowOffset64; 343 } 344 345 Mapping.Scale = kDefaultShadowScale; 346 if (ClMappingScale) { 347 Mapping.Scale = ClMappingScale; 348 } 349 350 // OR-ing shadow offset if more efficient (at least on x86) if the offset 351 // is a power of two, but on ppc64 we have to use add since the shadow 352 // offset is not necessary 1/8-th of the address space. 353 Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1)); 354 355 return Mapping; 356 } 357 358 static size_t RedzoneSizeForScale(int MappingScale) { 359 // Redzone used for stack and globals is at least 32 bytes. 360 // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. 361 return std::max(32U, 1U << MappingScale); 362 } 363 364 /// AddressSanitizer: instrument the code in module to find memory bugs. 
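/// This pass runs per function; globals and their redzones are handled by the
/// AddressSanitizerModule pass below. With the default scale of 3 on
/// Linux/x86_64 the mapping above resolves to
/// Shadow = (Mem >> 3) + 0x7FFF8000.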
365 struct AddressSanitizer : public FunctionPass { 366 AddressSanitizer() : FunctionPass(ID) { 367 initializeAddressSanitizerPass(*PassRegistry::getPassRegistry()); 368 } 369 const char *getPassName() const override { 370 return "AddressSanitizerFunctionPass"; 371 } 372 void getAnalysisUsage(AnalysisUsage &AU) const override { 373 AU.addRequired<DominatorTreeWrapperPass>(); 374 } 375 void instrumentMop(Instruction *I, bool UseCalls); 376 void instrumentPointerComparisonOrSubtraction(Instruction *I); 377 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, 378 Value *Addr, uint32_t TypeSize, bool IsWrite, 379 Value *SizeArgument, bool UseCalls); 380 Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, 381 Value *ShadowValue, uint32_t TypeSize); 382 Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, 383 bool IsWrite, size_t AccessSizeIndex, 384 Value *SizeArgument); 385 void instrumentMemIntrinsic(MemIntrinsic *MI); 386 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); 387 bool runOnFunction(Function &F) override; 388 bool maybeInsertAsanInitAtFunctionEntry(Function &F); 389 bool doInitialization(Module &M) override; 390 static char ID; // Pass identification, replacement for typeid 391 392 DominatorTree &getDominatorTree() const { return *DT; } 393 394 private: 395 void initializeCallbacks(Module &M); 396 397 bool LooksLikeCodeInBug11395(Instruction *I); 398 bool GlobalIsLinkerInitialized(GlobalVariable *G); 399 400 LLVMContext *C; 401 const DataLayout *DL; 402 Triple TargetTriple; 403 int LongSize; 404 Type *IntptrTy; 405 ShadowMapping Mapping; 406 DominatorTree *DT; 407 Function *AsanCtorFunction; 408 Function *AsanInitFunction; 409 Function *AsanHandleNoReturnFunc; 410 Function *AsanPtrCmpFunction, *AsanPtrSubFunction; 411 // This array is indexed by AccessIsWrite and log2(AccessSize). 412 Function *AsanErrorCallback[2][kNumberOfAccessSizes]; 413 Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes]; 414 // This array is indexed by AccessIsWrite. 415 Function *AsanErrorCallbackSized[2], 416 *AsanMemoryAccessCallbackSized[2]; 417 Function *AsanMemmove, *AsanMemcpy, *AsanMemset; 418 InlineAsm *EmptyAsm; 419 GlobalsMetadata GlobalsMD; 420 421 friend struct FunctionStackPoisoner; 422 }; 423 424 class AddressSanitizerModule : public ModulePass { 425 public: 426 AddressSanitizerModule() : ModulePass(ID) {} 427 bool runOnModule(Module &M) override; 428 static char ID; // Pass identification, replacement for typeid 429 const char *getPassName() const override { 430 return "AddressSanitizerModule"; 431 } 432 433 private: 434 void initializeCallbacks(Module &M); 435 436 bool InstrumentGlobals(IRBuilder<> &IRB, Module &M); 437 bool ShouldInstrumentGlobal(GlobalVariable *G); 438 void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName); 439 void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); 440 size_t MinRedzoneSizeForGlobal() const { 441 return RedzoneSizeForScale(Mapping.Scale); 442 } 443 444 GlobalsMetadata GlobalsMD; 445 Type *IntptrTy; 446 LLVMContext *C; 447 const DataLayout *DL; 448 Triple TargetTriple; 449 ShadowMapping Mapping; 450 Function *AsanPoisonGlobals; 451 Function *AsanUnpoisonGlobals; 452 Function *AsanRegisterGlobals; 453 Function *AsanUnregisterGlobals; 454 }; 455 456 // Stack poisoning does not play well with exception handling. 457 // When an exception is thrown, we essentially bypass the code 458 // that unpoisones the stack. 
This is why the run-time library has 459 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire 460 // stack in the interceptor. This however does not work inside the 461 // actual function which catches the exception. Most likely because the 462 // compiler hoists the load of the shadow value somewhere too high. 463 // This causes asan to report a non-existing bug on 453.povray. 464 // It sounds like an LLVM bug. 465 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { 466 Function &F; 467 AddressSanitizer &ASan; 468 DIBuilder DIB; 469 LLVMContext *C; 470 Type *IntptrTy; 471 Type *IntptrPtrTy; 472 ShadowMapping Mapping; 473 474 SmallVector<AllocaInst*, 16> AllocaVec; 475 SmallVector<Instruction*, 8> RetVec; 476 unsigned StackAlignment; 477 478 Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1], 479 *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1]; 480 Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc; 481 482 // Stores a place and arguments of poisoning/unpoisoning call for alloca. 483 struct AllocaPoisonCall { 484 IntrinsicInst *InsBefore; 485 AllocaInst *AI; 486 uint64_t Size; 487 bool DoPoison; 488 }; 489 SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec; 490 491 // Stores left and right redzone shadow addresses for dynamic alloca 492 // and pointer to alloca instruction itself. 493 // LeftRzAddr is a shadow address for alloca left redzone. 494 // RightRzAddr is a shadow address for alloca right redzone. 495 struct DynamicAllocaCall { 496 AllocaInst *AI; 497 Value *LeftRzAddr; 498 Value *RightRzAddr; 499 bool Poison; 500 explicit DynamicAllocaCall(AllocaInst *AI, 501 Value *LeftRzAddr = nullptr, 502 Value *RightRzAddr = nullptr) 503 : AI(AI), LeftRzAddr(LeftRzAddr), RightRzAddr(RightRzAddr), Poison(true) 504 {} 505 }; 506 SmallVector<DynamicAllocaCall, 1> DynamicAllocaVec; 507 508 // Maps Value to an AllocaInst from which the Value is originated. 509 typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy; 510 AllocaForValueMapTy AllocaForValue; 511 512 bool HasNonEmptyInlineAsm; 513 std::unique_ptr<CallInst> EmptyInlineAsm; 514 515 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) 516 : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false), 517 C(ASan.C), IntptrTy(ASan.IntptrTy), 518 IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping), 519 StackAlignment(1 << Mapping.Scale), HasNonEmptyInlineAsm(false), 520 EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {} 521 522 bool runOnFunction() { 523 if (!ClStack) return false; 524 // Collect alloca, ret, lifetime instructions etc. 525 for (BasicBlock *BB : depth_first(&F.getEntryBlock())) 526 visit(*BB); 527 528 if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false; 529 530 initializeCallbacks(*F.getParent()); 531 532 poisonStack(); 533 534 if (ClDebugStack) { 535 DEBUG(dbgs() << F); 536 } 537 return true; 538 } 539 540 // Finds all Alloca instructions and puts 541 // poisoned red zones around all of them. 542 // Then unpoison everything back before the function returns. 543 void poisonStack(); 544 545 // ----------------------- Visitors. 546 /// \brief Collect all Ret instructions. 547 void visitReturnInst(ReturnInst &RI) { 548 RetVec.push_back(&RI); 549 } 550 551 // Unpoison dynamic allocas redzones. 
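// Before every return we store zero into the shadow of the alloca's left,
// partial and right redzones, so that memory is addressable again.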
552 void unpoisonDynamicAlloca(DynamicAllocaCall &AllocaCall) { 553 if (!AllocaCall.Poison) 554 return; 555 for (auto Ret : RetVec) { 556 IRBuilder<> IRBRet(Ret); 557 PointerType *Int32PtrTy = PointerType::getUnqual(IRBRet.getInt32Ty()); 558 Value *Zero = Constant::getNullValue(IRBRet.getInt32Ty()); 559 Value *PartialRzAddr = IRBRet.CreateSub(AllocaCall.RightRzAddr, 560 ConstantInt::get(IntptrTy, 4)); 561 IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.LeftRzAddr, 562 Int32PtrTy)); 563 IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(PartialRzAddr, 564 Int32PtrTy)); 565 IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.RightRzAddr, 566 Int32PtrTy)); 567 } 568 } 569 570 // Right shift for BigEndian and left shift for LittleEndian. 571 Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) { 572 return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift) 573 : IRB.CreateLShr(Val, Shift); 574 } 575 576 // Compute PartialRzMagic for dynamic alloca call. Since we don't know the 577 // size of requested memory until runtime, we should compute it dynamically. 578 // If PartialSize is 0, PartialRzMagic would contain kAsanAllocaRightMagic, 579 // otherwise it would contain the value that we will use to poison the 580 // partial redzone for alloca call. 581 Value *computePartialRzMagic(Value *PartialSize, IRBuilder<> &IRB); 582 583 // Deploy and poison redzones around dynamic alloca call. To do this, we 584 // should replace this call with another one with changed parameters and 585 // replace all its uses with new address, so 586 // addr = alloca type, old_size, align 587 // is replaced by 588 // new_size = (old_size + additional_size) * sizeof(type) 589 // tmp = alloca i8, new_size, max(align, 32) 590 // addr = tmp + 32 (first 32 bytes are for the left redzone). 591 // Additional_size is added to make new memory allocation contain not only 592 // requested memory, but also left, partial and right redzones. 593 // After that, we should poison redzones: 594 // (1) Left redzone with kAsanAllocaLeftMagic. 595 // (2) Partial redzone with the value, computed in runtime by 596 // computePartialRzMagic function. 597 // (3) Right redzone with kAsanAllocaRightMagic. 598 void handleDynamicAllocaCall(DynamicAllocaCall &AllocaCall); 599 600 /// \brief Collect Alloca instructions we want (and can) handle. 601 void visitAllocaInst(AllocaInst &AI) { 602 if (!isInterestingAlloca(AI)) return; 603 604 StackAlignment = std::max(StackAlignment, AI.getAlignment()); 605 if (isDynamicAlloca(AI)) 606 DynamicAllocaVec.push_back(DynamicAllocaCall(&AI)); 607 else 608 AllocaVec.push_back(&AI); 609 } 610 611 /// \brief Collect lifetime intrinsic calls to check for use-after-scope 612 /// errors. 613 void visitIntrinsicInst(IntrinsicInst &II) { 614 if (!ClCheckLifetime) return; 615 Intrinsic::ID ID = II.getIntrinsicID(); 616 if (ID != Intrinsic::lifetime_start && 617 ID != Intrinsic::lifetime_end) 618 return; 619 // Found lifetime intrinsic, add ASan instrumentation if necessary. 620 ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0)); 621 // If size argument is undefined, don't do anything. 622 if (Size->isMinusOne()) return; 623 // Check that size doesn't saturate uint64_t and can 624 // be stored in IntptrTy. 625 const uint64_t SizeValue = Size->getValue().getLimitedValue(); 626 if (SizeValue == ~0ULL || 627 !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) 628 return; 629 // Find alloca instruction that corresponds to llvm.lifetime argument. 
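// The pointer operand is typically a bitcast of the alloca, so we have to
// look through casts (and phi nodes) to reach the underlying AllocaInst.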
630 AllocaInst *AI = findAllocaForValue(II.getArgOperand(1)); 631 if (!AI) return; 632 bool DoPoison = (ID == Intrinsic::lifetime_end); 633 AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison}; 634 AllocaPoisonCallVec.push_back(APC); 635 } 636 637 void visitCallInst(CallInst &CI) { 638 HasNonEmptyInlineAsm |= 639 CI.isInlineAsm() && !CI.isIdenticalTo(EmptyInlineAsm.get()); 640 } 641 642 // ---------------------- Helpers. 643 void initializeCallbacks(Module &M); 644 645 bool doesDominateAllExits(const Instruction *I) const { 646 for (auto Ret : RetVec) { 647 if (!ASan.getDominatorTree().dominates(I, Ret)) 648 return false; 649 } 650 return true; 651 } 652 653 bool isDynamicAlloca(AllocaInst &AI) const { 654 return AI.isArrayAllocation() || !AI.isStaticAlloca(); 655 } 656 657 // Check if we want (and can) handle this alloca. 658 bool isInterestingAlloca(AllocaInst &AI) const { 659 return (AI.getAllocatedType()->isSized() && 660 // alloca() may be called with 0 size, ignore it. 661 getAllocaSizeInBytes(&AI) > 0); 662 } 663 664 uint64_t getAllocaSizeInBytes(AllocaInst *AI) const { 665 Type *Ty = AI->getAllocatedType(); 666 uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty); 667 return SizeInBytes; 668 } 669 /// Finds alloca where the value comes from. 670 AllocaInst *findAllocaForValue(Value *V); 671 void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB, 672 Value *ShadowBase, bool DoPoison); 673 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison); 674 675 void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase, 676 int Size); 677 Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L, 678 bool Dynamic); 679 PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue, 680 Instruction *ThenTerm, Value *ValueIfFalse); 681 }; 682 683 } // namespace 684 685 char AddressSanitizer::ID = 0; 686 INITIALIZE_PASS_BEGIN(AddressSanitizer, "asan", 687 "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", 688 false, false) 689 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 690 INITIALIZE_PASS_END(AddressSanitizer, "asan", 691 "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", 692 false, false) 693 FunctionPass *llvm::createAddressSanitizerFunctionPass() { 694 return new AddressSanitizer(); 695 } 696 697 char AddressSanitizerModule::ID = 0; 698 INITIALIZE_PASS(AddressSanitizerModule, "asan-module", 699 "AddressSanitizer: detects use-after-free and out-of-bounds bugs." 700 "ModulePass", false, false) 701 ModulePass *llvm::createAddressSanitizerModulePass() { 702 return new AddressSanitizerModule(); 703 } 704 705 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { 706 size_t Res = countTrailingZeros(TypeSize / 8); 707 assert(Res < kNumberOfAccessSizes); 708 return Res; 709 } 710 711 // \brief Create a constant for Str so that we can pass it to the run-time lib. 712 static GlobalVariable *createPrivateGlobalForString( 713 Module &M, StringRef Str, bool AllowMerging) { 714 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); 715 // We use private linkage for module-local strings. If they can be merged 716 // with another one, we set the unnamed_addr attribute. 717 GlobalVariable *GV = 718 new GlobalVariable(M, StrConst->getType(), true, 719 GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix); 720 if (AllowMerging) 721 GV->setUnnamedAddr(true); 722 GV->setAlignment(1); // Strings may not be merged w/o setting align 1. 
723 return GV; 724 } 725 726 /// \brief Create a global describing a source location. 727 static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M, 728 LocationMetadata MD) { 729 Constant *LocData[] = { 730 createPrivateGlobalForString(M, MD.Filename, true), 731 ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo), 732 ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo), 733 }; 734 auto LocStruct = ConstantStruct::getAnon(LocData); 735 auto GV = new GlobalVariable(M, LocStruct->getType(), true, 736 GlobalValue::PrivateLinkage, LocStruct, 737 kAsanGenPrefix); 738 GV->setUnnamedAddr(true); 739 return GV; 740 } 741 742 static bool GlobalWasGeneratedByAsan(GlobalVariable *G) { 743 return G->getName().find(kAsanGenPrefix) == 0 || 744 G->getName().find(kSanCovGenPrefix) == 0; 745 } 746 747 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { 748 // Shadow >> scale 749 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); 750 if (Mapping.Offset == 0) 751 return Shadow; 752 // (Shadow >> scale) | offset 753 if (Mapping.OrShadowOffset) 754 return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset)); 755 else 756 return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset)); 757 } 758 759 // Instrument memset/memmove/memcpy 760 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { 761 IRBuilder<> IRB(MI); 762 if (isa<MemTransferInst>(MI)) { 763 IRB.CreateCall3( 764 isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy, 765 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 766 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), 767 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)); 768 } else if (isa<MemSetInst>(MI)) { 769 IRB.CreateCall3( 770 AsanMemset, 771 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 772 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), 773 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)); 774 } 775 MI->eraseFromParent(); 776 } 777 778 // If I is an interesting memory access, return the PointerOperand 779 // and set IsWrite/Alignment. Otherwise return nullptr. 780 static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite, 781 unsigned *Alignment) { 782 // Skip memory accesses inserted by another instrumentation. 783 if (I->getMetadata("nosanitize")) 784 return nullptr; 785 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 786 if (!ClInstrumentReads) return nullptr; 787 *IsWrite = false; 788 *Alignment = LI->getAlignment(); 789 return LI->getPointerOperand(); 790 } 791 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 792 if (!ClInstrumentWrites) return nullptr; 793 *IsWrite = true; 794 *Alignment = SI->getAlignment(); 795 return SI->getPointerOperand(); 796 } 797 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 798 if (!ClInstrumentAtomics) return nullptr; 799 *IsWrite = true; 800 *Alignment = 0; 801 return RMW->getPointerOperand(); 802 } 803 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { 804 if (!ClInstrumentAtomics) return nullptr; 805 *IsWrite = true; 806 *Alignment = 0; 807 return XCHG->getPointerOperand(); 808 } 809 return nullptr; 810 } 811 812 static bool isPointerOperand(Value *V) { 813 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V); 814 } 815 816 // This is a rough heuristic; it may cause both false positives and 817 // false negatives. The proper implementation requires cooperation with 818 // the frontend. 
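// Specifically, a relational icmp (<, <=, >, >=) or an integer subtraction is
// treated as interesting only when both operands are (or were, via ptrtoint)
// pointers.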
819 static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) { 820 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { 821 if (!Cmp->isRelational()) 822 return false; 823 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { 824 if (BO->getOpcode() != Instruction::Sub) 825 return false; 826 } else { 827 return false; 828 } 829 if (!isPointerOperand(I->getOperand(0)) || 830 !isPointerOperand(I->getOperand(1))) 831 return false; 832 return true; 833 } 834 835 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) { 836 // If a global variable does not have dynamic initialization we don't 837 // have to instrument it. However, if a global does not have initializer 838 // at all, we assume it has dynamic initializer (in other TU). 839 return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit; 840 } 841 842 void 843 AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) { 844 IRBuilder<> IRB(I); 845 Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction; 846 Value *Param[2] = {I->getOperand(0), I->getOperand(1)}; 847 for (int i = 0; i < 2; i++) { 848 if (Param[i]->getType()->isPointerTy()) 849 Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy); 850 } 851 IRB.CreateCall2(F, Param[0], Param[1]); 852 } 853 854 void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) { 855 bool IsWrite = false; 856 unsigned Alignment = 0; 857 Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment); 858 assert(Addr); 859 if (ClOpt && ClOptGlobals) { 860 if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) { 861 // If initialization order checking is disabled, a simple access to a 862 // dynamically initialized global is always valid. 863 if (!ClInitializers || GlobalIsLinkerInitialized(G)) { 864 NumOptimizedAccessesToGlobalVar++; 865 return; 866 } 867 } 868 ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr); 869 if (CE && CE->isGEPWithNoNotionalOverIndexing()) { 870 if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) { 871 if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) { 872 NumOptimizedAccessesToGlobalArray++; 873 return; 874 } 875 } 876 } 877 } 878 879 Type *OrigPtrTy = Addr->getType(); 880 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType(); 881 882 assert(OrigTy->isSized()); 883 uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy); 884 885 assert((TypeSize % 8) == 0); 886 887 if (IsWrite) 888 NumInstrumentedWrites++; 889 else 890 NumInstrumentedReads++; 891 892 unsigned Granularity = 1 << Mapping.Scale; 893 // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check 894 // if the data is properly aligned. 895 if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 || 896 TypeSize == 128) && 897 (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8)) 898 return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls); 899 // Instrument unusual size or unusual alignment. 900 // We can not do it with a single check, so we do 1-byte check for the first 901 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able 902 // to report the actual access size. 
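// E.g. a 10-byte access at Addr gets 1-byte checks at Addr and at Addr + 9.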
903 IRBuilder<> IRB(I); 904 Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); 905 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 906 if (UseCalls) { 907 IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size); 908 } else { 909 Value *LastByte = IRB.CreateIntToPtr( 910 IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), 911 OrigPtrTy); 912 instrumentAddress(I, I, Addr, 8, IsWrite, Size, false); 913 instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false); 914 } 915 } 916 917 // Validate the result of Module::getOrInsertFunction called for an interface 918 // function of AddressSanitizer. If the instrumented module defines a function 919 // with the same name, their prototypes must match, otherwise 920 // getOrInsertFunction returns a bitcast. 921 static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { 922 if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast); 923 FuncOrBitcast->dump(); 924 report_fatal_error("trying to redefine an AddressSanitizer " 925 "interface function"); 926 } 927 928 Instruction *AddressSanitizer::generateCrashCode( 929 Instruction *InsertBefore, Value *Addr, 930 bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) { 931 IRBuilder<> IRB(InsertBefore); 932 CallInst *Call = SizeArgument 933 ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument) 934 : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr); 935 936 // We don't do Call->setDoesNotReturn() because the BB already has 937 // UnreachableInst at the end. 938 // This EmptyAsm is required to avoid callback merge. 939 IRB.CreateCall(EmptyAsm); 940 return Call; 941 } 942 943 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, 944 Value *ShadowValue, 945 uint32_t TypeSize) { 946 size_t Granularity = 1 << Mapping.Scale; 947 // Addr & (Granularity - 1) 948 Value *LastAccessedByte = IRB.CreateAnd( 949 AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); 950 // (Addr & (Granularity - 1)) + size - 1 951 if (TypeSize / 8 > 1) 952 LastAccessedByte = IRB.CreateAdd( 953 LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); 954 // (uint8_t) ((Addr & (Granularity-1)) + size - 1) 955 LastAccessedByte = IRB.CreateIntCast( 956 LastAccessedByte, ShadowValue->getType(), false); 957 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue 958 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); 959 } 960 961 void AddressSanitizer::instrumentAddress(Instruction *OrigIns, 962 Instruction *InsertBefore, Value *Addr, 963 uint32_t TypeSize, bool IsWrite, 964 Value *SizeArgument, bool UseCalls) { 965 IRBuilder<> IRB(InsertBefore); 966 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 967 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); 968 969 if (UseCalls) { 970 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex], 971 AddrLong); 972 return; 973 } 974 975 Type *ShadowTy = IntegerType::get( 976 *C, std::max(8U, TypeSize >> Mapping.Scale)); 977 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); 978 Value *ShadowPtr = memToShadow(AddrLong, IRB); 979 Value *CmpVal = Constant::getNullValue(ShadowTy); 980 Value *ShadowValue = IRB.CreateLoad( 981 IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); 982 983 Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); 984 size_t Granularity = 1 << Mapping.Scale; 985 TerminatorInst *CrashTerm = nullptr; 986 987 if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { 988 // We use branch weights for the slow path check, to 
indicate that the slow 989 // path is rarely taken. This seems to be the case for SPEC benchmarks. 990 TerminatorInst *CheckTerm = 991 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false, 992 MDBuilder(*C).createBranchWeights(1, 100000)); 993 assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional()); 994 BasicBlock *NextBB = CheckTerm->getSuccessor(0); 995 IRB.SetInsertPoint(CheckTerm); 996 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); 997 BasicBlock *CrashBlock = 998 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); 999 CrashTerm = new UnreachableInst(*C, CrashBlock); 1000 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); 1001 ReplaceInstWithInst(CheckTerm, NewTerm); 1002 } else { 1003 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true); 1004 } 1005 1006 Instruction *Crash = generateCrashCode( 1007 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument); 1008 Crash->setDebugLoc(OrigIns->getDebugLoc()); 1009 } 1010 1011 void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit, 1012 GlobalValue *ModuleName) { 1013 // Set up the arguments to our poison/unpoison functions. 1014 IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt()); 1015 1016 // Add a call to poison all external globals before the given function starts. 1017 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); 1018 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); 1019 1020 // Add calls to unpoison all globals before each return instruction. 1021 for (auto &BB : GlobalInit.getBasicBlockList()) 1022 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) 1023 CallInst::Create(AsanUnpoisonGlobals, "", RI); 1024 } 1025 1026 void AddressSanitizerModule::createInitializerPoisonCalls( 1027 Module &M, GlobalValue *ModuleName) { 1028 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1029 1030 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 1031 for (Use &OP : CA->operands()) { 1032 if (isa<ConstantAggregateZero>(OP)) 1033 continue; 1034 ConstantStruct *CS = cast<ConstantStruct>(OP); 1035 1036 // Must have a function or null ptr. 1037 if (Function* F = dyn_cast<Function>(CS->getOperand(1))) { 1038 if (F->getName() == kAsanModuleCtorName) continue; 1039 ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0)); 1040 // Don't instrument CTORs that will run before asan.module_ctor. 1041 if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue; 1042 poisonOneInitializer(*F, ModuleName); 1043 } 1044 } 1045 } 1046 1047 bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { 1048 Type *Ty = cast<PointerType>(G->getType())->getElementType(); 1049 DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); 1050 1051 if (GlobalsMD.get(G).IsBlacklisted) return false; 1052 if (!Ty->isSized()) return false; 1053 if (!G->hasInitializer()) return false; 1054 if (GlobalWasGeneratedByAsan(G)) return false; // Our own global. 1055 // Touch only those globals that will not be defined in other modules. 1056 // Don't handle ODR linkage types and COMDATs since other modules may be built 1057 // without ASan. 1058 if (G->getLinkage() != GlobalVariable::ExternalLinkage && 1059 G->getLinkage() != GlobalVariable::PrivateLinkage && 1060 G->getLinkage() != GlobalVariable::InternalLinkage) 1061 return false; 1062 if (G->hasComdat()) 1063 return false; 1064 // Two problems with thread-locals: 1065 // - The address of the main thread's copy can't be computed at link-time. 
1066 // - Need to poison all copies, not just the main thread's one. 1067 if (G->isThreadLocal()) 1068 return false; 1069 // For now, just ignore this Global if the alignment is large. 1070 if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false; 1071 1072 if (G->hasSection()) { 1073 StringRef Section(G->getSection()); 1074 1075 if (TargetTriple.isOSBinFormatMachO()) { 1076 StringRef ParsedSegment, ParsedSection; 1077 unsigned TAA = 0, StubSize = 0; 1078 bool TAAParsed; 1079 std::string ErrorCode = 1080 MCSectionMachO::ParseSectionSpecifier(Section, ParsedSegment, 1081 ParsedSection, TAA, TAAParsed, 1082 StubSize); 1083 if (!ErrorCode.empty()) { 1084 report_fatal_error("Invalid section specifier '" + ParsedSection + 1085 "': " + ErrorCode + "."); 1086 } 1087 1088 // Ignore the globals from the __OBJC section. The ObjC runtime assumes 1089 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to 1090 // them. 1091 if (ParsedSegment == "__OBJC" || 1092 (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) { 1093 DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); 1094 return false; 1095 } 1096 // See http://code.google.com/p/address-sanitizer/issues/detail?id=32 1097 // Constant CFString instances are compiled in the following way: 1098 // -- the string buffer is emitted into 1099 // __TEXT,__cstring,cstring_literals 1100 // -- the constant NSConstantString structure referencing that buffer 1101 // is placed into __DATA,__cfstring 1102 // Therefore there's no point in placing redzones into __DATA,__cfstring. 1103 // Moreover, it causes the linker to crash on OS X 10.7 1104 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") { 1105 DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); 1106 return false; 1107 } 1108 // The linker merges the contents of cstring_literals and removes the 1109 // trailing zeroes. 1110 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) { 1111 DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); 1112 return false; 1113 } 1114 } 1115 1116 // Callbacks put into the CRT initializer/terminator sections 1117 // should not be instrumented. 1118 // See https://code.google.com/p/address-sanitizer/issues/detail?id=305 1119 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx 1120 if (Section.startswith(".CRT")) { 1121 DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n"); 1122 return false; 1123 } 1124 1125 // Globals from llvm.metadata aren't emitted, do not instrument them. 1126 if (Section == "llvm.metadata") return false; 1127 } 1128 1129 return true; 1130 } 1131 1132 void AddressSanitizerModule::initializeCallbacks(Module &M) { 1133 IRBuilder<> IRB(*C); 1134 // Declare our poisoning and unpoisoning functions. 1135 AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( 1136 kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr)); 1137 AsanPoisonGlobals->setLinkage(Function::ExternalLinkage); 1138 AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( 1139 kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr)); 1140 AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage); 1141 // Declare functions that register/unregister globals. 
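// Both take a pointer to an array of global descriptors plus the number of
// descriptors; InstrumentGlobals() below builds that array and calls them
// from the module ctor and dtor respectively.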
1142 AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( 1143 kAsanRegisterGlobalsName, IRB.getVoidTy(), 1144 IntptrTy, IntptrTy, nullptr)); 1145 AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); 1146 AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( 1147 kAsanUnregisterGlobalsName, 1148 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1149 AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage); 1150 } 1151 1152 // This function replaces all global variables with new variables that have 1153 // trailing redzones. It also creates a function that poisons 1154 // redzones and inserts this function into llvm.global_ctors. 1155 bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { 1156 GlobalsMD.init(M); 1157 1158 SmallVector<GlobalVariable *, 16> GlobalsToChange; 1159 1160 for (auto &G : M.globals()) { 1161 if (ShouldInstrumentGlobal(&G)) 1162 GlobalsToChange.push_back(&G); 1163 } 1164 1165 size_t n = GlobalsToChange.size(); 1166 if (n == 0) return false; 1167 1168 // A global is described by a structure 1169 // size_t beg; 1170 // size_t size; 1171 // size_t size_with_redzone; 1172 // const char *name; 1173 // const char *module_name; 1174 // size_t has_dynamic_init; 1175 // void *source_location; 1176 // We initialize an array of such structures and pass it to a run-time call. 1177 StructType *GlobalStructTy = 1178 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, 1179 IntptrTy, IntptrTy, nullptr); 1180 SmallVector<Constant *, 16> Initializers(n); 1181 1182 bool HasDynamicallyInitializedGlobals = false; 1183 1184 // We shouldn't merge same module names, as this string serves as unique 1185 // module ID in runtime. 1186 GlobalVariable *ModuleName = createPrivateGlobalForString( 1187 M, M.getModuleIdentifier(), /*AllowMerging*/false); 1188 1189 for (size_t i = 0; i < n; i++) { 1190 static const uint64_t kMaxGlobalRedzone = 1 << 18; 1191 GlobalVariable *G = GlobalsToChange[i]; 1192 1193 auto MD = GlobalsMD.get(G); 1194 // Create string holding the global name (use global name from metadata 1195 // if it's available, otherwise just write the name of global variable). 1196 GlobalVariable *Name = createPrivateGlobalForString( 1197 M, MD.Name.empty() ? G->getName() : MD.Name, 1198 /*AllowMerging*/ true); 1199 1200 PointerType *PtrTy = cast<PointerType>(G->getType()); 1201 Type *Ty = PtrTy->getElementType(); 1202 uint64_t SizeInBytes = DL->getTypeAllocSize(Ty); 1203 uint64_t MinRZ = MinRedzoneSizeForGlobal(); 1204 // MinRZ <= RZ <= kMaxGlobalRedzone 1205 // and trying to make RZ to be ~ 1/4 of SizeInBytes. 1206 uint64_t RZ = std::max(MinRZ, 1207 std::min(kMaxGlobalRedzone, 1208 (SizeInBytes / MinRZ / 4) * MinRZ)); 1209 uint64_t RightRedzoneSize = RZ; 1210 // Round up to MinRZ 1211 if (SizeInBytes % MinRZ) 1212 RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ); 1213 assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0); 1214 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); 1215 1216 StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr); 1217 Constant *NewInitializer = ConstantStruct::get( 1218 NewTy, G->getInitializer(), 1219 Constant::getNullValue(RightRedZoneTy), nullptr); 1220 1221 // Create a new global variable with enough space for a redzone. 
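// The replacement global has type { OriginalTy, [RightRedzoneSize x i8] };
// e.g. a 20-byte global with MinRZ == 32 gets a 44-byte right redzone, for a
// 64-byte total that keeps the redzone end MinRZ-aligned.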
1222 GlobalValue::LinkageTypes Linkage = G->getLinkage(); 1223 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) 1224 Linkage = GlobalValue::InternalLinkage; 1225 GlobalVariable *NewGlobal = new GlobalVariable( 1226 M, NewTy, G->isConstant(), Linkage, 1227 NewInitializer, "", G, G->getThreadLocalMode()); 1228 NewGlobal->copyAttributesFrom(G); 1229 NewGlobal->setAlignment(MinRZ); 1230 1231 Value *Indices2[2]; 1232 Indices2[0] = IRB.getInt32(0); 1233 Indices2[1] = IRB.getInt32(0); 1234 1235 G->replaceAllUsesWith( 1236 ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true)); 1237 NewGlobal->takeName(G); 1238 G->eraseFromParent(); 1239 1240 Constant *SourceLoc; 1241 if (!MD.SourceLoc.empty()) { 1242 auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc); 1243 SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy); 1244 } else { 1245 SourceLoc = ConstantInt::get(IntptrTy, 0); 1246 } 1247 1248 Initializers[i] = ConstantStruct::get( 1249 GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy), 1250 ConstantInt::get(IntptrTy, SizeInBytes), 1251 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), 1252 ConstantExpr::getPointerCast(Name, IntptrTy), 1253 ConstantExpr::getPointerCast(ModuleName, IntptrTy), 1254 ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr); 1255 1256 if (ClInitializers && MD.IsDynInit) 1257 HasDynamicallyInitializedGlobals = true; 1258 1259 DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); 1260 } 1261 1262 ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n); 1263 GlobalVariable *AllGlobals = new GlobalVariable( 1264 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage, 1265 ConstantArray::get(ArrayOfGlobalStructTy, Initializers), ""); 1266 1267 // Create calls for poisoning before initializers run and unpoisoning after. 1268 if (HasDynamicallyInitializedGlobals) 1269 createInitializerPoisonCalls(M, ModuleName); 1270 IRB.CreateCall2(AsanRegisterGlobals, 1271 IRB.CreatePointerCast(AllGlobals, IntptrTy), 1272 ConstantInt::get(IntptrTy, n)); 1273 1274 // We also need to unregister globals at the end, e.g. when a shared library 1275 // gets closed. 
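// This is done by emitting asan.module_dtor, which calls
// __asan_unregister_globals and is appended to llvm.global_dtors with the
// same priority as the module ctor.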
1276 Function *AsanDtorFunction = Function::Create( 1277 FunctionType::get(Type::getVoidTy(*C), false), 1278 GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); 1279 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); 1280 IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB)); 1281 IRB_Dtor.CreateCall2(AsanUnregisterGlobals, 1282 IRB.CreatePointerCast(AllGlobals, IntptrTy), 1283 ConstantInt::get(IntptrTy, n)); 1284 appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority); 1285 1286 DEBUG(dbgs() << M); 1287 return true; 1288 } 1289 1290 bool AddressSanitizerModule::runOnModule(Module &M) { 1291 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); 1292 if (!DLP) 1293 return false; 1294 DL = &DLP->getDataLayout(); 1295 C = &(M.getContext()); 1296 int LongSize = DL->getPointerSizeInBits(); 1297 IntptrTy = Type::getIntNTy(*C, LongSize); 1298 TargetTriple = Triple(M.getTargetTriple()); 1299 Mapping = getShadowMapping(TargetTriple, LongSize); 1300 initializeCallbacks(M); 1301 1302 bool Changed = false; 1303 1304 Function *CtorFunc = M.getFunction(kAsanModuleCtorName); 1305 assert(CtorFunc); 1306 IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator()); 1307 1308 if (ClGlobals) 1309 Changed |= InstrumentGlobals(IRB, M); 1310 1311 return Changed; 1312 } 1313 1314 void AddressSanitizer::initializeCallbacks(Module &M) { 1315 IRBuilder<> IRB(*C); 1316 // Create __asan_report* callbacks. 1317 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 1318 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 1319 AccessSizeIndex++) { 1320 // IsWrite and TypeSize are encoded in the function name. 1321 std::string Suffix = 1322 (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex); 1323 AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = 1324 checkInterfaceFunction( 1325 M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix, 1326 IRB.getVoidTy(), IntptrTy, nullptr)); 1327 AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] = 1328 checkInterfaceFunction( 1329 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix, 1330 IRB.getVoidTy(), IntptrTy, nullptr)); 1331 } 1332 } 1333 AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction( 1334 kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1335 AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction( 1336 kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1337 1338 AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction( 1339 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN", 1340 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1341 AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction( 1342 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN", 1343 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1344 1345 AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction( 1346 ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(), 1347 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); 1348 AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction( 1349 ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(), 1350 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); 1351 AsanMemset = checkInterfaceFunction(M.getOrInsertFunction( 1352 ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(), 1353 IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr)); 1354 1355 AsanHandleNoReturnFunc = checkInterfaceFunction( 1356 
M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr)); 1357 1358 AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction( 1359 kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1360 AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction( 1361 kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); 1362 // We insert an empty inline asm after __asan_report* to avoid callback merge. 1363 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), 1364 StringRef(""), StringRef(""), 1365 /*hasSideEffects=*/true); 1366 } 1367 1368 // virtual 1369 bool AddressSanitizer::doInitialization(Module &M) { 1370 // Initialize the private fields. No one has accessed them before. 1371 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); 1372 if (!DLP) 1373 report_fatal_error("data layout missing"); 1374 DL = &DLP->getDataLayout(); 1375 1376 GlobalsMD.init(M); 1377 1378 C = &(M.getContext()); 1379 LongSize = DL->getPointerSizeInBits(); 1380 IntptrTy = Type::getIntNTy(*C, LongSize); 1381 TargetTriple = Triple(M.getTargetTriple()); 1382 1383 AsanCtorFunction = Function::Create( 1384 FunctionType::get(Type::getVoidTy(*C), false), 1385 GlobalValue::InternalLinkage, kAsanModuleCtorName, &M); 1386 BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction); 1387 // call __asan_init in the module ctor. 1388 IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB)); 1389 AsanInitFunction = checkInterfaceFunction( 1390 M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), nullptr)); 1391 AsanInitFunction->setLinkage(Function::ExternalLinkage); 1392 IRB.CreateCall(AsanInitFunction); 1393 1394 Mapping = getShadowMapping(TargetTriple, LongSize); 1395 1396 appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority); 1397 return true; 1398 } 1399 1400 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { 1401 // For each NSObject descendant having a +load method, this method is invoked 1402 // by the ObjC runtime before any of the static constructors is called. 1403 // Therefore we need to instrument such methods with a call to __asan_init 1404 // at the beginning in order to initialize our runtime before any access to 1405 // the shadow memory. 1406 // We cannot just ignore these methods, because they may call other 1407 // instrumented functions. 1408 if (F.getName().find(" load]") != std::string::npos) { 1409 IRBuilder<> IRB(F.begin()->begin()); 1410 IRB.CreateCall(AsanInitFunction); 1411 return true; 1412 } 1413 return false; 1414 } 1415 1416 bool AddressSanitizer::runOnFunction(Function &F) { 1417 if (&F == AsanCtorFunction) return false; 1418 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; 1419 DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); 1420 initializeCallbacks(*F.getParent()); 1421 1422 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1423 1424 // If needed, insert __asan_init before checking for SanitizeAddress attr. 1425 maybeInsertAsanInitAtFunctionEntry(F); 1426 1427 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) 1428 return false; 1429 1430 if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) 1431 return false; 1432 1433 // We want to instrument every address only once per basic block (unless there 1434 // are calls between uses). 
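// A call may free or otherwise poison memory behind our back, so the set of
// already-checked addresses (TempsToInstrument) is reset at every call site.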

  appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
  return true;
}

bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is
  // invoked by the ObjC runtime before any of the static constructors is
  // called. Therefore we need to instrument such methods with a call to
  // __asan_init at the beginning in order to initialize our runtime before
  // any access to the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    IRBuilder<> IRB(F.begin()->begin());
    IRB.CreateCall(AsanInitFunction);
    return true;
  }
  return false;
}

bool AddressSanitizer::runOnFunction(Function &F) {
  if (&F == AsanCtorFunction) return false;
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
  initializeCallbacks(*F.getParent());

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  maybeInsertAsanInitAtFunctionEntry(F);

  if (!F.hasFnAttribute(Attribute::SanitizeAddress))
    return false;

  if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
    return false;

  // We want to instrument every address only once per basic block (unless
  // there are calls between uses).
  SmallSet<Value*, 16> TempsToInstrument;
  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<Instruction*, 8> NoReturnCalls;
  SmallVector<BasicBlock*, 16> AllBlocks;
  SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
  int NumAllocas = 0;
  bool IsWrite;
  unsigned Alignment;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      if (Value *Addr =
              isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
        if (ClOpt && ClOptSameTemp) {
          if (!TempsToInstrument.insert(Addr).second)
            continue; // We've seen this temp in the current BB.
        }
      } else if (ClInvalidPointerPairs &&
                 isInterestingPointerComparisonOrSubtraction(&Inst)) {
        PointerComparisonsOrSubtracts.push_back(&Inst);
        continue;
      } else if (isa<MemIntrinsic>(Inst)) {
        // ok, take it.
      } else {
        if (isa<AllocaInst>(Inst))
          NumAllocas++;
        CallSite CS(&Inst);
        if (CS) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CS.doesNotReturn())
            NoReturnCalls.push_back(CS.getInstruction());
        }
        continue;
      }
      ToInstrument.push_back(&Inst);
      NumInsnsPerBB++;
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
        break;
    }
  }

  bool UseCalls = false;
  if (ClInstrumentationWithCallsThreshold >= 0 &&
      ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
    UseCalls = true;

  // Instrument.
  int NumInstrumented = 0;
  for (auto Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
        instrumentMop(Inst, UseCalls);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc);
  }

  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    NumInstrumented++;
  }

  bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();

  DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");

  return res;
}

// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once the bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
        kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy, nullptr));
    AsanStackFreeFunc[i] = checkInterfaceFunction(
        M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                              IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  }
  AsanPoisonStackMemoryFunc = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
  AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
}

void
FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
                                      IRBuilder<> &IRB, Value *ShadowBase,
                                      bool DoPoison) {
  size_t n = ShadowBytes.size();
  size_t i = 0;
  // We need to (un)poison n bytes of stack shadow. Poison as many as we can
  // using 64-bit stores (if we are on 64-bit arch), then poison the rest
  // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
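  // E.g. on a 64-bit target, n == 20 shadow bytes become two 8-byte stores
  // (i = 0, i = 8) followed by a single 4-byte store (i = 16); chunks whose
  // combined value is 0 are skipped entirely (see the "if (!Val)" below).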
  for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
       LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
    for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
      uint64_t Val = 0;
      for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
        if (ASan.DL->isLittleEndian())
          Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
        else
          Val = (Val << 8) | ShadowBytes[i + j];
      }
      if (!Val) continue;
      Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
      Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
      Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
      IRB.CreateStore(Poison,
                      IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
    }
  }
}

// Fake stack allocator (asan_fake_stack.h) has 11 size classes
// for every power of 2 from kMinStackMallocSize up to kMaxStackMallocSize.
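// Class i therefore covers frames of up to kMinStackMallocSize << i bytes:
// class 0 handles frames up to 64 bytes, class 1 up to 128 bytes, and so on
// up to class kMaxAsanStackMallocSizeClass (10), which handles frames up to
// kMaxStackMallocSize (64K).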
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0; ; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize)
      return i;
  llvm_unreachable("impossible LocalStackSize");
}

// Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic.
// We cannot use the memset intrinsic because it may end up calling the actual
// memset. Size is a multiple of 8.
// Currently this generates 8-byte stores on x86_64; it may be better to
// generate wider stores.
void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
    IRBuilder<> &IRB, Value *ShadowBase, int Size) {
  assert(!(Size % 8));
  assert(kAsanStackAfterReturnMagic == 0xf5);
  for (int i = 0; i < Size; i += 8) {
    Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
                    IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
  }
}

static DebugLoc getFunctionEntryDebugLocation(Function &F) {
  for (const auto &Inst : F.getEntryBlock())
    if (!isa<AllocaInst>(Inst))
      return Inst.getDebugLoc();
  return DebugLoc();
}

PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  Alloca->setAlignment(FrameAlignment);
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}
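
// poisonStack() first handles dynamic allocas (when ClInstrumentAllocas is
// set) and then rewrites every interesting static alloca into an offset off
// one combined frame allocation whose layout is computed by
// ComputeASanStackFrameLayout:
//   [left redzone][var #0][redzone][var #1] ... [var #N][right redzone]
// The first three pointer-sized slots of the left redzone hold
// kCurrentStackFrameMagic, a pointer to the frame description string, and
// the function's PC (see the redzone[0..2] stores below); the runtime uses
// them to describe the frame in error reports.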
void FunctionStackPoisoner::poisonStack() {
  assert(AllocaVec.size() > 0 || DynamicAllocaVec.size() > 0);

  if (ClInstrumentAllocas) {
    // Handle dynamic allocas.
    for (auto &AllocaCall : DynamicAllocaVec) {
      handleDynamicAllocaCall(AllocaCall);
      unpoisonDynamicAlloca(AllocaCall);
    }
  }

  if (AllocaVec.size() == 0) return;

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);
  IRB.SetCurrentDebugLocation(EntryDebugLocation);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = { AI->getName().data(),
                                       getAllocaSizeInBytes(AI),
                                       AI->getAlignment(), AI, 0};
    SVD.push_back(D);
  }
  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t MinHeaderSize = ASan.LongSize / 2;
  ASanStackFrameLayout L;
  ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
  DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc =
      ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
  // Don't do dynamic alloca in presence of inline asm: too often it
  // makes assumptions on which registers are available.
  bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;

  Value *StaticAlloca =
      DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);

  Value *FakeStack;
  Value *LocalStackBase;

  if (DoStackMalloc) {
    // void *FakeStack = __asan_option_detect_stack_use_after_return
    //     ? __asan_stack_malloc_N(LocalStackSize)
    //     : nullptr;
    // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
    Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
        kAsanOptionDetectUAR, IRB.getInt32Ty());
    Value *UARIsEnabled =
        IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
                         Constant::getNullValue(IRB.getInt32Ty()));
    Instruction *Term =
        SplitBlockAndInsertIfThen(UARIsEnabled, InsBefore, false);
    IRBuilder<> IRBIf(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    StackMallocIdx = StackMallocSizeClass(LocalStackSize);
    assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
    Value *FakeStackValue =
        IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                         ConstantInt::get(IntptrTy, LocalStackSize));
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    FakeStack = createPHI(IRB, UARIsEnabled, FakeStackValue, Term,
                          ConstantInt::get(IntptrTy, 0));

    Value *NoFakeStack =
        IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
    Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
    IRBIf.SetInsertPoint(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    Value *AllocaValue =
        DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
  } else {
    // void *FakeStack = nullptr;
    // void *LocalStackBase = alloca(LocalStackSize);
    FakeStack = ConstantInt::get(IntptrTy, 0);
    LocalStackBase =
        DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
  }

  // Insert poison calls for lifetime intrinsics for alloca.
  bool HavePoisonedAllocas = false;
  for (const auto &APC : AllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    HavePoisonedAllocas |= APC.DoPoison;
  }

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true);
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
                                   /*AllowMerging*/ true);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
                                             IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);

  // Poison the stack redzones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);

  // (Un)poison the stack before all ret instructions.
  for (auto Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if FakeStack != 0  // LocalStackBase == FakeStack
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(FakeStack) = 0
      //     else
      //         __asan_stack_free_N(FakeStack, LocalStackSize)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp =
          IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
      TerminatorInst *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
                                           ClassSize >> Mapping.Scale);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            FakeStack,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall2(AsanStackFreeFunc[StackMallocIdx], FakeStack,
                              ConstantInt::get(IntptrTy, LocalStackSize));
      }

      IRBuilder<> IRBElse(ElseTerm);
      poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
    } else if (HavePoisonedAllocas) {
      // If we poisoned some allocas in llvm.lifetime analysis,
      // unpoison the whole stack frame now.
      poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
    } else {
      poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec)
    AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
                           : AsanUnpoisonStackMemoryFunc,
                  AddrArg, SizeArg);
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
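// A typical pattern this handles (sizes are in bytes):
//   %buf = alloca [16 x i8]
//   %p = bitcast [16 x i8]* %buf to i8*
//   call void @llvm.lifetime.start(i64 16, i8* %p)   ; unpoison %buf here
//   ...
//   call void @llvm.lifetime.end(i64 16, i8* %p)     ; poison %buf here
// findAllocaForValue() below maps %p back to %buf.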

AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
    // We're interested only in allocas we can handle.
    return isInterestingAlloca(*AI) ? AI : nullptr;
  // See if we've already calculated (or started to calculate) alloca for a
  // given value.
  AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
  if (I != AllocaForValue.end())
    return I->second;
  // Store 0 while we're calculating alloca for value V to avoid
  // infinite recursion if the value references itself.
  AllocaForValue[V] = nullptr;
  AllocaInst *Res = nullptr;
  if (CastInst *CI = dyn_cast<CastInst>(V))
    Res = findAllocaForValue(CI->getOperand(0));
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *IncValue = PN->getIncomingValue(i);
      // Allow self-referencing phi-nodes.
      if (IncValue == PN) continue;
      AllocaInst *IncValueAI = findAllocaForValue(IncValue);
      // AI for incoming values should exist and should all be equal.
      if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
        return nullptr;
      Res = IncValueAI;
    }
  }
  if (Res)
    AllocaForValue[V] = Res;
  return Res;
}
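
// For example, a phi that merges two bitcasts of the same alloca resolves to
// that alloca, while a phi whose incoming values map to different allocas
// (or to none) yields nullptr, i.e. no alloca is associated with the value.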

// Compute PartialRzMagic for dynamic alloca call. PartialRzMagic is
// constructed from two separate 32-bit numbers: PartialRzMagic = Val1 | Val2.
// (1) Val1 is responsible for forming base value for PartialRzMagic,
//     containing only 00 for fully addressable and 0xcb for fully poisoned
//     bytes for each 8-byte chunk of user memory respectively.
// (2) Val2 forms the value for marking first poisoned byte in shadow memory
//     with appropriate value (0x01 - 0x07 or 0xcb if Padding % 8 == 0).
//
// Shift = Padding & ~7; // the number of bits we need to shift to access
//                       // first chunk in shadow memory, containing nonzero
//                       // bytes.
// Example:
//   Padding = 21                     Padding = 16
//   Shadow: |00|00|05|cb|            Shadow: |00|00|cb|cb|
//                  ^                                ^
//                  |                                |
//   Shift = 21 & ~7 = 16             Shift = 16 & ~7 = 16
//
// Val1 = 0xcbcbcbcb << Shift;
// PartialBits = Padding ? Padding & 7 : 0xcb;
// Val2 = PartialBits << Shift;
// Result = Val1 | Val2;
Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize,
                                                    IRBuilder<> &IRB) {
  PartialSize = IRB.CreateIntCast(PartialSize, IRB.getInt32Ty(), false);
  Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7));
  unsigned Val1Int = kAsanAllocaPartialVal1;
  unsigned Val2Int = kAsanAllocaPartialVal2;
  if (!ASan.DL->isLittleEndian()) {
    Val1Int = sys::getSwappedBytes(Val1Int);
    Val2Int = sys::getSwappedBytes(Val2Int);
  }
  Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift);
  Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7));
  // For BigEndian get 0x000000YZ -> 0xYZ000000.
  if (ASan.DL->isBigEndian())
    PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24));
  Value *Val2 = IRB.getInt32(Val2Int);
  Value *Cond =
      IRB.CreateICmpNE(PartialBits, Constant::getNullValue(IRB.getInt32Ty()));
  Val2 = IRB.CreateSelect(Cond, shiftAllocaMagic(PartialBits, IRB, Shift),
                          shiftAllocaMagic(Val2, IRB, Shift));
  return IRB.CreateOr(Val1, Val2);
}

void FunctionStackPoisoner::handleDynamicAllocaCall(
    DynamicAllocaCall &AllocaCall) {
  AllocaInst *AI = AllocaCall.AI;
  if (!doesDominateAllExits(AI)) {
    // We do not yet handle complex allocas.
    AllocaCall.Poison = false;
    return;
  }

  IRBuilder<> IRB(AI);

  PointerType *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
  const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
  Value *NotAllocaRzMask = ConstantInt::get(IntptrTy, ~AllocaRedzoneMask);

  // We need to extend the alloca with additional memory for the redzones.
  // getArraySize() is the number of allocated elements of ElementSize bytes
  // each, so the allocated size in bytes is OldSize = ArraySize * ElementSize.
  unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize = IRB.CreateMul(AI->getArraySize(),
                                 ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % 32
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
  // Align is added to locate left redzone, PartialPadding for possible
  // partial redzone and kAllocaRzSize for right redzone respectively.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
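
  // For example, for "alloca i8, i64 45" whose own alignment does not exceed
  // kAllocaRzSize (so Align == 32):
  //   OldSize = 45, PartialSize = 45 & 31 = 13, Misalign = 32 - 13 = 19,
  //   PartialPadding = 19, AdditionalChunkSize = 32 + 32 + 19 = 83,
  //   NewSize = 128.
  // The rewritten allocation is then laid out as an Align-byte prefix whose
  // last kAllocaRzSize bytes form the left redzone, followed by OldSize bytes
  // of user memory, PartialPadding bytes of partial redzone and a
  // kAllocaRzSize-byte right redzone.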

  // Insert new alloca with new NewSize and Align params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align);

  // NewAddress = Address + Align
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Align));

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // LeftRzAddress = NewAddress - kAllocaRzSize
  Value *LeftRzAddress = IRB.CreateSub(NewAddress, AllocaRzSize);

  // Poisoning left redzone.
  AllocaCall.LeftRzAddr = ASan.memToShadow(LeftRzAddress, IRB);
  IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaLeftMagic),
                  IRB.CreateIntToPtr(AllocaCall.LeftRzAddr, Int32PtrTy));

  // PartialRzAligned = PartialRzAddr & ~AllocaRzMask
  Value *PartialRzAddr = IRB.CreateAdd(NewAddress, OldSize);
  Value *PartialRzAligned = IRB.CreateAnd(PartialRzAddr, NotAllocaRzMask);

  // Poisoning partial redzone.
  Value *PartialRzMagic = computePartialRzMagic(PartialSize, IRB);
  Value *PartialRzShadowAddr = ASan.memToShadow(PartialRzAligned, IRB);
  IRB.CreateStore(PartialRzMagic,
                  IRB.CreateIntToPtr(PartialRzShadowAddr, Int32PtrTy));

  // RightRzAddress = (PartialRzAddr + AllocaRzMask) & ~AllocaRzMask
  Value *RightRzAddress = IRB.CreateAnd(
      IRB.CreateAdd(PartialRzAddr, AllocaRzMask), NotAllocaRzMask);

  // Poisoning right redzone.
  AllocaCall.RightRzAddr = ASan.memToShadow(RightRzAddress, IRB);
  IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaRightMagic),
                  IRB.CreateIntToPtr(AllocaCall.RightRzAddr, Int32PtrTy));

  // Replace all uses of AddressReturnedByAlloca with NewAddress.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase old alloca and store left, partial and right redzones
  // shadow addresses for future unpoisoning.
  AI->eraseFromParent();
  NumInstrumentedDynamicAllocas++;
}