//===-- MemorySanitizer.cpp - detector of uninitialized reads ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
//===----------------------------------------------------------------------===//
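
// Illustration only (schematic, not produced verbatim by the pass): for a
// trivial function compiled with -fsanitize=memory, such as
//
//   int f(int x) { return x; }
//
// the instrumentation described above roughly amounts to
//
//   define i32 @f(i32 %x) sanitize_memory {
//     ; load the shadow of %x from __msan_param_tls (offset 0)
//     ; store that shadow to __msan_retval_tls before returning
//     ret i32 %x
//   }
//
// and a conditional call to __msan_warning[_noreturn]() is emitted wherever a
// value's shadow must be checked, e.g. before branching on that value.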
#define DEBUG_TYPE "msan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BlackList.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

/// \brief Track origins of uninitialized values.
///
/// Adds a section to the MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<bool> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
       cl::desc("store origin for clean (fully initialized) values"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);

namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(bool TrackOrigins = false,
                  StringRef BlacklistFile = StringRef())
    : FunctionPass(ID),
      TrackOrigins(TrackOrigins || ClTrackOrigins),
      TD(0),
      WarningFn(0),
      BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
                                          : BlacklistFile) { }
  const char *getPassName() const { return "MemorySanitizer"; }
  bool runOnFunction(Function &F);
  bool doInitialization(Module &M);
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  bool TrackOrigins;

  DataLayout *TD;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  /// \brief Run-time helper that copies origin info for a memory range.
  Value *MsanCopyOriginFn;
  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOriginFn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
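  // Illustration only (example numbers, not used by the code): on x86-64,
  // with ShadowMask == kShadowMask64 (1ULL << 46) and OriginOffset ==
  // kOriginOffset64 (1ULL << 45), an application address such as
  //   App    = 0x7f1200001234
  // maps to
  //   Shadow = App & ~(1ULL << 46)            = 0x3f1200001234
  //   Origin = (Shadow + (1ULL << 45)) & ~3   = 0x5f1200001234
  // i.e. the shadow is found by clearing a single address bit, and the
  // origin table sits at a fixed offset above the shadow.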
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  OwningPtr<BlackList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
}  // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}


/// \brief Insert extern declarations of runtime-provided functions and
/// globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  MsanCopyOriginFn = M.getOrInsertFunction(
    "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanSetAllocaOriginFn = M.getOrInsertFunction(
    "__msan_set_alloca_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);

  // Create globals.
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
    "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_va_arg_overflow_size_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  TD = getAnalysisIfAvailable<DataLayout>();
  if (!TD)
    return false;
  BL.reset(new BlackList(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
      break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(TD);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}
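
// Illustration only: with -msan-track-origins and -msan-keep-going, the
// initialization above roughly amounts to the following module-level IR
// (syntax abbreviated):
//
//   @__msan_track_origins = weak_odr constant i32 1
//   @__msan_keep_going    = weak_odr constant i32 1
//   @llvm.global_ctors    = appending global [...{ i32 0, void ()* @__msan_init }...]
//
// so the runtime is initialized before user constructors run and can read the
// two flags to match the instrumentation mode.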

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  bool InsertChecks;
  bool LoadShadow;
  OwningPtr<VarArgHelper> VAHelper;

  struct ShadowOriginAndInsertPoint {
    Instruction *Shadow;
    Instruction *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
    : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    LoadShadow = InsertChecks =
      !MS.BL->isIn(F) &&
      F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::SanitizeMemory);

    DEBUG(if (!InsertChecks)
            dbgs() << "MemorySanitizer is not inserting checks into '"
                   << F.getName() << "'\n");
  }

  void materializeStores() {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);

      IRBuilder<> IRB(&I);
      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
        IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress)
        insertCheck(Addr, &I);

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
                                 Alignment);
        } else {
          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);

          Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
          // TODO(eugenis): handle non-zero constant shadow by inserting an
          // unconditional check (cannot simply fail compilation as this could
          // be in dead code).
          if (Cst)
            continue;

          Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                        getCleanShadow(ConvertedShadow),
                                        "_mscmp");
          Instruction *CheckTerm =
            SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
                                      MS.OriginStoreWeights);
          IRBuilder<> IRBNew(CheckTerm);
          IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
                                    Alignment);
        }
      }
    }
  }
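
  // Illustration only: for a 64-bit target, a plain application store such as
  //   store i32 %v, i32* %p
  // is preceded (roughly) by
  //   %p_int  = ptrtoint i32* %p to i64
  //   %sp_int = and i64 %p_int, 0xFFFFBFFFFFFFFFFF   ; clear the shadow bit
  //   %sp     = inttoptr i64 %sp_int to i32*
  //   store i32 %v_shadow, i32* %sp
  // and, with -msan-track-origins, by a branch that stores the origin of %v
  // only when %v_shadow is non-zero.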

  void materializeChecks() {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Instruction *Shadow = InstrumentationList[i].Shadow;
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      IRBuilder<> IRB(OrigIns);
      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm =
        SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
                                  /* Unreachable */ !ClKeepGoing,
                                  MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        Instruction *Origin = InstrumentationList[i].Origin;
        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      CallInst *Call = IRB.CreateCall(MS.WarningFn);
      Call->setDebugLoc(OrigIns->getDebugLoc());
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.TD) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
      BasicBlock *BB = *DI;
      visit(*BB);
    }

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores();

    // Insert shadow value checks.
    materializeChecks();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
      IRB.CreateAdd(ShadowLong,
                    ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
      IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = ClPoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
          ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
          : MS.TD->getTypeAllocSize(AI->getType());
        if (A == AI) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = AI->getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.TD->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
              getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
              CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG:  " << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    if (!InsertChecks) return;
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    InstrumentationList.push_back(
      ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheck(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and equal to 0, then neither the value nor the definedness
  /// of the corresponding bit in the right argument affects the resulting
  /// shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if all
  /// arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's shadow
    /// and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, false);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, false);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }
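
  // Illustration only: for an instruction with the default propagation rule,
  // such as
  //   %sum = add i32 %a, %b
  // handleShadowOr() emits approximately
  //   %_msprop = or i32 %a_shadow, %b_shadow
  // and, with origin tracking, a select that keeps the origin of the
  // rightmost operand whose shadow is non-zero.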

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
    // Result is defined if one of the following is true
    // * there is a defined 1 bit in C
    // * C is fully defined
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined sign bit, minimize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }
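
  // Illustration only (8-bit example): if A has value bits 0b01010000 and
  // shadow Sa = 0b00001111 (low nibble uninitialized), then for an unsigned
  // comparison the range of values A could take is
  //   lowest  = A & ~Sa = 0b01010000 (80)
  //   highest = A |  Sa = 0b01011111 (95)
  // For signed comparisons the (possibly uninitialized) sign bit is treated
  // separately, as done above and in getHighestPossibleValue() below.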

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined sign bit, maximize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }

  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  /// FIXME: output seems suboptimal when one of the operands is a constant
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into account
    // its undefined bits. Let [b0, b1] be the interval of possible values of B.
    // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
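
  // Illustration only (unsigned, 4-bit): suppose A has value bits 0b1000 and
  // shadow 0b0011, so A is somewhere in [8, 11], and B is 0b0100 with a clean
  // shadow (B == 4). For the comparison A < B:
  //   (a0 < b1) = (8  < 4) = false
  //   (a1 < b0) = (11 < 4) = false
  // The two bounds agree, so the result is fully defined (false) and its
  // shadow is 0, even though A itself is partially uninitialized.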

  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value* op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value* Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in the case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }

  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value* Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }

  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (LoadShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }

  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
      F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }

  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that are
      // not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow only tail calls with the same types, otherwise
      // we may have a false positive: shadow for a non-void RetVal
      // will get propagated to a void RetVal.
      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);
    unsigned ArgOffset = 0;
    DEBUG(dbgs() << " CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
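      // The shadow is written into the parameter TLS array at ArgOffset; the
      // instrumented callee reads its argument shadow back from the matching
      // offsets (see getShadowPtrForArgument).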
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.TD->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      (void)Store;
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << " Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << " done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    if (Value *RetVal = I.getReturnValue()) {
      // Set the shadow for the RetVal.
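      // The instrumented caller reloads this TLS slot right after the call
      // (see visitCallSite above), so the return value's shadow is propagated
      // across the return.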
      Value *Shadow = getShadow(RetVal);
      Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
      DEBUG(dbgs() << "Return: " << *Shadow << "\n" << *ShadowPtr << "\n");
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    if (!ClPoisonStack) return;
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
    if (ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      IRB.CreateMemSet(ShadowBase, IRB.getInt8(ClPoisonStackPattern),
                       Size, I.getAlignment());
    }

    if (MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and are replaced by
      // the run-time with the origin id on the first call to
      // __msan_set_alloca_origin.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
        createPrivateNonConstGlobalForString(*F.getParent(),
                                             StackDescription.str());
      IRB.CreateCall3(MS.MsanSetAllocaOriginFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()));
    }
  }

  void visitSelectInst(SelectInst& I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSelect(I.getCondition(),
              getShadow(I.getTrueValue()), getShadow(I.getFalseValue()),
              "_msprop"));
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      if (Cond->getType()->isVectorTy()) {
        Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
        Cond = IRB.CreateICmpNE(ConvertedShadow,
                                getCleanShadow(ConvertedShadow), "_mso_select");
      }
      setOrigin(&I, IRB.CreateSelect(Cond,
                getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOrigin(&I, getCleanOrigin());
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << " Res: " << *Res << "\n");
    setShadow(&I, Res);
    setOrigin(&I, getCleanOrigin());
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value* arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
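  // On AMD64 that layout mimics the register save area: bytes [0, 48) hold
  // shadow for general-purpose register arguments, [48, 176) for FP (XMM)
  // register arguments, and everything past AMD64FpEndOffset for arguments
  // passed in memory.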
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
        GpOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
        FpOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void visitVACopyInst(VACopyInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void finalizeInstrumentation() {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
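      // The copy is made at function entry because va_arg_tls is only written
      // by our caller right before the call; a later call to another vararg
      // function from this function would overwrite it before va_start is
      // reached.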
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr =
        getShadowPtrForVAArgument(VAArgTLSCopy, IRB, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {}

  void visitVAStartInst(VAStartInst &I) {}

  void visitVACopyInst(VACopyInst &I) {}

  void finalizeInstrumentation() {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64. False positives are possible
  // on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}