//===-- MemorySanitizer.cpp - detector of uninitialized reads ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
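///
/// For illustration only (a sketch, not normative): shadow propagation for
/// a 32-bit addition looks roughly like
///   %c  = add i32 %a, %b
///   %sc = or i32 %sa, %sb        ; approximate shadow of %c
/// and a conditional branch on a value %v with shadow %sv gets a check like
///   %p = icmp ne i32 %sv, 0
///   br i1 %p, label %warn, label %ok   ; %warn calls __msan_warning*()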
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings a major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely
/// in practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
///
/// Atomic handling.
///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, an atomic
/// store to two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.

//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;

#define DEBUG_TYPE "msan"

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;
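
// For illustration (a sketch, not normative): with the 64-bit constants
// above, an application address maps to its shadow and origin as
//   ShadowAddr = Addr & ~kShadowMask64
//   OriginAddr = (ShadowAddr + kOriginOffset64) & ~3ULL
// e.g. Addr 0x7f1234567890 -> ShadowAddr 0x3f1234567890 and
//      OriginAddr = (0x3f1234567890 + 0x200000000000) & ~3 = 0x5f1234567890.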

/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) to regain control on transition between instrumented
// and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
       cl::desc("Do not wrap indirect calls with target in the same module"),
       cl::Hidden, cl::init(true));
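
// Note: these are LLVM cl::opts; from a clang driver invocation they can be
// reached through -mllvm (illustrative only, not defined in this file):
//   clang -fsanitize=memory -mllvm -msan-keep-going=1 test.c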

namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0,
                  StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        DL(0),
        WarningFn(0),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  GlobalVariable *MsandrModuleStart;
  GlobalVariable *MsandrModuleEnd;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  std::unique_ptr<SpecialCaseList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
}  // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}
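
// Typical use (a sketch; pass manager setup lives outside this file):
//   PassManager PM;
//   PM.add(createMemorySanitizerPass(/*TrackOrigins=*/1));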

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}

/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), NULL);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), NULL);
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
      "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IntptrTy, IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
      "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
      NULL);
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
  MemmoveFn = M.getOrInsertFunction(
      "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
      "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
      "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt32Ty(), IntptrTy, NULL);

  // Create globals.
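  // Note: the names, initial-exec TLS model and sizes of the globals below
  // form an ABI contract with the MSan runtime (compiler-rt), which defines
  // the actual storage; keeping them in sync is assumed, not checked here.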
  RetvalTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 8), false,
      GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
      GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
      M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
      "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
      GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
      GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
      M, ArrayType::get(OriginTy, 1000), false,
      GlobalVariable::ExternalLinkage, 0, "__msan_param_origin_tls", 0,
      GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
      GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
      GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
      M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
      "__msan_va_arg_overflow_size_tls", 0,
      GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
      M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
      "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
        PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
        ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }

  if (ClWrapIndirectCallsFast) {
    MsandrModuleStart = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        0, "__executable_start");
    MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
    MsandrModuleEnd = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        0, "_end");
    MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
  }
}

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init into the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
      break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
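
  // Sketch (assumed shape) of the module-level artifacts created below:
  //   @llvm.global_ctors gains an entry that calls @__msan_init()
  //   @__msan_track_origins = weak_odr constant i32 <N>   ; if enabled
  //   @__msan_keep_going = weak_odr constant i32 1        ; if enabled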

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                             "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}
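
// For reference, the mapping above yields the index into the
// kNumberOfAccessSizes-sized callback arrays (callers still guard against
// results >= kNumberOfAccessSizes):
//   TypeSizeToSizeIndex(8)  == 0   // 1-byte access
//   TypeSizeToSizeIndex(16) == 1   // 2-byte access
//   TypeSizeToSizeIndex(32) == 2   // 4-byte access
//   TypeSizeToSizeIndex(64) == 3   // 8-byte access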

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool LoadShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  SmallVector<CallSite, 16> IndirectCallList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
                                                   AttributeSet::FunctionIndex,
                                                   Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    LoadShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
              dbgs() << "MemorySanitizer is not inserting checks into '"
                     << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    if (isa<StructType>(Shadow->getType())) {
      IRB.CreateAlignedStore(updateOrigin(Origin, IRB),
                             getOriginPtr(Addr, IRB), Alignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      // TODO(eugenis): handle non-zero constant shadow by inserting an
      // unconditional check (cannot simply fail compilation as this could
      // be in the dead code).
      if (isa<Constant>(ConvertedShadow)) return;
      unsigned TypeSizeInBits =
          MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall3(Fn, ConvertedShadow2,
                        IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                        updateOrigin(Origin, IRB));
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        IRBNew.CreateAlignedStore(updateOrigin(Origin, IRBNew),
                                  getOriginPtr(Addr, IRBNew), Alignment);
      }
    }
  }
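
  // Sketch of the inline (non-call) form emitted above for a store whose
  // shadow is a non-constant i32 %s (illustrative IR, not verbatim):
  //   %_mscmp = icmp ne i32 %s, 0
  //   br i1 %_mscmp, label %origin, label %cont   ; origin-store weights
  // origin:
  //   store i32 %o, i32* <origin addr>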

  void materializeStores(bool InstrumentWithCalls) {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      StoreInst &I = *dyn_cast<StoreInst>(StoreList[i]);

      IRBuilder<> IRB(&I);
      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &I);

      if (I.isAtomic()) I.setOrdering(addReleaseOrdering(I.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
                    InstrumentWithCalls);
      }
    }
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
    // See the comment in materializeStores().
    if (isa<Constant>(ConvertedShadow)) return;
    unsigned TypeSizeInBits =
        MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0));
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn);
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeChecks(bool InstrumentWithCalls) {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      Value *Shadow = InstrumentationList[i].Shadow;
      Value *Origin = InstrumentationList[i].Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  void materializeIndirectCalls() {
    for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
      CallSite CS = IndirectCallList[i];
      Instruction *I = CS.getInstruction();
      BasicBlock *B = I->getParent();
      IRBuilder<> IRB(I);
      Value *Fn0 = CS.getCalledValue();
      Value *Fn = IRB.CreateBitCast(Fn0, MS.AnyFunctionPtrTy);

      if (ClWrapIndirectCallsFast) {
        // Check that call target is inside this module limits.
        Value *Start =
            IRB.CreateBitCast(MS.MsandrModuleStart, MS.AnyFunctionPtrTy);
        Value *End = IRB.CreateBitCast(MS.MsandrModuleEnd, MS.AnyFunctionPtrTy);

        Value *NotInThisModule = IRB.CreateOr(IRB.CreateICmpULT(Fn, Start),
                                              IRB.CreateICmpUGE(Fn, End));

        PHINode *NewFnPhi =
            IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");

        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            NotInThisModule, NewFnPhi,
            /* Unreachable */ false, MS.ColdCallWeights);

        IRB.SetInsertPoint(CheckTerm);
        // Slow path: call wrapper function to possibly transform the call
        // target.
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());

        NewFnPhi->addIncoming(Fn0, B);
        NewFnPhi->addIncoming(NewFn, dyn_cast<Instruction>(NewFn)->getParent());
        CS.setCalledFunction(NewFnPhi);
      } else {
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
        CS.setCalledFunction(NewFn);
      }
    }
  }
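
  // Resulting control flow for the fast path above (sketch):
  //   entry: %out = or (icmp ult %fn, @__executable_start),
  //                    (icmp uge %fn, @_end)
  //          br i1 %out, label %slow, label %cont   ; cold branch weights
  //   slow:  %wrapped = call @wrapper(%fn)
  //   cont:  %target = phi [ %fn, %entry ], [ %wrapped, %slow ]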

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    // Wrap indirect calls.
    materializeIndirectCalls();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
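
  // Examples of the mapping above (illustrative):
  //   i32          -> i32
  //   float        -> i32           (shadow of the raw bits)
  //   <4 x float>  -> <4 x i32>
  //   { i8, i64 }  -> { i8, i64 }   (element-wise)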

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
        IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
          ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
          : MS.DL->getTypeAllocSize(AI->getType());
        if (A == AI) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = AI->getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
                getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG:  " << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we cannot
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in the right argument affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "And" of 0 and a poisoned value results in unpoisoned value.
    // 1&1 => 1;  0&1 => 0;  p&1 => p;
    // 1&0 => 0;  0&0 => 0;  p&0 => 0;
    // 1&p => p;  0&p => 0;  p&p => p;
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in unpoisoned value.
    // 1|1 => 1;  0|1 => 1;  p|1 => 1;
    // 1|0 => 1;  0|0 => 0;  p|0 => p;
    // 1|p => 1;  0|p => p;  p|p => p;
    // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
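
  // Worked 4-bit example for the AND rule above (illustrative):
  //   V1 = 1010, S1 = 0010  (bit 1 of the left operand is undefined)
  //   V2 = 0110, S2 = 0000  (right operand fully defined)
  //   S  = (S1&S2) | (V1&S2) | (S1&V2) = 0000 | 0000 | 0010 = 0010
  // Only bit 1 of the result is poisoned; everywhere else a defined 0 in V2
  // forces the result bit, or both operand bits are defined.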

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

   public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;
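
  // Typical use (see handleShadowOr below for the real call site):
  //   ShadowAndOriginCombiner SC(this, IRB);
  //   SC.Add(I.getOperand(0)).Add(I.getOperand(1)).Done(&I);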

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, it does not matter
    // now). The result is defined if one of the following is true:
    // * there is a defined 1 bit in C
    // * C is fully defined
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
        IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                      IRB.CreateICmpEQ(
                          IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined shadow bit, minimize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined shadow bit, maximize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }
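
  // Unsigned worked example (4 bits, illustrative): A = 1011 with Sa = 0010
  // (bit 1 undefined) can only be 1001 or 1011, and indeed:
  //   lowest  = A & ~Sa = 1001
  //   highest = A | Sa  = 1011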
  /// FIXME: output seems suboptimal when one of the operands is a constant
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
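    // Illustrative IR (a sketch): for %r = shl i32 %a, %b we emit roughly
    //   %s2any  = icmp ne i32 %Sb, 0
    //   %s2conv = sext i1 %s2any to i32     ; all-ones if %b is poisoned
    //   %sh     = shl i32 %Sa, %b           ; shift %a's shadow alongside %a
    //   %Sr     = or i32 %sh, %s2conv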
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in the case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// A similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inlining for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }

  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
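  /// For instance (illustrative), an unaligned SSE store such as
  /// llvm.x86.sse.storeu.ps fits this shape: we store the shadow of the
  /// vector argument through the shadow address of the pointer argument.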
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // We have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }

  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // We have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (LoadShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }

  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
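  ///
  /// The patterns recognized below (a sketch of the checks that follow):
  ///   * (pointer, vector) -> void that writes memory: treat as a SIMD store;
  ///   * (pointer) -> vector that only reads memory: treat as a SIMD load;
  ///   * no memory access, all argument types equal to the result type:
  ///     propagate shadow as a bitwise OR of the argument shadows.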
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
      F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

  /// \brief Instrument vector convert intrinsic.
  ///
  /// This function instruments intrinsics like cvtsi2ss:
  ///   %Out = int_xxx_cvtyyy(%ConvertOp)
  /// or
  ///   %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  /// The intrinsic converts \p NumUsedElements elements of \p ConvertOp to
  /// the same number of \p Out elements, and (if it has 2 arguments) copies
  /// the rest of the elements from \p CopyOp.
  /// In most cases the conversion involves a floating-point value, which may
  /// trigger a hardware exception when not fully initialized. For this reason
  /// we require \p ConvertOp[0:NumUsedElements] to be fully initialized and
  /// trap otherwise.
  /// We copy the shadow of \p CopyOp[NumUsedElements:] to
  /// \p Out[NumUsedElements:]. This means that intrinsics without \p CopyOp
  /// always return a fully initialized value.
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    switch (I.getNumArgOperands()) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = NULL;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // The first *NumUsedElements* elements of ConvertOp are converted to the
    // same number of output elements. The rest of the output is copied from
    // CopyOp, or (if not available) filled with zeroes.
    // Combine shadow for elements of ConvertOp that are used in this
    // operation, and insert a check.
    // FIXME: consider propagating shadow of ConvertOp, at least in the case
    // of int->any conversion.
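    // Illustrative layout (a sketch) for the two-argument form with
    // NumUsedElements == 1, e.g. a cvtsi2ss-style op returning <4 x float>:
    //   result shadow = [ 0, CopyShadow[1], CopyShadow[2], CopyShadow[3] ]
    // plus a check that the used part of ConvertOp's shadow is all zero
    // (fully initialized).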
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = 0;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
        ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    assert(AggShadow->getType()->isIntegerTy());
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

    // Build result shadow by zero-filling parts of CopyOp shadow that come
    // from ConvertOp.
    if (CopyOp) {
      assert(CopyOp->getType() == I.getType());
      assert(CopyOp->getType()->isVectorTy());
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = ResultShadow->getType()->getVectorElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
          ResultShadow, ConstantInt::getNullValue(EltTy),
          ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }
  }

  // Given a scalar or vector, extract the lower 64 bits (or fewer), and
  // return all zeroes if it is zero, and all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    if (S->getType()->isVectorTy())
      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    assert(S->getType()->getPrimitiveSizeInBits() <= 64);
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
    Type *T = S->getType();
    assert(T->isVectorTy());
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return IRB.CreateSExt(S2, T);
  }

  /// \brief Instrument vector shift intrinsic.
  ///
  /// This function instruments intrinsics like int_x86_avx2_psll_w.
  /// The intrinsic shifts %In by %ShiftSize bits.
  /// %ShiftSize may be a vector. In that case the lower 64 bits determine
  /// the shift size, and the rest is ignored. Behavior is defined even if
  /// the shift size is greater than register (or field) width.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    assert(I.getNumArgOperands() == 2);
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
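    // Variable shifts (e.g. AVX2 psllv) give every vector lane its own shift
    // amount, so each shadow lane is widened independently below; otherwise
    // a single shift size comes from the lower 64 bits of the second operand
    // and one all-or-nothing shadow is broadcast over the result type.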
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall2(I.getCalledValue(),
                                   IRB.CreateBitCast(S1, V1->getType()), V2);
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvtsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtss2usi64:
    case llvm::Intrinsic::x86_avx512_cvtss2usi:
    case llvm::Intrinsic::x86_avx512_cvttss2usi64:
    case llvm::Intrinsic::x86_avx512_cvttss2usi:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtusi2sd:
    case llvm::Intrinsic::x86_avx512_cvtusi2ss:
    case llvm::Intrinsic::x86_avx512_cvtusi642sd:
    case llvm::Intrinsic::x86_avx512_cvtusi642ss:
    case llvm::Intrinsic::x86_sse2_cvtsd2si64:
    case llvm::Intrinsic::x86_sse2_cvtsd2si:
    case llvm::Intrinsic::x86_sse2_cvtsd2ss:
    case llvm::Intrinsic::x86_sse2_cvtsi2sd:
    case llvm::Intrinsic::x86_sse2_cvtsi642sd:
    case llvm::Intrinsic::x86_sse2_cvtss2sd:
    case llvm::Intrinsic::x86_sse2_cvttsd2si64:
    case llvm::Intrinsic::x86_sse2_cvttsd2si:
    case llvm::Intrinsic::x86_sse_cvtsi2ss:
    case llvm::Intrinsic::x86_sse_cvtsi642ss:
    case llvm::Intrinsic::x86_sse_cvtss2si64:
    case llvm::Intrinsic::x86_sse_cvtss2si:
    case llvm::Intrinsic::x86_sse_cvttss2si64:
    case llvm::Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case llvm::Intrinsic::x86_sse2_cvtdq2pd:
    case llvm::Intrinsic::x86_sse2_cvtps2pd:
    case llvm::Intrinsic::x86_sse_cvtps2pi:
    case llvm::Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;
    case llvm::Intrinsic::x86_avx512_psll_dq:
    case llvm::Intrinsic::x86_avx512_psrl_dq:
    case llvm::Intrinsic::x86_avx2_psll_w:
    case llvm::Intrinsic::x86_avx2_psll_d:
    case llvm::Intrinsic::x86_avx2_psll_q:
    case llvm::Intrinsic::x86_avx2_pslli_w:
    case llvm::Intrinsic::x86_avx2_pslli_d:
    case llvm::Intrinsic::x86_avx2_pslli_q:
    case llvm::Intrinsic::x86_avx2_psll_dq:
    case llvm::Intrinsic::x86_avx2_psrl_w:
    case llvm::Intrinsic::x86_avx2_psrl_d:
    case llvm::Intrinsic::x86_avx2_psrl_q:
    case llvm::Intrinsic::x86_avx2_psra_w:
    case llvm::Intrinsic::x86_avx2_psra_d:
    case llvm::Intrinsic::x86_avx2_psrli_w:
    case llvm::Intrinsic::x86_avx2_psrli_d:
    case llvm::Intrinsic::x86_avx2_psrli_q:
    case llvm::Intrinsic::x86_avx2_psrai_w:
    case llvm::Intrinsic::x86_avx2_psrai_d:
    case llvm::Intrinsic::x86_avx2_psrl_dq:
    case llvm::Intrinsic::x86_sse2_psll_w:
    case llvm::Intrinsic::x86_sse2_psll_d:
    case llvm::Intrinsic::x86_sse2_psll_q:
    case llvm::Intrinsic::x86_sse2_pslli_w:
    case llvm::Intrinsic::x86_sse2_pslli_d:
    case llvm::Intrinsic::x86_sse2_pslli_q:
    case llvm::Intrinsic::x86_sse2_psll_dq:
    case llvm::Intrinsic::x86_sse2_psrl_w:
    case llvm::Intrinsic::x86_sse2_psrl_d:
    case llvm::Intrinsic::x86_sse2_psrl_q:
    case llvm::Intrinsic::x86_sse2_psra_w:
    case llvm::Intrinsic::x86_sse2_psra_d:
    case llvm::Intrinsic::x86_sse2_psrli_w:
    case llvm::Intrinsic::x86_sse2_psrli_d:
    case llvm::Intrinsic::x86_sse2_psrli_q:
    case llvm::Intrinsic::x86_sse2_psrai_w:
    case llvm::Intrinsic::x86_sse2_psrai_d:
    case llvm::Intrinsic::x86_sse2_psrl_dq:
    case llvm::Intrinsic::x86_mmx_psll_w:
    case llvm::Intrinsic::x86_mmx_psll_d:
    case llvm::Intrinsic::x86_mmx_psll_q:
    case llvm::Intrinsic::x86_mmx_pslli_w:
    case llvm::Intrinsic::x86_mmx_pslli_d:
    case llvm::Intrinsic::x86_mmx_pslli_q:
    case llvm::Intrinsic::x86_mmx_psrl_w:
    case llvm::Intrinsic::x86_mmx_psrl_d:
    case llvm::Intrinsic::x86_mmx_psrl_q:
    case llvm::Intrinsic::x86_mmx_psra_w:
    case llvm::Intrinsic::x86_mmx_psra_d:
    case llvm::Intrinsic::x86_mmx_psrli_w:
    case llvm::Intrinsic::x86_mmx_psrli_d:
    case llvm::Intrinsic::x86_mmx_psrli_q:
    case llvm::Intrinsic::x86_mmx_psrai_w:
    case llvm::Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /* Variable */ false);
      break;
    case llvm::Intrinsic::x86_avx2_psllv_d:
    case llvm::Intrinsic::x86_avx2_psllv_d_256:
    case llvm::Intrinsic::x86_avx2_psllv_q:
    case llvm::Intrinsic::x86_avx2_psllv_q_256:
    case llvm::Intrinsic::x86_avx2_psrlv_d:
    case llvm::Intrinsic::x86_avx2_psrlv_d_256:
    case llvm::Intrinsic::x86_avx2_psrlv_q:
    case llvm::Intrinsic::x86_avx2_psrlv_q_256:
    case llvm::Intrinsic::x86_avx2_psrav_d:
    case llvm::Intrinsic::x86_avx2_psrav_d_256:
      handleVectorShiftIntrinsic(I, /* Variable */ true);
      break;

    // Byte shifts are not implemented.
    // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
    // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
    // case llvm::Intrinsic::x86_avx2_psll_dq_bs:
    // case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
    // case llvm::Intrinsic::x86_sse2_psll_dq_bs:
    // case llvm::Intrinsic::x86_sse2_psrl_dq_bs:

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }

  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark
      // all outputs as clean. Note that any side effects of the inline asm
      // that are not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow only tail calls with the same types, otherwise
      // we may have a false positive: shadow for a non-void RetVal
      // will get propagated to a void RetVal.
      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us.
      // To prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
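        // Otherwise later optimizations could treat the parameter shadow
        // stores emitted below as dead stores around a call with no side
        // effects, and delete them.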
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);

    if (MS.WrapIndirectCalls && !CS.getCalledFunction())
      IndirectCallList.push_back(CS);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << " CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.DL->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      (void)Store;
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << " Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << " done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
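        // Fall back to treating the invoke's result as fully initialized.
        // This can cause false negatives for this call, but never false
        // positives.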
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertShadowCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      // FIXME: make it conditional if ClStoreCleanOrigin==0
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be replaced
      // by __msan_va_arg_overflow_size_tls at the first call.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
        createPrivateNonConstGlobalForString(*F.getParent(),
                                             StackDescription.str());

      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(&F, MS.IntptrTy));
    }
  }

  void visitSelectInst(SelectInst &I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();
    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

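    // Select propagation (a sketch of the formulas below): if the condition
    // shadow Sb is zero, the result shadow is simply the shadow of the
    // operand that was selected; if Sb is poisoned, only bits where c and d
    // agree and are both clean stay defined. E.g. for i8 operands with
    // c = d = 0x0F, Sc = 0xF0, Sd = 0x00, a poisoned condition gives
    // Sa1 = (c^d) | Sc | Sd = 0xF0, so the low nibble remains defined.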
    // Result shadow if condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned.
      // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.

      // Cast arguments to shadow-compatible type.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);

      // Result shadow if condition shadow is 1.
      Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      if (B->getType()->isVectorTy()) {
        Type *FlatTy = getShadowTyNoVec(B->getType());
        B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
                             ConstantInt::getNullValue(FlatTy));
        Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
                              ConstantInt::getNullValue(FlatTy));
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(
          Sb, getOrigin(I.getCondition()),
          IRB.CreateSelect(B, getOrigin(C), getOrigin(D))));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << " Res: " << *Res << "\n");
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
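    // Conservative default: insert a check for every operand (the runtime
    // reports a UMR here if an operand's shadow is poisoned) and assume the
    // result is fully initialized.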
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertShadowCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned ArgNo = CS.getArgumentNo(ArgIt);
      bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
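        // (In the SysV AMD64 calling convention, byval aggregates are passed
        // on the stack, never in the register save area.)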
        assert(A->getType()->isPointerTy());
        Type *RealTy = A->getType()->getPointerElementType();
        uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
        Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
        IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
                         ArgSize, kShadowTLSAlignment);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *Base;
        switch (AK) {
        case AK_GeneralPurpose:
          Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory:
          uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
          Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
          OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
        }
        IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
      }
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
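    // The offsets 8 and 16 used below come from the SysV AMD64 __va_list_tag
    // layout (a sketch, not defined in this file):
    //   struct __va_list_tag {
    //     unsigned gp_offset;       //  0
    //     unsigned fp_offset;       //  4
    //     void *overflow_arg_area;  //  8
    //     void *reg_save_area;      // 16
    //   };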
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize,
                       16);
    }
  }
};

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64. False positives are
  // possible on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}