//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, and report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
///
/// Atomic handling.
///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, an atomic
/// store to two disjoint locations cannot be done without a severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.

//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;

#define DEBUG_TYPE "msan"

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;

/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer reports that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of a load or store. Such bugs are very rare, since a load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. when only the lower bits of the address are garbage, or when the
// access happens early at program startup where malloc-ed memory is more
// likely to be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) in regaining control on transitions between
// instrumented and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
       cl::desc("Do not wrap indirect calls with target in the same module"),
       cl::Hidden, cl::init(true));

namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in a module to find
/// uninitialized reads.
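///
/// Shadow memory is reached through a direct mapping: for an application
/// address Addr, its shadow lives at Addr & ~ShadowMask, and its origin slot
/// at (ShadowAddr + OriginOffset) & ~3ULL (see getShadowPtr() and
/// getOriginPtr() below). As an illustrative example (not from a real run),
/// with the 64-bit constants above an access at 0x7f1234567890 has its shadow
/// at 0x3f1234567890.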
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0,
                  StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        DL(nullptr),
        WarningFn(nullptr),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  GlobalVariable *MsandrModuleStart;
  GlobalVariable *MsandrModuleEnd;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  std::unique_ptr<SpecialCaseList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
}  // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}


/// \brief Insert extern declarations of runtime-provided functions and
/// globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), NULL);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), NULL);
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
      "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IntptrTy, IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
      "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
      NULL);
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
  MemmoveFn = M.getOrInsertFunction(
      "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
      "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
      "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt32Ty(), IntptrTy, NULL);

  // Create globals.
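  // These thread-local globals are only declared here; they are defined by
  // the MSan runtime library. The sizes used below are 8 x i64 (64 bytes)
  // for the return-value shadow and 1000 x i64 (8000 bytes) each for the
  // parameter and va_arg shadow arrays.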
  RetvalTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
      M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_retval_origin_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
      M, ArrayType::get(OriginTy, 1000), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
      nullptr, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
      M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_va_arg_overflow_size_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
      M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
        PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
        ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }

  if (WrapIndirectCalls && ClWrapIndirectCallsFast) {
    MsandrModuleStart = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        nullptr, "__executable_start");
    MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
    MsandrModuleEnd = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        nullptr, "_end");
    MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
  }
}

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init into the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */ 0);
  switch (PtrSize) {
  case 64:
    ShadowMask = kShadowMask64;
    OriginOffset = kOriginOffset64;
    break;
  case 32:
    ShadowMask = kShadowMask32;
    OriginOffset = kOriginOffset32;
    break;
  default:
    report_fatal_error("unsupported pointer size");
    break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
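  // The weak ODR globals created below let the runtime observe the
  // compile-time values of the track-origins and keep-going settings.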
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                             "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool LoadShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  SmallVector<CallSite, 16> IndirectCallList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
                                                   AttributeSet::FunctionIndex,
                                                   Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    LoadShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
              dbgs() << "MemorySanitizer is not inserting checks into '"
                     << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    if (isa<StructType>(Shadow->getType())) {
      IRB.CreateAlignedStore(updateOrigin(Origin, IRB),
                             getOriginPtr(Addr, IRB), Alignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      // TODO(eugenis): handle non-zero constant shadow by inserting an
      // unconditional check (we cannot simply fail compilation here, as this
      // could be in dead code).
      if (isa<Constant>(ConvertedShadow)) return;
      unsigned TypeSizeInBits =
          MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall3(Fn, ConvertedShadow2,
                        IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                        updateOrigin(Origin, IRB));
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        IRBNew.CreateAlignedStore(updateOrigin(Origin, IRBNew),
                                  getOriginPtr(Addr, IRBNew), Alignment);
      }
    }
  }

  void materializeStores(bool InstrumentWithCalls) {
    for (auto Inst : StoreList) {
      StoreInst &SI = *dyn_cast<StoreInst>(Inst);

      IRBuilder<> IRB(&SI);
      Value *Val = SI.getValueOperand();
      Value *Addr = SI.getPointerOperand();
      Value *Shadow = SI.isAtomic() ?
          getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
                    InstrumentWithCalls);
      }
    }
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
    // See the comment in materializeStores().
    if (isa<Constant>(ConvertedShadow)) return;
    unsigned TypeSizeInBits =
        MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0));
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn);
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  void materializeIndirectCalls() {
    for (auto &CS : IndirectCallList) {
      Instruction *I = CS.getInstruction();
      BasicBlock *B = I->getParent();
      IRBuilder<> IRB(I);
      Value *Fn0 = CS.getCalledValue();
      Value *Fn = IRB.CreateBitCast(Fn0, MS.AnyFunctionPtrTy);

      if (ClWrapIndirectCallsFast) {
        // Check that the call target is within this module's limits.
        Value *Start =
            IRB.CreateBitCast(MS.MsandrModuleStart, MS.AnyFunctionPtrTy);
        Value *End = IRB.CreateBitCast(MS.MsandrModuleEnd, MS.AnyFunctionPtrTy);

        Value *NotInThisModule = IRB.CreateOr(IRB.CreateICmpULT(Fn, Start),
                                              IRB.CreateICmpUGE(Fn, End));

        PHINode *NewFnPhi =
            IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");

        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            NotInThisModule, NewFnPhi,
            /* Unreachable */ false, MS.ColdCallWeights);

        IRB.SetInsertPoint(CheckTerm);
        // Slow path: call the wrapper function to possibly transform the call
        // target.
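        // The PHI created above merges the original target (fast path: the
        // target is within this module) with the wrapper's result (slow path).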
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());

        NewFnPhi->addIncoming(Fn0, B);
        NewFnPhi->addIncoming(NewFn, dyn_cast<Instruction>(NewFn)->getParent());
        CS.setCalledFunction(NewFnPhi);
      } else {
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
        CS.setCalledFunction(NewFn);
      }
    }
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming values from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate over all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    // Wrap indirect calls.
    materializeIndirectCalls();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer types, the shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
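  ///
  /// For example, <4 x i32> flattens to i128; non-vector types are returned
  /// unchanged.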
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
        IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = FArg.hasByValAttr()
            ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
            : MS.DL->getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // The ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out the maximal valid memcpy alignment.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
                getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG: " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for the i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for the i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
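  ///
  /// If the value's shadow turns out to be a compile-time constant (i.e. not
  /// an Instruction), no check is queued for it.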
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
    case NotAtomic:
      return NotAtomic;
    case Unordered:
    case Monotonic:
    case Release:
      return Release;
    case Acquire:
    case AcquireRelease:
      return AcquireRelease;
    case SequentiallyConsistent:
      return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
    case NotAtomic:
      return NotAtomic;
    case Unordered:
    case Monotonic:
    case Acquire:
      return Acquire;
    case Release:
    case AcquireRelease:
      return AcquireRelease;
    case SequentiallyConsistent:
      return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of the cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we cannot
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in the right argument affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise ORs them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For an
  /// N-ary operation, the result origin is set to the origin of an argument
  /// that is not entirely initialized. If there is more than one such
  /// argument, the rightmost of them is picked. It does not matter which one
  /// is picked if all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for an arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }

  /// \brief Propagate shadow for an arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
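    // An uninitialized divisor can change observable behavior (e.g. integer
    // division by zero traps), so it is checked eagerly rather than
    // propagated; the result then simply inherits the shadow and origin of
    // the first operand.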
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, does not matter
    // now). The result is defined if one of the following is true:
    //   * there is a defined 1 bit in C
    //   * C is fully defined
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
        IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                      IRB.CreateICmpEQ(
                          IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximise the undefined shadow bit, minimize other undefined bits.
      return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)),
                          SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimise the undefined shadow bit, maximise other undefined bits.
      return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)),
                          SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }

  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
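  ///
  /// The idea: with shadow Sa, an unsigned value A can be anywhere in the
  /// interval [A & ~Sa, A | Sa]. As an illustrative example, A = 0b10?? with
  /// the two low bits unknown ranges over [8, 11], so "A > 4" is defined
  /// (always true) while "A > 9" is not.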
1511 /// FIXME: output seems suboptimal when one of the operands is a constant 1512 void handleRelationalComparisonExact(ICmpInst &I) { 1513 IRBuilder<> IRB(&I); 1514 Value *A = I.getOperand(0); 1515 Value *B = I.getOperand(1); 1516 Value *Sa = getShadow(A); 1517 Value *Sb = getShadow(B); 1518 1519 // Get rid of pointers and vectors of pointers. 1520 // For ints (and vectors of ints), types of A and Sa match, 1521 // and this is a no-op. 1522 A = IRB.CreatePointerCast(A, Sa->getType()); 1523 B = IRB.CreatePointerCast(B, Sb->getType()); 1524 1525 // Let [a0, a1] be the interval of possible values of A, taking into account 1526 // its undefined bits. Let [b0, b1] be the interval of possible values of B. 1527 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0). 1528 bool IsSigned = I.isSigned(); 1529 Value *S1 = IRB.CreateICmp(I.getPredicate(), 1530 getLowestPossibleValue(IRB, A, Sa, IsSigned), 1531 getHighestPossibleValue(IRB, B, Sb, IsSigned)); 1532 Value *S2 = IRB.CreateICmp(I.getPredicate(), 1533 getHighestPossibleValue(IRB, A, Sa, IsSigned), 1534 getLowestPossibleValue(IRB, B, Sb, IsSigned)); 1535 Value *Si = IRB.CreateXor(S1, S2); 1536 setShadow(&I, Si); 1537 setOriginForNaryOp(I); 1538 } 1539 1540 /// \brief Instrument signed relational comparisons. 1541 /// 1542 /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by 1543 /// propagating the highest bit of the shadow. Everything else is delegated 1544 /// to handleShadowOr(). 1545 void handleSignedRelationalComparison(ICmpInst &I) { 1546 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0)); 1547 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1)); 1548 Value* op = nullptr; 1549 CmpInst::Predicate pre = I.getPredicate(); 1550 if (constOp0 && constOp0->isNullValue() && 1551 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) { 1552 op = I.getOperand(1); 1553 } else if (constOp1 && constOp1->isNullValue() && 1554 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) { 1555 op = I.getOperand(0); 1556 } 1557 if (op) { 1558 IRBuilder<> IRB(&I); 1559 Value* Shadow = 1560 IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt"); 1561 setShadow(&I, Shadow); 1562 setOrigin(&I, getOrigin(op)); 1563 } else { 1564 handleShadowOr(I); 1565 } 1566 } 1567 1568 void visitICmpInst(ICmpInst &I) { 1569 if (!ClHandleICmp) { 1570 handleShadowOr(I); 1571 return; 1572 } 1573 if (I.isEquality()) { 1574 handleEqualityComparison(I); 1575 return; 1576 } 1577 1578 assert(I.isRelational()); 1579 if (ClHandleICmpExact) { 1580 handleRelationalComparisonExact(I); 1581 return; 1582 } 1583 if (I.isSigned()) { 1584 handleSignedRelationalComparison(I); 1585 return; 1586 } 1587 1588 assert(I.isUnsigned()); 1589 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) { 1590 handleRelationalComparisonExact(I); 1591 return; 1592 } 1593 1594 handleShadowOr(I); 1595 } 1596 1597 void visitFCmpInst(FCmpInst &I) { 1598 handleShadowOr(I); 1599 } 1600 1601 void handleShift(BinaryOperator &I) { 1602 IRBuilder<> IRB(&I); 1603 // If any of the S2 bits are poisoned, the whole thing is poisoned. 1604 // Otherwise perform the same shift on S1. 
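// Roughly, for "%a = shl i32 %x, %y" with shadows Sx and Sy (illustrative
// names), the code below emits:
//   %s2conv = sext i1 (icmp ne i32 Sy, 0) to i32  ; all-ones iff %y is poisoned
//   %sh     = shl i32 Sx, %y                      ; shift the shadow like the value
//   Sa      = or i32 %sh, %s2conv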
1605 Value *S1 = getShadow(&I, 0); 1606 Value *S2 = getShadow(&I, 1); 1607 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), 1608 S2->getType()); 1609 Value *V2 = I.getOperand(1); 1610 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2); 1611 setShadow(&I, IRB.CreateOr(Shift, S2Conv)); 1612 setOriginForNaryOp(I); 1613 } 1614 1615 void visitShl(BinaryOperator &I) { handleShift(I); } 1616 void visitAShr(BinaryOperator &I) { handleShift(I); } 1617 void visitLShr(BinaryOperator &I) { handleShift(I); } 1618 1619 /// \brief Instrument llvm.memmove 1620 /// 1621 /// At this point we don't know if llvm.memmove will be inlined or not. 1622 /// If we don't instrument it and it gets inlined, 1623 /// our interceptor will not kick in and we will lose the memmove. 1624 /// If we instrument the call here, but it does not get inlined, 1625 /// we will memmove the shadow twice, which is bad in case 1626 /// of overlapping regions. So, we simply lower the intrinsic to a call. 1627 /// 1628 /// A similar situation exists for memcpy and memset. 1629 void visitMemMoveInst(MemMoveInst &I) { 1630 IRBuilder<> IRB(&I); 1631 IRB.CreateCall3( 1632 MS.MemmoveFn, 1633 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()), 1634 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()), 1635 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)); 1636 I.eraseFromParent(); 1637 } 1638 1639 // Similar to memmove: avoid copying shadow twice. 1640 // This is somewhat unfortunate as it may slow down small constant memcpys. 1641 // FIXME: consider doing manual inline for small constant sizes and proper 1642 // alignment. 1643 void visitMemCpyInst(MemCpyInst &I) { 1644 IRBuilder<> IRB(&I); 1645 IRB.CreateCall3( 1646 MS.MemcpyFn, 1647 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()), 1648 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()), 1649 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)); 1650 I.eraseFromParent(); 1651 } 1652 1653 // Same as memcpy. 1654 void visitMemSetInst(MemSetInst &I) { 1655 IRBuilder<> IRB(&I); 1656 IRB.CreateCall3( 1657 MS.MemsetFn, 1658 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()), 1659 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false), 1660 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)); 1661 I.eraseFromParent(); 1662 } 1663 1664 void visitVAStartInst(VAStartInst &I) { 1665 VAHelper->visitVAStartInst(I); 1666 } 1667 1668 void visitVACopyInst(VACopyInst &I) { 1669 VAHelper->visitVACopyInst(I); 1670 } 1671 1672 enum IntrinsicKind { 1673 IK_DoesNotAccessMemory, 1674 IK_OnlyReadsMemory, 1675 IK_WritesMemory 1676 }; 1677 1678 static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) { 1679 const int DoesNotAccessMemory = IK_DoesNotAccessMemory; 1680 const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory; 1681 const int OnlyReadsMemory = IK_OnlyReadsMemory; 1682 const int OnlyAccessesArgumentPointees = IK_WritesMemory; 1683 const int UnknownModRefBehavior = IK_WritesMemory; 1684 #define GET_INTRINSIC_MODREF_BEHAVIOR 1685 #define ModRefBehavior IntrinsicKind 1686 #include "llvm/IR/Intrinsics.gen" 1687 #undef ModRefBehavior 1688 #undef GET_INTRINSIC_MODREF_BEHAVIOR 1689 } 1690 1691 /// \brief Handle vector store-like intrinsics. 1692 /// 1693 /// Instrument intrinsics that look like a simple SIMD store: writes memory, 1694 /// has 1 pointer argument and 1 vector argument, returns void.
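/// (For example, an unaligned SSE store intrinsic such as
/// llvm.x86.sse.storeu.ps fits this shape.) The shadow of the vector argument
/// is stored to the shadow of the destination; alignment 1 is used because
/// the actual alignment of the store is unknown.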
1695 bool handleVectorStoreIntrinsic(IntrinsicInst &I) { 1696 IRBuilder<> IRB(&I); 1697 Value* Addr = I.getArgOperand(0); 1698 Value *Shadow = getShadow(&I, 1); 1699 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB); 1700 1701 // We don't know the pointer alignment (could be unaligned SSE store!). 1702 // Have to assume the worst case. 1703 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1); 1704 1705 if (ClCheckAccessAddress) 1706 insertShadowCheck(Addr, &I); 1707 1708 // FIXME: use ClStoreCleanOrigin 1709 // FIXME: factor out common code from materializeStores 1710 if (MS.TrackOrigins) 1711 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB)); 1712 return true; 1713 } 1714 1715 /// \brief Handle vector load-like intrinsics. 1716 /// 1717 /// Instrument intrinsics that look like a simple SIMD load: reads memory, 1718 /// has 1 pointer argument, returns a vector. 1719 bool handleVectorLoadIntrinsic(IntrinsicInst &I) { 1720 IRBuilder<> IRB(&I); 1721 Value *Addr = I.getArgOperand(0); 1722 1723 Type *ShadowTy = getShadowTy(&I); 1724 if (LoadShadow) { 1725 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB); 1726 // We don't know the pointer alignment (could be unaligned SSE load!). 1727 // Have to assume the worst case. 1728 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld")); 1729 } else { 1730 setShadow(&I, getCleanShadow(&I)); 1731 } 1732 1733 if (ClCheckAccessAddress) 1734 insertShadowCheck(Addr, &I); 1735 1736 if (MS.TrackOrigins) { 1737 if (LoadShadow) 1738 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB))); 1739 else 1740 setOrigin(&I, getCleanOrigin()); 1741 } 1742 return true; 1743 } 1744 1745 /// \brief Handle (SIMD arithmetic)-like intrinsics. 1746 /// 1747 /// Instrument intrinsics with any number of arguments of the same type, 1748 /// equal to the return type. The type should be simple (no aggregates or 1749 /// pointers; vectors are fine). 1750 /// Caller guarantees that this intrinsic does not access memory. 1751 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) { 1752 Type *RetTy = I.getType(); 1753 if (!(RetTy->isIntOrIntVectorTy() || 1754 RetTy->isFPOrFPVectorTy() || 1755 RetTy->isX86_MMXTy())) 1756 return false; 1757 1758 unsigned NumArgOperands = I.getNumArgOperands(); 1759 1760 for (unsigned i = 0; i < NumArgOperands; ++i) { 1761 Type *Ty = I.getArgOperand(i)->getType(); 1762 if (Ty != RetTy) 1763 return false; 1764 } 1765 1766 IRBuilder<> IRB(&I); 1767 ShadowAndOriginCombiner SC(this, IRB); 1768 for (unsigned i = 0; i < NumArgOperands; ++i) 1769 SC.Add(I.getArgOperand(i)); 1770 SC.Done(&I); 1771 1772 return true; 1773 } 1774 1775 /// \brief Heuristically instrument unknown intrinsics. 1776 /// 1777 /// The main purpose of this code is to do something reasonable with all 1778 /// random intrinsics we might encounter, most importantly SIMD intrinsics. 1779 /// We recognize several classes of intrinsics by their argument types and 1780 /// ModRefBehavior and apply special instrumentation when we are reasonably 1781 /// sure that we know what the intrinsic does. 1782 /// 1783 /// We special-case intrinsics where this approach fails. See llvm.bswap 1784 /// handling as an example of that.
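/// The classes recognized below are:
///  * store-like: returns void, takes a pointer and a vector, writes memory;
///  * load-like: returns a vector, takes a single pointer, only reads memory;
///  * simple arithmetic: does not access memory and all arguments have the
///    same type as the result.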
1785 bool handleUnknownIntrinsic(IntrinsicInst &I) { 1786 unsigned NumArgOperands = I.getNumArgOperands(); 1787 if (NumArgOperands == 0) 1788 return false; 1789 1790 Intrinsic::ID iid = I.getIntrinsicID(); 1791 IntrinsicKind IK = getIntrinsicKind(iid); 1792 bool OnlyReadsMemory = IK == IK_OnlyReadsMemory; 1793 bool WritesMemory = IK == IK_WritesMemory; 1794 assert(!(OnlyReadsMemory && WritesMemory)); 1795 1796 if (NumArgOperands == 2 && 1797 I.getArgOperand(0)->getType()->isPointerTy() && 1798 I.getArgOperand(1)->getType()->isVectorTy() && 1799 I.getType()->isVoidTy() && 1800 WritesMemory) { 1801 // This looks like a vector store. 1802 return handleVectorStoreIntrinsic(I); 1803 } 1804 1805 if (NumArgOperands == 1 && 1806 I.getArgOperand(0)->getType()->isPointerTy() && 1807 I.getType()->isVectorTy() && 1808 OnlyReadsMemory) { 1809 // This looks like a vector load. 1810 return handleVectorLoadIntrinsic(I); 1811 } 1812 1813 if (!OnlyReadsMemory && !WritesMemory) 1814 if (maybeHandleSimpleNomemIntrinsic(I)) 1815 return true; 1816 1817 // FIXME: detect and handle SSE maskstore/maskload 1818 return false; 1819 } 1820 1821 void handleBswap(IntrinsicInst &I) { 1822 IRBuilder<> IRB(&I); 1823 Value *Op = I.getArgOperand(0); 1824 Type *OpType = Op->getType(); 1825 Function *BswapFunc = Intrinsic::getDeclaration( 1826 F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1)); 1827 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op))); 1828 setOrigin(&I, getOrigin(Op)); 1829 } 1830 1831 // \brief Instrument vector convert intrinsic. 1832 // 1833 // This function instruments intrinsics like cvtsi2ss: 1834 // %Out = int_xxx_cvtyyy(%ConvertOp) 1835 // or 1836 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp) 1837 // The intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same 1838 // number of \p Out elements, and (if it has 2 arguments) copies the rest of the 1839 // elements from \p CopyOp. 1840 // In most cases the conversion involves a floating-point value, which may trigger a 1841 // hardware exception when not fully initialized. For this reason we require 1842 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise. 1843 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p 1844 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always 1845 // return a fully initialized value. 1846 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) { 1847 IRBuilder<> IRB(&I); 1848 Value *CopyOp, *ConvertOp; 1849 1850 switch (I.getNumArgOperands()) { 1851 case 2: 1852 CopyOp = I.getArgOperand(0); 1853 ConvertOp = I.getArgOperand(1); 1854 break; 1855 case 1: 1856 ConvertOp = I.getArgOperand(0); 1857 CopyOp = nullptr; 1858 break; 1859 default: 1860 llvm_unreachable("Cvt intrinsic with unsupported number of arguments."); 1861 } 1862 1863 // The first *NumUsedElements* elements of ConvertOp are converted to the 1864 // same number of output elements. The rest of the output is copied from 1865 // CopyOp, or (if not available) filled with zeroes. 1866 // Combine shadow for elements of ConvertOp that are used in this operation, 1867 // and insert a check. 1868 // FIXME: consider propagating shadow of ConvertOp, at least in the case of 1869 // int->any conversion.
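// For example, for cvtsi2ss (%Out = cvt(%CopyOp, %ConvertOp)) with
// NumUsedElements == 1, the scalar %ConvertOp shadow is checked as a whole,
// and the result shadow is the %CopyOp shadow with element 0 forced to clean.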
1870 Value *ConvertShadow = getShadow(ConvertOp); 1871 Value *AggShadow = nullptr; 1872 if (ConvertOp->getType()->isVectorTy()) { 1873 AggShadow = IRB.CreateExtractElement( 1874 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0)); 1875 for (int i = 1; i < NumUsedElements; ++i) { 1876 Value *MoreShadow = IRB.CreateExtractElement( 1877 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i)); 1878 AggShadow = IRB.CreateOr(AggShadow, MoreShadow); 1879 } 1880 } else { 1881 AggShadow = ConvertShadow; 1882 } 1883 assert(AggShadow->getType()->isIntegerTy()); 1884 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I); 1885 1886 // Build result shadow by zero-filling parts of CopyOp shadow that come from 1887 // ConvertOp. 1888 if (CopyOp) { 1889 assert(CopyOp->getType() == I.getType()); 1890 assert(CopyOp->getType()->isVectorTy()); 1891 Value *ResultShadow = getShadow(CopyOp); 1892 Type *EltTy = ResultShadow->getType()->getVectorElementType(); 1893 for (int i = 0; i < NumUsedElements; ++i) { 1894 ResultShadow = IRB.CreateInsertElement( 1895 ResultShadow, ConstantInt::getNullValue(EltTy), 1896 ConstantInt::get(IRB.getInt32Ty(), i)); 1897 } 1898 setShadow(&I, ResultShadow); 1899 setOrigin(&I, getOrigin(CopyOp)); 1900 } else { 1901 setShadow(&I, getCleanShadow(&I)); 1902 } 1903 } 1904 1905 // Given a scalar or vector, extract the lower 64 bits (or fewer), and return all 1906 // zeroes if that value is zero, and all ones otherwise. 1907 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) { 1908 if (S->getType()->isVectorTy()) 1909 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true); 1910 assert(S->getType()->getPrimitiveSizeInBits() <= 64); 1911 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S)); 1912 return CreateShadowCast(IRB, S2, T, /* Signed */ true); 1913 } 1914 1915 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) { 1916 Type *T = S->getType(); 1917 assert(T->isVectorTy()); 1918 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S)); 1919 return IRB.CreateSExt(S2, T); 1920 } 1921 1922 // \brief Instrument vector shift intrinsic. 1923 // 1924 // This function instruments intrinsics like int_x86_avx2_psll_w. 1925 // The intrinsic shifts %In by %ShiftSize bits. 1926 // %ShiftSize may be a vector. In that case the lower 64 bits determine the shift 1927 // size, and the rest is ignored. Behavior is defined even if shift size is 1928 // greater than register (or field) width. 1929 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) { 1930 assert(I.getNumArgOperands() == 2); 1931 IRBuilder<> IRB(&I); 1932 // If any of the S2 bits are poisoned, the whole thing is poisoned. 1933 // Otherwise perform the same shift on S1. 1934 Value *S1 = getShadow(&I, 0); 1935 Value *S2 = getShadow(&I, 1); 1936 Value *S2Conv = Variable ?
VariableShadowExtend(IRB, S2) 1937 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I)); 1938 Value *V1 = I.getOperand(0); 1939 Value *V2 = I.getOperand(1); 1940 Value *Shift = IRB.CreateCall2(I.getCalledValue(), 1941 IRB.CreateBitCast(S1, V1->getType()), V2); 1942 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I)); 1943 setShadow(&I, IRB.CreateOr(Shift, S2Conv)); 1944 setOriginForNaryOp(I); 1945 } 1946 1947 void visitIntrinsicInst(IntrinsicInst &I) { 1948 switch (I.getIntrinsicID()) { 1949 case llvm::Intrinsic::bswap: 1950 handleBswap(I); 1951 break; 1952 case llvm::Intrinsic::x86_avx512_cvtsd2usi64: 1953 case llvm::Intrinsic::x86_avx512_cvtsd2usi: 1954 case llvm::Intrinsic::x86_avx512_cvtss2usi64: 1955 case llvm::Intrinsic::x86_avx512_cvtss2usi: 1956 case llvm::Intrinsic::x86_avx512_cvttss2usi64: 1957 case llvm::Intrinsic::x86_avx512_cvttss2usi: 1958 case llvm::Intrinsic::x86_avx512_cvttsd2usi64: 1959 case llvm::Intrinsic::x86_avx512_cvttsd2usi: 1960 case llvm::Intrinsic::x86_avx512_cvtusi2sd: 1961 case llvm::Intrinsic::x86_avx512_cvtusi2ss: 1962 case llvm::Intrinsic::x86_avx512_cvtusi642sd: 1963 case llvm::Intrinsic::x86_avx512_cvtusi642ss: 1964 case llvm::Intrinsic::x86_sse2_cvtsd2si64: 1965 case llvm::Intrinsic::x86_sse2_cvtsd2si: 1966 case llvm::Intrinsic::x86_sse2_cvtsd2ss: 1967 case llvm::Intrinsic::x86_sse2_cvtsi2sd: 1968 case llvm::Intrinsic::x86_sse2_cvtsi642sd: 1969 case llvm::Intrinsic::x86_sse2_cvtss2sd: 1970 case llvm::Intrinsic::x86_sse2_cvttsd2si64: 1971 case llvm::Intrinsic::x86_sse2_cvttsd2si: 1972 case llvm::Intrinsic::x86_sse_cvtsi2ss: 1973 case llvm::Intrinsic::x86_sse_cvtsi642ss: 1974 case llvm::Intrinsic::x86_sse_cvtss2si64: 1975 case llvm::Intrinsic::x86_sse_cvtss2si: 1976 case llvm::Intrinsic::x86_sse_cvttss2si64: 1977 case llvm::Intrinsic::x86_sse_cvttss2si: 1978 handleVectorConvertIntrinsic(I, 1); 1979 break; 1980 case llvm::Intrinsic::x86_sse2_cvtdq2pd: 1981 case llvm::Intrinsic::x86_sse2_cvtps2pd: 1982 case llvm::Intrinsic::x86_sse_cvtps2pi: 1983 case llvm::Intrinsic::x86_sse_cvttps2pi: 1984 handleVectorConvertIntrinsic(I, 2); 1985 break; 1986 case llvm::Intrinsic::x86_avx512_psll_dq: 1987 case llvm::Intrinsic::x86_avx512_psrl_dq: 1988 case llvm::Intrinsic::x86_avx2_psll_w: 1989 case llvm::Intrinsic::x86_avx2_psll_d: 1990 case llvm::Intrinsic::x86_avx2_psll_q: 1991 case llvm::Intrinsic::x86_avx2_pslli_w: 1992 case llvm::Intrinsic::x86_avx2_pslli_d: 1993 case llvm::Intrinsic::x86_avx2_pslli_q: 1994 case llvm::Intrinsic::x86_avx2_psll_dq: 1995 case llvm::Intrinsic::x86_avx2_psrl_w: 1996 case llvm::Intrinsic::x86_avx2_psrl_d: 1997 case llvm::Intrinsic::x86_avx2_psrl_q: 1998 case llvm::Intrinsic::x86_avx2_psra_w: 1999 case llvm::Intrinsic::x86_avx2_psra_d: 2000 case llvm::Intrinsic::x86_avx2_psrli_w: 2001 case llvm::Intrinsic::x86_avx2_psrli_d: 2002 case llvm::Intrinsic::x86_avx2_psrli_q: 2003 case llvm::Intrinsic::x86_avx2_psrai_w: 2004 case llvm::Intrinsic::x86_avx2_psrai_d: 2005 case llvm::Intrinsic::x86_avx2_psrl_dq: 2006 case llvm::Intrinsic::x86_sse2_psll_w: 2007 case llvm::Intrinsic::x86_sse2_psll_d: 2008 case llvm::Intrinsic::x86_sse2_psll_q: 2009 case llvm::Intrinsic::x86_sse2_pslli_w: 2010 case llvm::Intrinsic::x86_sse2_pslli_d: 2011 case llvm::Intrinsic::x86_sse2_pslli_q: 2012 case llvm::Intrinsic::x86_sse2_psll_dq: 2013 case llvm::Intrinsic::x86_sse2_psrl_w: 2014 case llvm::Intrinsic::x86_sse2_psrl_d: 2015 case llvm::Intrinsic::x86_sse2_psrl_q: 2016 case llvm::Intrinsic::x86_sse2_psra_w: 2017 case llvm::Intrinsic::x86_sse2_psra_d: 2018 case 
llvm::Intrinsic::x86_sse2_psrli_w: 2019 case llvm::Intrinsic::x86_sse2_psrli_d: 2020 case llvm::Intrinsic::x86_sse2_psrli_q: 2021 case llvm::Intrinsic::x86_sse2_psrai_w: 2022 case llvm::Intrinsic::x86_sse2_psrai_d: 2023 case llvm::Intrinsic::x86_sse2_psrl_dq: 2024 case llvm::Intrinsic::x86_mmx_psll_w: 2025 case llvm::Intrinsic::x86_mmx_psll_d: 2026 case llvm::Intrinsic::x86_mmx_psll_q: 2027 case llvm::Intrinsic::x86_mmx_pslli_w: 2028 case llvm::Intrinsic::x86_mmx_pslli_d: 2029 case llvm::Intrinsic::x86_mmx_pslli_q: 2030 case llvm::Intrinsic::x86_mmx_psrl_w: 2031 case llvm::Intrinsic::x86_mmx_psrl_d: 2032 case llvm::Intrinsic::x86_mmx_psrl_q: 2033 case llvm::Intrinsic::x86_mmx_psra_w: 2034 case llvm::Intrinsic::x86_mmx_psra_d: 2035 case llvm::Intrinsic::x86_mmx_psrli_w: 2036 case llvm::Intrinsic::x86_mmx_psrli_d: 2037 case llvm::Intrinsic::x86_mmx_psrli_q: 2038 case llvm::Intrinsic::x86_mmx_psrai_w: 2039 case llvm::Intrinsic::x86_mmx_psrai_d: 2040 handleVectorShiftIntrinsic(I, /* Variable */ false); 2041 break; 2042 case llvm::Intrinsic::x86_avx2_psllv_d: 2043 case llvm::Intrinsic::x86_avx2_psllv_d_256: 2044 case llvm::Intrinsic::x86_avx2_psllv_q: 2045 case llvm::Intrinsic::x86_avx2_psllv_q_256: 2046 case llvm::Intrinsic::x86_avx2_psrlv_d: 2047 case llvm::Intrinsic::x86_avx2_psrlv_d_256: 2048 case llvm::Intrinsic::x86_avx2_psrlv_q: 2049 case llvm::Intrinsic::x86_avx2_psrlv_q_256: 2050 case llvm::Intrinsic::x86_avx2_psrav_d: 2051 case llvm::Intrinsic::x86_avx2_psrav_d_256: 2052 handleVectorShiftIntrinsic(I, /* Variable */ true); 2053 break; 2054 2055 // Byte shifts are not implemented. 2056 // case llvm::Intrinsic::x86_avx512_psll_dq_bs: 2057 // case llvm::Intrinsic::x86_avx512_psrl_dq_bs: 2058 // case llvm::Intrinsic::x86_avx2_psll_dq_bs: 2059 // case llvm::Intrinsic::x86_avx2_psrl_dq_bs: 2060 // case llvm::Intrinsic::x86_sse2_psll_dq_bs: 2061 // case llvm::Intrinsic::x86_sse2_psrl_dq_bs: 2062 2063 default: 2064 if (!handleUnknownIntrinsic(I)) 2065 visitInstruction(I); 2066 break; 2067 } 2068 } 2069 2070 void visitCallSite(CallSite CS) { 2071 Instruction &I = *CS.getInstruction(); 2072 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite"); 2073 if (CS.isCall()) { 2074 CallInst *Call = cast<CallInst>(&I); 2075 2076 // For inline asm, do the usual thing: check argument shadow and mark all 2077 // outputs as clean. Note that any side effects of the inline asm that are 2078 // not immediately visible in its constraints are not handled. 2079 if (Call->isInlineAsm()) { 2080 visitInstruction(I); 2081 return; 2082 } 2083 2084 // Allow only tail calls with the same types, otherwise 2085 // we may have a false positive: shadow for a non-void RetVal 2086 // will get propagated to a void RetVal. 2087 if (Call->isTailCall() && Call->getType() != Call->getParent()->getType()) 2088 Call->setTailCall(false); 2089 2090 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere"); 2091 2092 // We are going to insert code that relies on the fact that the callee 2093 // will become a non-readonly function after it is instrumented by us. To 2094 // prevent this code from being optimized out, mark that function 2095 // non-readonly in advance. 2096 if (Function *Func = Call->getCalledFunction()) { 2097 // Clear out readonly/readnone attributes. 
2098 AttrBuilder B; 2099 B.addAttribute(Attribute::ReadOnly) 2100 .addAttribute(Attribute::ReadNone); 2101 Func->removeAttributes(AttributeSet::FunctionIndex, 2102 AttributeSet::get(Func->getContext(), 2103 AttributeSet::FunctionIndex, 2104 B)); 2105 } 2106 } 2107 IRBuilder<> IRB(&I); 2108 2109 if (MS.WrapIndirectCalls && !CS.getCalledFunction()) 2110 IndirectCallList.push_back(CS); 2111 2112 unsigned ArgOffset = 0; 2113 DEBUG(dbgs() << " CallSite: " << I << "\n"); 2114 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); 2115 ArgIt != End; ++ArgIt) { 2116 Value *A = *ArgIt; 2117 unsigned i = ArgIt - CS.arg_begin(); 2118 if (!A->getType()->isSized()) { 2119 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); 2120 continue; 2121 } 2122 unsigned Size = 0; 2123 Value *Store = nullptr; 2124 // Compute the Shadow for arg even if it is ByVal, because 2125 // in that case getShadow() will copy the actual arg shadow to 2126 // __msan_param_tls. 2127 Value *ArgShadow = getShadow(A); 2128 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset); 2129 DEBUG(dbgs() << " Arg#" << i << ": " << *A << 2130 " Shadow: " << *ArgShadow << "\n"); 2131 if (CS.paramHasAttr(i + 1, Attribute::ByVal)) { 2132 assert(A->getType()->isPointerTy() && 2133 "ByVal argument is not a pointer!"); 2134 Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType()); 2135 unsigned Alignment = CS.getParamAlignment(i + 1); 2136 Store = IRB.CreateMemCpy(ArgShadowBase, 2137 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB), 2138 Size, Alignment); 2139 } else { 2140 Size = MS.DL->getTypeAllocSize(A->getType()); 2141 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, 2142 kShadowTLSAlignment); 2143 } 2144 if (MS.TrackOrigins) 2145 IRB.CreateStore(getOrigin(A), 2146 getOriginPtrForArgument(A, IRB, ArgOffset)); 2147 (void)Store; 2148 assert(Size != 0 && Store != nullptr); 2149 DEBUG(dbgs() << " Param:" << *Store << "\n"); 2150 ArgOffset += DataLayout::RoundUpAlignment(Size, 8); 2151 } 2152 DEBUG(dbgs() << " done with call args\n"); 2153 2154 FunctionType *FT = 2155 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0)); 2156 if (FT->isVarArg()) { 2157 VAHelper->visitCallSite(CS, IRB); 2158 } 2159 2160 // Now, get the shadow for the RetVal. 2161 if (!I.getType()->isSized()) return; 2162 IRBuilder<> IRBBefore(&I); 2163 // Until we have full dynamic coverage, make sure the retval shadow is 0. 2164 Value *Base = getShadowPtrForRetval(&I, IRBBefore); 2165 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment); 2166 Instruction *NextInsn = nullptr; 2167 if (CS.isCall()) { 2168 NextInsn = I.getNextNode(); 2169 } else { 2170 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest(); 2171 if (!NormalDest->getSinglePredecessor()) { 2172 // FIXME: this case is tricky, so we are just conservative here. 2173 // Perhaps we need to split the edge between this BB and NormalDest, 2174 // but a naive attempt to use SplitEdge leads to a crash. 
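// Treating the return value as fully initialized here may cause false
// negatives, but it can not produce false positives.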
2175 setShadow(&I, getCleanShadow(&I)); 2176 setOrigin(&I, getCleanOrigin()); 2177 return; 2178 } 2179 NextInsn = NormalDest->getFirstInsertionPt(); 2180 assert(NextInsn && 2181 "Could not find insertion point for retval shadow load"); 2182 } 2183 IRBuilder<> IRBAfter(NextInsn); 2184 Value *RetvalShadow = 2185 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter), 2186 kShadowTLSAlignment, "_msret"); 2187 setShadow(&I, RetvalShadow); 2188 if (MS.TrackOrigins) 2189 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter))); 2190 } 2191 2192 void visitReturnInst(ReturnInst &I) { 2193 IRBuilder<> IRB(&I); 2194 Value *RetVal = I.getReturnValue(); 2195 if (!RetVal) return; 2196 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB); 2197 if (CheckReturnValue) { 2198 insertShadowCheck(RetVal, &I); 2199 Value *Shadow = getCleanShadow(RetVal); 2200 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); 2201 } else { 2202 Value *Shadow = getShadow(RetVal); 2203 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); 2204 // FIXME: make it conditional if ClStoreCleanOrigin==0 2205 if (MS.TrackOrigins) 2206 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB)); 2207 } 2208 } 2209 2210 void visitPHINode(PHINode &I) { 2211 IRBuilder<> IRB(&I); 2212 ShadowPHINodes.push_back(&I); 2213 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(), 2214 "_msphi_s")); 2215 if (MS.TrackOrigins) 2216 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), 2217 "_msphi_o")); 2218 } 2219 2220 void visitAllocaInst(AllocaInst &I) { 2221 setShadow(&I, getCleanShadow(&I)); 2222 IRBuilder<> IRB(I.getNextNode()); 2223 uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType()); 2224 if (PoisonStack && ClPoisonStackWithCall) { 2225 IRB.CreateCall2(MS.MsanPoisonStackFn, 2226 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), 2227 ConstantInt::get(MS.IntptrTy, Size)); 2228 } else { 2229 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB); 2230 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0); 2231 IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment()); 2232 } 2233 2234 if (PoisonStack && MS.TrackOrigins) { 2235 setOrigin(&I, getCleanOrigin()); 2236 SmallString<2048> StackDescriptionStorage; 2237 raw_svector_ostream StackDescription(StackDescriptionStorage); 2238 // We create a string with a description of the stack allocation and 2239 // pass it into __msan_set_alloca_origin. 2240 // It will be printed by the run-time if stack-originated UMR is found. 2241 // The first 4 bytes of the string are set to '----' and will be replaced 2242 // by __msan_va_arg_overflow_size_tls at the first call. 2243 StackDescription << "----" << I.getName() << "@" << F.getName(); 2244 Value *Descr = 2245 createPrivateNonConstGlobalForString(*F.getParent(), 2246 StackDescription.str()); 2247 2248 IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn, 2249 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), 2250 ConstantInt::get(MS.IntptrTy, Size), 2251 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()), 2252 IRB.CreatePointerCast(&F, MS.IntptrTy)); 2253 } 2254 } 2255 2256 void visitSelectInst(SelectInst& I) { 2257 IRBuilder<> IRB(&I); 2258 // a = select b, c, d 2259 Value *B = I.getCondition(); 2260 Value *C = I.getTrueValue(); 2261 Value *D = I.getFalseValue(); 2262 Value *Sb = getShadow(B); 2263 Value *Sc = getShadow(C); 2264 Value *Sd = getShadow(D); 2265 2266 // Result shadow if condition shadow is 0. 
2267 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd); 2268 Value *Sa1; 2269 if (I.getType()->isAggregateType()) { 2270 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do 2271 // an extra "select". This results in much more compact IR. 2272 // Sa = select Sb, poisoned, (select b, Sc, Sd) 2273 Sa1 = getPoisonedShadow(getShadowTy(I.getType())); 2274 } else { 2275 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ] 2276 // If Sb (condition is poisoned), look for bits in c and d that are equal 2277 // and both unpoisoned. 2278 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd. 2279 2280 // Cast arguments to shadow-compatible type. 2281 C = CreateAppToShadowCast(IRB, C); 2282 D = CreateAppToShadowCast(IRB, D); 2283 2284 // Result shadow if condition shadow is 1. 2285 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd)); 2286 } 2287 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select"); 2288 setShadow(&I, Sa); 2289 if (MS.TrackOrigins) { 2290 // Origins are always i32, so any vector conditions must be flattened. 2291 // FIXME: consider tracking vector origins for app vectors? 2292 if (B->getType()->isVectorTy()) { 2293 Type *FlatTy = getShadowTyNoVec(B->getType()); 2294 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy), 2295 ConstantInt::getNullValue(FlatTy)); 2296 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy), 2297 ConstantInt::getNullValue(FlatTy)); 2298 } 2299 // a = select b, c, d 2300 // Oa = Sb ? Ob : (b ? Oc : Od) 2301 setOrigin(&I, IRB.CreateSelect( 2302 Sb, getOrigin(I.getCondition()), 2303 IRB.CreateSelect(B, getOrigin(C), getOrigin(D)))); 2304 } 2305 } 2306 2307 void visitLandingPadInst(LandingPadInst &I) { 2308 // Do nothing. 2309 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1 2310 setShadow(&I, getCleanShadow(&I)); 2311 setOrigin(&I, getCleanOrigin()); 2312 } 2313 2314 void visitGetElementPtrInst(GetElementPtrInst &I) { 2315 handleShadowOr(I); 2316 } 2317 2318 void visitExtractValueInst(ExtractValueInst &I) { 2319 IRBuilder<> IRB(&I); 2320 Value *Agg = I.getAggregateOperand(); 2321 DEBUG(dbgs() << "ExtractValue: " << I << "\n"); 2322 Value *AggShadow = getShadow(Agg); 2323 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); 2324 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); 2325 DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); 2326 setShadow(&I, ResShadow); 2327 setOriginForNaryOp(I); 2328 } 2329 2330 void visitInsertValueInst(InsertValueInst &I) { 2331 IRBuilder<> IRB(&I); 2332 DEBUG(dbgs() << "InsertValue: " << I << "\n"); 2333 Value *AggShadow = getShadow(I.getAggregateOperand()); 2334 Value *InsShadow = getShadow(I.getInsertedValueOperand()); 2335 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); 2336 DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); 2337 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); 2338 DEBUG(dbgs() << " Res: " << *Res << "\n"); 2339 setShadow(&I, Res); 2340 setOriginForNaryOp(I); 2341 } 2342 2343 void dumpInst(Instruction &I) { 2344 if (CallInst *CI = dyn_cast<CallInst>(&I)) { 2345 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n"; 2346 } else { 2347 errs() << "ZZZ " << I.getOpcodeName() << "\n"; 2348 } 2349 errs() << "QQQ " << I << "\n"; 2350 } 2351 2352 void visitResumeInst(ResumeInst &I) { 2353 DEBUG(dbgs() << "Resume: " << I << "\n"); 2354 // Nothing to do here. 2355 } 2356 2357 void visitInstruction(Instruction &I) { 2358 // Everything else: stop propagating and check for poisoned shadow. 
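// That is, every operand's shadow is checked (a warning is emitted if it
// is poisoned) and the result is treated as fully initialized, so poison
// never propagates through an instruction this visitor does not understand.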
2359 if (ClDumpStrictInstructions) 2360 dumpInst(I); 2361 DEBUG(dbgs() << "DEFAULT: " << I << "\n"); 2362 for (size_t i = 0, n = I.getNumOperands(); i < n; i++) 2363 insertShadowCheck(I.getOperand(i), &I); 2364 setShadow(&I, getCleanShadow(&I)); 2365 setOrigin(&I, getCleanOrigin()); 2366 } 2367 }; 2368 2369 /// \brief AMD64-specific implementation of VarArgHelper. 2370 struct VarArgAMD64Helper : public VarArgHelper { 2371 // An unfortunate workaround for asymmetric lowering of va_arg stuff. 2372 // See a comment in visitCallSite for more details. 2373 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7 2374 static const unsigned AMD64FpEndOffset = 176; 2375 2376 Function &F; 2377 MemorySanitizer &MS; 2378 MemorySanitizerVisitor &MSV; 2379 Value *VAArgTLSCopy; 2380 Value *VAArgOverflowSize; 2381 2382 SmallVector<CallInst*, 16> VAStartInstrumentationList; 2383 2384 VarArgAMD64Helper(Function &F, MemorySanitizer &MS, 2385 MemorySanitizerVisitor &MSV) 2386 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr), 2387 VAArgOverflowSize(nullptr) {} 2388 2389 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory }; 2390 2391 ArgKind classifyArgument(Value* arg) { 2392 // A very rough approximation of X86_64 argument classification rules. 2393 Type *T = arg->getType(); 2394 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy()) 2395 return AK_FloatingPoint; 2396 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) 2397 return AK_GeneralPurpose; 2398 if (T->isPointerTy()) 2399 return AK_GeneralPurpose; 2400 return AK_Memory; 2401 } 2402 2403 // For VarArg functions, store the argument shadow in an ABI-specific format 2404 // that corresponds to va_list layout. 2405 // We do this because Clang lowers va_arg in the frontend, and this pass 2406 // only sees the low level code that deals with va_list internals. 2407 // A much easier alternative (provided that Clang emits va_arg instructions) 2408 // would have been to associate each live instance of va_list with a copy of 2409 // MSanParamTLS, and extract shadow on va_arg() call in the argument list 2410 // order. 2411 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override { 2412 unsigned GpOffset = 0; 2413 unsigned FpOffset = AMD64GpEndOffset; 2414 unsigned OverflowOffset = AMD64FpEndOffset; 2415 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); 2416 ArgIt != End; ++ArgIt) { 2417 Value *A = *ArgIt; 2418 unsigned ArgNo = CS.getArgumentNo(ArgIt); 2419 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal); 2420 if (IsByVal) { 2421 // ByVal arguments always go to the overflow area. 
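// A byval argument is passed in memory, so its shadow is memcpy-ed into the
// overflow area of the va_arg TLS buffer instead of the GP/FP register-save
// slots.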
2422 assert(A->getType()->isPointerTy()); 2423 Type *RealTy = A->getType()->getPointerElementType(); 2424 uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy); 2425 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); 2426 OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8); 2427 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB), 2428 ArgSize, kShadowTLSAlignment); 2429 } else { 2430 ArgKind AK = classifyArgument(A); 2431 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset) 2432 AK = AK_Memory; 2433 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset) 2434 AK = AK_Memory; 2435 Value *Base; 2436 switch (AK) { 2437 case AK_GeneralPurpose: 2438 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset); 2439 GpOffset += 8; 2440 break; 2441 case AK_FloatingPoint: 2442 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset); 2443 FpOffset += 16; 2444 break; 2445 case AK_Memory: 2446 uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType()); 2447 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset); 2448 OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8); 2449 } 2450 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); 2451 } 2452 } 2453 Constant *OverflowSize = 2454 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset); 2455 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS); 2456 } 2457 2458 /// \brief Compute the shadow address for a given va_arg. 2459 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, 2460 int ArgOffset) { 2461 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); 2462 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); 2463 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0), 2464 "_msarg"); 2465 } 2466 2467 void visitVAStartInst(VAStartInst &I) override { 2468 IRBuilder<> IRB(&I); 2469 VAStartInstrumentationList.push_back(&I); 2470 Value *VAListTag = I.getArgOperand(0); 2471 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); 2472 2473 // Unpoison the whole __va_list_tag. 2474 // FIXME: magic ABI constants. 2475 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), 2476 /* size */24, /* alignment */8, false); 2477 } 2478 2479 void visitVACopyInst(VACopyInst &I) override { 2480 IRBuilder<> IRB(&I); 2481 Value *VAListTag = I.getArgOperand(0); 2482 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); 2483 2484 // Unpoison the whole __va_list_tag. 2485 // FIXME: magic ABI constants. 2486 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), 2487 /* size */24, /* alignment */8, false); 2488 } 2489 2490 void finalizeInstrumentation() override { 2491 assert(!VAArgOverflowSize && !VAArgTLSCopy && 2492 "finalizeInstrumentation called twice"); 2493 if (!VAStartInstrumentationList.empty()) { 2494 // If there is a va_start in this function, make a backup copy of 2495 // va_arg_tls somewhere in the function entry block. 2496 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); 2497 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); 2498 Value *CopySize = 2499 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), 2500 VAArgOverflowSize); 2501 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); 2502 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); 2503 } 2504 2505 // Instrument va_start. 2506 // Copy va_list shadow from the backup copy of the TLS contents. 
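// The loop below relies on the AMD64 __va_list_tag layout: the
// overflow_arg_area pointer is at offset 8 and the reg_save_area pointer at
// offset 16. The first AMD64FpEndOffset bytes of the backup copy shadow the
// register save area; the rest shadows the overflow area.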
2507 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) { 2508 CallInst *OrigInst = VAStartInstrumentationList[i]; 2509 IRBuilder<> IRB(OrigInst->getNextNode()); 2510 Value *VAListTag = OrigInst->getArgOperand(0); 2511 2512 Value *RegSaveAreaPtrPtr = 2513 IRB.CreateIntToPtr( 2514 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), 2515 ConstantInt::get(MS.IntptrTy, 16)), 2516 Type::getInt64PtrTy(*MS.C)); 2517 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); 2518 Value *RegSaveAreaShadowPtr = 2519 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB); 2520 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, 2521 AMD64FpEndOffset, 16); 2522 2523 Value *OverflowArgAreaPtrPtr = 2524 IRB.CreateIntToPtr( 2525 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), 2526 ConstantInt::get(MS.IntptrTy, 8)), 2527 Type::getInt64PtrTy(*MS.C)); 2528 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr); 2529 Value *OverflowArgAreaShadowPtr = 2530 MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB); 2531 Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset); 2532 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16); 2533 } 2534 } 2535 }; 2536 2537 /// \brief A no-op implementation of VarArgHelper. 2538 struct VarArgNoOpHelper : public VarArgHelper { 2539 VarArgNoOpHelper(Function &F, MemorySanitizer &MS, 2540 MemorySanitizerVisitor &MSV) {} 2541 2542 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {} 2543 2544 void visitVAStartInst(VAStartInst &I) override {} 2545 2546 void visitVACopyInst(VACopyInst &I) override {} 2547 2548 void finalizeInstrumentation() override {} 2549 }; 2550 2551 VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, 2552 MemorySanitizerVisitor &Visitor) { 2553 // VarArg handling is only implemented on AMD64. False positives are possible 2554 // on other platforms. 2555 llvm::Triple TargetTriple(Func.getParent()->getTargetTriple()); 2556 if (TargetTriple.getArch() == llvm::Triple::x86_64) 2557 return new VarArgAMD64Helper(Func, Msan, Visitor); 2558 else 2559 return new VarArgNoOpHelper(Func, Msan, Visitor); 2560 } 2561 2562 } // namespace 2563 2564 bool MemorySanitizer::runOnFunction(Function &F) { 2565 MemorySanitizerVisitor Visitor(F, *this); 2566 2567 // Clear out readonly/readnone attributes. 2568 AttrBuilder B; 2569 B.addAttribute(Attribute::ReadOnly) 2570 .addAttribute(Attribute::ReadNone); 2571 F.removeAttributes(AttributeSet::FunctionIndex, 2572 AttributeSet::get(F.getContext(), 2573 AttributeSet::FunctionIndex, B)); 2574 2575 return Visitor.runOnFunction(); 2576 } 2577