//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, and report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
///
/// But there are differences too. The first and major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded, so races are not a
/// concern there; Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for every
/// 4 bytes of application memory. Propagation of origins is basically a bunch
/// of "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origins to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
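///
/// For example (illustrative only): if bytes 0-1 of an aligned 4-byte region
/// hold uninitialized data from allocation A and bytes 2-3 hold uninitialized
/// data from allocation B, the single origin slot for the region keeps
/// whichever origin was stored last, so a report about bytes 0-1 may name B.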
///
/// Atomic handling.
///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, an atomic
/// store to two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, the shadow store and load are
/// correctly ordered such that the load will get either the value that was
/// stored, or some later value (which is always clean).
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.

//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "msan"

static const unsigned kOriginSize = 4;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

// These constants must be kept in sync with the ones in msan.h.
static const unsigned kParamTLSSize = 800;
static const unsigned kRetvalTLSSize = 800;

// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;
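
// Note (illustrative): kParamTLSSize is 800 bytes, i.e. 100 64-bit slots;
// argument shadow that would start past this limit is treated as fully
// initialized (see the ParamTLS overflow handling in getShadow below).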

/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since a load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. only lower bits of the address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// This is an experiment to enable handling of cases where shadow is a non-zero
// compile-time constant. For some unexplainable reason they were silently
// ignored in the instrumentation.
static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
       cl::desc("Insert checks for constant shadow values"),
       cl::Hidden, cl::init(false));

namespace {

// Memory map parameters used in application-to-shadow address calculation.
// Offset = (Addr & ~AndMask) ^ XorMask
// Shadow = ShadowBase + Offset
// Origin = OriginBase + Offset
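//
// For example (an illustrative sketch using the x86_64 Linux parameters
// below: AndMask = 0x400000000000, XorMask = 0, ShadowBase = 0,
// OriginBase = 0x200000000000), the application address 0x7f0000001234
// maps to:
//   Offset = 0x7f0000001234 & ~0x400000000000 = 0x3f0000001234
//   Shadow = 0x3f0000001234
//   Origin = 0x200000000000 + 0x3f0000001234 = 0x5f0000001234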
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};

// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
  0x000080000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x000040000000,  // OriginBase
};

// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
  0x400000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x200000000000,  // OriginBase
};

// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
  0x004000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x002000000000,  // OriginBase
};

// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
  0x000180000000,  // AndMask
  0x000040000000,  // XorMask
  0x000020000000,  // ShadowBase
  0x000700000000,  // OriginBase
};

// x86_64 FreeBSD
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
  0xc00000000000,  // AndMask
  0x200000000000,  // XorMask
  0x100000000000,  // ShadowBase
  0x380000000000,  // OriginBase
};

static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
  &Linux_I386_MemoryMapParams,
  &Linux_X86_64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
  NULL,
  &Linux_MIPS64_MemoryMapParams,
};

static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
  &FreeBSD_I386_MemoryMapParams,
  &FreeBSD_X86_64_MemoryMapParams,
};

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in the module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0)
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        DL(nullptr),
        WarningFn(nullptr) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Memory map parameters used in application-to-shadow calculation.
  const MemoryMapParams *MapParams;

  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
  friend struct VarArgMIPS64Helper;
};
}  // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
  return new MemorySanitizer(TrackOrigins);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}


/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
"__msan_warning" 377 : "__msan_warning_noreturn"; 378 WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr); 379 380 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 381 AccessSizeIndex++) { 382 unsigned AccessSize = 1 << AccessSizeIndex; 383 std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize); 384 MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction( 385 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), 386 IRB.getInt32Ty(), nullptr); 387 388 FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize); 389 MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction( 390 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), 391 IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr); 392 } 393 394 MsanSetAllocaOrigin4Fn = M.getOrInsertFunction( 395 "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, 396 IRB.getInt8PtrTy(), IntptrTy, nullptr); 397 MsanPoisonStackFn = 398 M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(), 399 IRB.getInt8PtrTy(), IntptrTy, nullptr); 400 MsanChainOriginFn = M.getOrInsertFunction( 401 "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr); 402 MemmoveFn = M.getOrInsertFunction( 403 "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 404 IRB.getInt8PtrTy(), IntptrTy, nullptr); 405 MemcpyFn = M.getOrInsertFunction( 406 "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 407 IntptrTy, nullptr); 408 MemsetFn = M.getOrInsertFunction( 409 "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), 410 IntptrTy, nullptr); 411 412 // Create globals. 413 RetvalTLS = new GlobalVariable( 414 M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false, 415 GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr, 416 GlobalVariable::InitialExecTLSModel); 417 RetvalOriginTLS = new GlobalVariable( 418 M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr, 419 "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); 420 421 ParamTLS = new GlobalVariable( 422 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, 423 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr, 424 GlobalVariable::InitialExecTLSModel); 425 ParamOriginTLS = new GlobalVariable( 426 M, ArrayType::get(OriginTy, kParamTLSSize / 4), false, 427 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls", 428 nullptr, GlobalVariable::InitialExecTLSModel); 429 430 VAArgTLS = new GlobalVariable( 431 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, 432 GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr, 433 GlobalVariable::InitialExecTLSModel); 434 VAArgOverflowSizeTLS = new GlobalVariable( 435 M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr, 436 "__msan_va_arg_overflow_size_tls", nullptr, 437 GlobalVariable::InitialExecTLSModel); 438 OriginTLS = new GlobalVariable( 439 M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr, 440 "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); 441 442 // We insert an empty inline asm after __msan_report* to avoid callback merge. 443 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), 444 StringRef(""), StringRef(""), 445 /*hasSideEffects=*/true); 446 } 447 448 /// \brief Module-level initialization. 449 /// 450 /// inserts a call to __msan_init to the module's constructor list. 

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  Triple TargetTriple(M.getTargetTriple());
  switch (TargetTriple.getOS()) {
    case Triple::FreeBSD:
      switch (TargetTriple.getArch()) {
        case Triple::x86_64:
          MapParams = FreeBSD_X86_MemoryMapParams.bits64;
          break;
        case Triple::x86:
          MapParams = FreeBSD_X86_MemoryMapParams.bits32;
          break;
        default:
          report_fatal_error("unsupported architecture");
      }
      break;
    case Triple::Linux:
      switch (TargetTriple.getArch()) {
        case Triple::x86_64:
          MapParams = Linux_X86_MemoryMapParams.bits64;
          break;
        case Triple::x86:
          MapParams = Linux_X86_MemoryMapParams.bits32;
          break;
        case Triple::mips64:
        case Triple::mips64el:
          MapParams = Linux_MIPS_MemoryMapParams.bits64;
          break;
        default:
          report_fatal_error("unsupported architecture");
      }
      break;
    default:
      report_fatal_error("unsupported operating system");
  }

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), nullptr)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}
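
// For example: TypeSizeToSizeIndex(8) == 0 (a 1-byte access),
// TypeSizeToSizeIndex(32) == 2, and TypeSizeToSizeIndex(128) == 4, which is
// out of range for the callback tables, so callers guard with
// SizeIndex < kNumberOfAccessSizes.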

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
              dbgs() << "MemorySanitizer is not inserting checks into '"
                     << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
    if (IntptrSize == kOriginSize) return Origin;
    assert(IntptrSize == kOriginSize * 2);
    Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
    return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
  }

  /// \brief Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   unsigned Size, unsigned Alignment) {
    unsigned IntptrAlignment = MS.DL->getABITypeAlignment(MS.IntptrTy);
    unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
    assert(IntptrAlignment >= kMinOriginAlignment);
    assert(IntptrSize >= kOriginSize);

    unsigned Ofs = 0;
    unsigned CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr =
          IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        Value *Ptr =
            i ? IRB.CreateConstGEP1_32(IntptrOriginPtr, i) : IntptrOriginPtr;
        IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
        Ofs += IntptrSize / kOriginSize;
        CurrentAlignment = IntptrAlignment;
      }
    }

    for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
      Value *GEP = i ? IRB.CreateConstGEP1_32(OriginPtr, i) : OriginPtr;
      IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
      CurrentAlignment = kMinOriginAlignment;
    }
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    unsigned StoreSize = MS.DL->getTypeStoreSize(Shadow->getType());
    if (isa<StructType>(Shadow->getType())) {
      paintOrigin(IRB, updateOrigin(Origin, IRB),
                  getOriginPtr(Addr, IRB, Alignment), StoreSize,
                  OriginAlignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
      if (ConstantShadow) {
        if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
          paintOrigin(IRB, updateOrigin(Origin, IRB),
                      getOriginPtr(Addr, IRB, Alignment), StoreSize,
                      OriginAlignment);
        return;
      }

      unsigned TypeSizeInBits =
          MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall3(Fn, ConvertedShadow2,
                        IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                        Origin);
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
                    getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
                    OriginAlignment);
      }
    }
  }

  void materializeStores(bool InstrumentWithCalls) {
    for (auto Inst : StoreList) {
      StoreInst &SI = *dyn_cast<StoreInst>(Inst);

      IRBuilder<> IRB(&SI);
      Value *Val = SI.getValueOperand();
      Value *Addr = SI.getPointerOperand();
      Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

      if (MS.TrackOrigins && !SI.isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
                    InstrumentWithCalls);
    }
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");

    Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
    if (ConstantShadow) {
      if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
        if (MS.TrackOrigins) {
          IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                          MS.OriginTLS);
        }
        IRB.CreateCall(MS.WarningFn);
        IRB.CreateCall(MS.EmptyAsm);
        // FIXME: Insert UnreachableInst if !ClKeepGoing?
        // This may invalidate some of the following checks and needs to be
        // done at the very end.
      }
      return;
    }

    unsigned TypeSizeInBits =
        MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0));
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn);
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
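  ///
  /// For example: i32 maps to i32, <4 x float> to <4 x i32> (an integer
  /// vector of matching element width), {i64, i8} to {i64, i8}, and double
  /// to i64.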
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the integer shadow offset that corresponds to a given
  /// application address.
  ///
  /// Offset = (Addr & ~AndMask) ^ XorMask
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    uint64_t AndMask = MS.MapParams->AndMask;
    assert(AndMask != 0 && "AndMask shall be specified");
    Value *OffsetLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~AndMask));

    uint64_t XorMask = MS.MapParams->XorMask;
    if (XorMask != 0)
      OffsetLong = IRB.CreateXor(OffsetLong,
                                 ConstantInt::get(MS.IntptrTy, XorMask));
    return OffsetLong;
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = ShadowBase + Offset
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
    uint64_t ShadowBase = MS.MapParams->ShadowBase;
    if (ShadowBase != 0)
      ShadowLong =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, ShadowBase));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (OriginBase + Offset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
    Value *OriginLong = getShadowPtrOffset(Addr, IRB);
    uint64_t OriginBase = MS.MapParams->OriginBase;
    if (OriginBase != 0)
      OriginLong =
        IRB.CreateAdd(OriginLong,
                      ConstantInt::get(MS.IntptrTy, OriginBase));
    if (Alignment < kMinOriginAlignment) {
      uint64_t Mask = kMinOriginAlignment - 1;
      OriginLong = IRB.CreateAnd(OriginLong,
                                 ConstantInt::get(MS.IntptrTy, ~Mask));
    }
    return IRB.CreateIntToPtr(OriginLong,
                              PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
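  ///
  /// For example (illustrative): for shadow type {i64, <2 x i32>} this
  /// produces { i64 -1, <i32 -1, i32 -1> }, i.e. every bit set ("poisoned").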
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    llvm_unreachable("Unexpected shadow type");
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (!PropagateShadow) return getCleanShadow(V);
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = FArg.hasByValAttr()
            ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
            : MS.DL->getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            if (Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
                  Constant::getNullValue(EntryIRB.getInt8Ty()), Size,
                  ArgAlign);
            } else {
              unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
              Value *Cpy = EntryIRB.CreateMemCpy(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                  CopyAlign);
              DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
              (void)Cpy;
            }
            *ShadowPtr = getCleanShadow(V);
          } else {
            if (Overflow) {
              // ParamTLS overflow.
              *ShadowPtr = getCleanShadow(V);
            } else {
              *ShadowPtr =
                  EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
            }
          }
          DEBUG(dbgs() << "  ARG: " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins && !Overflow) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          } else {
            setOrigin(A, getCleanOrigin());
          }
        }
        ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (!PropagateShadow) return getCleanOrigin();
    if (isa<Constant>(V)) return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow) return;
      Origin = getOrigin(Val);
    } else {
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      if (!Shadow) return;
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    }
    insertShadowCheck(Shadow, Origin, OrigIns);
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (PropagateShadow && !I.getMetadata("nosanitize")) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        unsigned Alignment = I.getAlignment();
        unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
        setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
                                            OriginAlignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of the cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we cannot
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "And" of 0 and a poisoned value results in unpoisoned value.
    // 1&1 => 1;  0&1 => 0;  p&1 => p;
    // 1&0 => 0;  0&0 => 0;  p&0 => 0;
    // 1&p => p;  0&p => 0;  p&p => p;
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
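    //
    // Worked example (illustrative): if A = 0b11 is fully defined (S1 = 0b00)
    // and B's high bit is poisoned (S2 = 0b10), the formula gives
    //   S = (S1 & S2) | (V1 & S2) | (S1 & V2) = 0b11 & 0b10 = 0b10,
    // poisoning only the result's high bit. With A = 0b01 it gives 0b00:
    // A's high bit is a defined 0, which forces that result bit to 0 no
    // matter what B holds.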
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in unpoisoned value.
    // 1|1 => 1;  0|1 => 1;  p|1 => 1;
    // 1|0 => 1;  0|0 => 0;  p|0 => p;
    // 1|p => 1;  0|p => p;  p|p => p;
    // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For an
  /// N-ary operation, the result origin is set to the origin of an argument
  /// that is not entirely initialized. If there is more than one such
  /// argument, the rightmost of them is picked. It does not matter which one
  /// is picked if all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

   public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in 0 origin value.
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
            Value *Cond =
                IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
            Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
          }
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  // \brief Handle multiplication by constant.
  //
  // Handle a special case of multiplication by a constant that may have one
  // or more zeros in the lower bits. This makes the corresponding number of
  // lower bits of the result zero as well. We model it by shifting the other
  // operand shadow left by the required number of bits. Effectively, we
  // transform (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as
  // (Sx << B). We use multiplication by 2**N instead of shift to cover the
  // case of multiplication by 0, which may occur in some elements of a vector
  // operand.
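  //
  // For example (illustrative): X * 24 = X * (3 * 2**3) keeps the low 3 bits
  // of the result zero, so the shadow becomes Sx * 8 (i.e. Sx << 3), clearing
  // the three low shadow bits. A constant multiplier of 0 has bitwidth-many
  // trailing zeros, so the shadow multiplier is 0 and the result is treated
  // as fully defined, matching the always-0 product.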
  // \brief Handle multiplication by constant.
  //
  // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes the corresponding number of
  // lower bits of the result zero as well. We model it by shifting the other
  // operand shadow left by the required number of bits. Effectively, we
  // transform (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as
  // (Sx << B). We use multiplication by 2**N instead of shift to cover the
  // case of multiplication by 0, which may occur in some elements of a
  // vector operand.
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (Ty->isVectorTy()) {
      unsigned NumElements = Ty->getVectorNumElements();
      Type *EltTy = Ty->getSequentialElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        ConstantInt *Elt =
            dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx));
        APInt V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
        Elements.push_back(ConstantInt::get(EltTy, V2));
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg);
      APInt V = Elt->getValue();
      APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
      ShadowMul = ConstantInt::get(Elt->getType(), V2);
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }

  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }
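
  // Worked example (illustrative): for "%r = mul i32 %x, 24", the constant
  // 24 = 3 * 2**3 has 3 trailing zero bits, so ShadowMul is 2**3 = 8 and the
  // result shadow is "mul i32 %sx, 8", clearing the 3 low shadow bits just
  // like the application multiply clears the 3 low data bits.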
  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with the i = (C == 0) comparison (or C != 0, it does not
    // matter now). The result is defined if one of the following is true:
    //   * there is a defined 1 bit in C, or
    //   * C is fully defined.
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
        IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                      IRB.CreateICmpEQ(
                          IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined shadow bit, minimize other undefined bits.
      return
          IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined shadow bit, maximize other undefined bits.
      return
          IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }
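
  // Worked example (illustrative, unsigned case): if A = 0b1010 with shadow
  // Sa = 0b0110 (bits 1 and 2 unknown), the lowest possible value is
  // A & ~Sa = 0b1000 and the highest is A | Sa = 0b1110; the true value of A
  // must lie within that interval.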
  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  /// FIXME: output seems suboptimal when one of the operands is a constant.
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = nullptr;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
          IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
                            "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }
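
  // Illustrative sketch (value names invented): for "%r = shl i32 %x, %n"
  // with shadows %sx and %sn, the code above emits roughly:
  //   %sncmp = icmp ne i32 %sn, 0
  //   %snall = sext i1 %sncmp to i32   ; all-ones if %n is poisoned
  //   %shift = shl i32 %sx, %n         ; shift the shadow like the data
  //   %sr    = or i32 %shift, %snall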
  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// A similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
        MS.MemmoveFn,
        IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
        MS.MemcpyFn,
        IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
        MS.MemsetFn,
        IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }

  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
    return true;
  }
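
  // Illustrative sketch (invented intrinsic name): a store-shaped intrinsic
  //   call void @llvm.x86.something.store(i8* %p, <4 x i32> %v)
  // is handled by storing %v's shadow to shadowOf(%p) with alignment 1,
  // since the real pointer alignment is unknown here.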
  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (PropagateShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }
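
  // For example, an SSE arithmetic intrinsic such as
  //   %r = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a,
  //                                                <2 x double> %b)
  // matches this pattern (all argument types equal the return type, no
  // memory access), so its result shadow is simply the OR of the argument
  // shadows.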
  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD
  /// intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
        F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }
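
  // bswap permutes bytes without mixing their bits, so the exact result
  // shadow is the same permutation of the operand shadow. E.g. (sketch):
  //   %r  = call i32 @llvm.bswap.i32(i32 %x)
  //   %sr = call i32 @llvm.bswap.i32(i32 %sx)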
  // \brief Instrument vector convert intrinsic.
  //
  // This function instruments intrinsics like cvtsi2ss:
  // %Out = int_xxx_cvtyyy(%ConvertOp)
  // or
  // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // The intrinsic converts \p NumUsedElements elements of \p ConvertOp to the
  // same number of \p Out elements, and (if it has 2 arguments) copies the
  // rest of the elements from \p CopyOp.
  // In most cases conversion involves a floating-point value which may
  // trigger a hardware exception when not fully initialized. For this reason
  // we require \p ConvertOp[0:NumUsedElements] to be fully initialized and
  // trap otherwise.
  // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
  // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp
  // always return a fully initialized value.
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    switch (I.getNumArgOperands()) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = nullptr;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // The first *NumUsedElements* elements of ConvertOp are converted to the
    // same number of output elements. The rest of the output is copied from
    // CopyOp, or (if not available) filled with zeroes.
    // Combine shadow for elements of ConvertOp that are used in this
    // operation, and insert a check.
    // FIXME: consider propagating shadow of ConvertOp, at least in the case
    // of int->any conversion.
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    assert(AggShadow->getType()->isIntegerTy());
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

    // Build result shadow by zero-filling parts of CopyOp shadow that come
    // from ConvertOp.
    if (CopyOp) {
      assert(CopyOp->getType() == I.getType());
      assert(CopyOp->getType()->isVectorTy());
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = ResultShadow->getType()->getVectorElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
  }

  // Given a scalar or vector, extract lower 64 bits (or less), and return all
  // zeroes if it is zero, and all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    if (S->getType()->isVectorTy())
      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    assert(S->getType()->getPrimitiveSizeInBits() <= 64);
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
    Type *T = S->getType();
    assert(T->isVectorTy());
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return IRB.CreateSExt(S2, T);
  }

  // \brief Instrument vector shift intrinsic.
  //
  // This function instruments intrinsics like int_x86_avx2_psll_w.
  // The intrinsic shifts %In by %ShiftSize bits.
  // %ShiftSize may be a vector. In that case the lower 64 bits determine the
  // shift size, and the rest is ignored. Behavior is defined even if the
  // shift size is greater than the register (or field) width.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    assert(I.getNumArgOperands() == 2);
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall2(I.getCalledValue(),
                                   IRB.CreateBitCast(S1, V1->getType()), V2);
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  // \brief Get an X86_MMX-sized vector type.
  Type *getMMXVectorTy(unsigned EltSizeInBits) {
    const unsigned X86_MMXSizeInBits = 64;
    return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                           X86_MMXSizeInBits / EltSizeInBits);
  }
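
  // E.g. getMMXVectorTy(16) returns <4 x i16> and getMMXVectorTy(32) returns
  // <2 x i32>; both are 64 bits wide, matching the x86_mmx register size.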
  // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
  // intrinsic.
  Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
    switch (id) {
    case llvm::Intrinsic::x86_sse2_packsswb_128:
    case llvm::Intrinsic::x86_sse2_packuswb_128:
      return llvm::Intrinsic::x86_sse2_packsswb_128;

    case llvm::Intrinsic::x86_sse2_packssdw_128:
    case llvm::Intrinsic::x86_sse41_packusdw:
      return llvm::Intrinsic::x86_sse2_packssdw_128;

    case llvm::Intrinsic::x86_avx2_packsswb:
    case llvm::Intrinsic::x86_avx2_packuswb:
      return llvm::Intrinsic::x86_avx2_packsswb;

    case llvm::Intrinsic::x86_avx2_packssdw:
    case llvm::Intrinsic::x86_avx2_packusdw:
      return llvm::Intrinsic::x86_avx2_packssdw;

    case llvm::Intrinsic::x86_mmx_packsswb:
    case llvm::Intrinsic::x86_mmx_packuswb:
      return llvm::Intrinsic::x86_mmx_packsswb;

    case llvm::Intrinsic::x86_mmx_packssdw:
      return llvm::Intrinsic::x86_mmx_packssdw;
    default:
      llvm_unreachable("unexpected intrinsic id");
    }
  }

  // \brief Instrument vector pack intrinsic.
  //
  // This function instruments intrinsics like x86_mmx_packsswb, which pack
  // elements of 2 input vectors into half as many bits with saturation.
  // Shadow is propagated with the signed variant of the same intrinsic applied
  // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
  // EltSizeInBits is used only for x86mmx arguments.
  void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
    assert(I.getNumArgOperands() == 2);
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    assert(isX86_MMX || S1->getType()->isVectorTy());

    // SExt and ICmpNE below must apply to individual elements of input
    // vectors. In case of x86mmx arguments, cast them to appropriate vector
    // types and back.
    Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
    if (isX86_MMX) {
      S1 = IRB.CreateBitCast(S1, T);
      S2 = IRB.CreateBitCast(S2, T);
    }
    Value *S1_ext = IRB.CreateSExt(
        IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
    Value *S2_ext = IRB.CreateSExt(
        IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
    if (isX86_MMX) {
      Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
      S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
      S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
    }

    Function *ShadowFn = Intrinsic::getDeclaration(
        F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));

    Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
    if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
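
  // Why the signed variant works for shadow: a poisoned element is first
  // mapped to all-ones (sext of "!= 0"), i.e. -1, and signed saturation of
  // -1 packs to a nonzero (all-ones) narrow element, so poison survives the
  // pack. An unsigned-saturating pack would clamp -1 to 0 and could lose
  // that information.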
  // \brief Instrument sum-of-absolute-differences intrinsic.
  void handleVectorSadIntrinsic(IntrinsicInst &I) {
    const unsigned SignificantBitsPerResultElement = 16;
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
    unsigned ZeroBitsPerResultElement =
        ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;

    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  // \brief Instrument multiply-add intrinsic.
  void handleVectorPmaddIntrinsic(IntrinsicInst &I,
                                  unsigned EltSizeInBits = 0) {
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
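
  // Both handlers above use the same coarse scheme: OR the argument shadows,
  // then spread any poison to the whole (widened) result element, since each
  // output element mixes several input elements. For psad, the upper
  // ZeroBitsPerResultElement bits of each element are then marked clean,
  // because the hardware always zeroes them.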
  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvtsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtss2usi64:
    case llvm::Intrinsic::x86_avx512_cvtss2usi:
    case llvm::Intrinsic::x86_avx512_cvttss2usi64:
    case llvm::Intrinsic::x86_avx512_cvttss2usi:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtusi2sd:
    case llvm::Intrinsic::x86_avx512_cvtusi2ss:
    case llvm::Intrinsic::x86_avx512_cvtusi642sd:
    case llvm::Intrinsic::x86_avx512_cvtusi642ss:
    case llvm::Intrinsic::x86_sse2_cvtsd2si64:
    case llvm::Intrinsic::x86_sse2_cvtsd2si:
    case llvm::Intrinsic::x86_sse2_cvtsd2ss:
    case llvm::Intrinsic::x86_sse2_cvtsi2sd:
    case llvm::Intrinsic::x86_sse2_cvtsi642sd:
    case llvm::Intrinsic::x86_sse2_cvtss2sd:
    case llvm::Intrinsic::x86_sse2_cvttsd2si64:
    case llvm::Intrinsic::x86_sse2_cvttsd2si:
    case llvm::Intrinsic::x86_sse_cvtsi2ss:
    case llvm::Intrinsic::x86_sse_cvtsi642ss:
    case llvm::Intrinsic::x86_sse_cvtss2si64:
    case llvm::Intrinsic::x86_sse_cvtss2si:
    case llvm::Intrinsic::x86_sse_cvttss2si64:
    case llvm::Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case llvm::Intrinsic::x86_sse2_cvtdq2pd:
    case llvm::Intrinsic::x86_sse2_cvtps2pd:
    case llvm::Intrinsic::x86_sse_cvtps2pi:
    case llvm::Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;
    case llvm::Intrinsic::x86_avx2_psll_w:
    case llvm::Intrinsic::x86_avx2_psll_d:
    case llvm::Intrinsic::x86_avx2_psll_q:
    case llvm::Intrinsic::x86_avx2_pslli_w:
    case llvm::Intrinsic::x86_avx2_pslli_d:
    case llvm::Intrinsic::x86_avx2_pslli_q:
    case llvm::Intrinsic::x86_avx2_psrl_w:
    case llvm::Intrinsic::x86_avx2_psrl_d:
    case llvm::Intrinsic::x86_avx2_psrl_q:
    case llvm::Intrinsic::x86_avx2_psra_w:
    case llvm::Intrinsic::x86_avx2_psra_d:
    case llvm::Intrinsic::x86_avx2_psrli_w:
    case llvm::Intrinsic::x86_avx2_psrli_d:
    case llvm::Intrinsic::x86_avx2_psrli_q:
    case llvm::Intrinsic::x86_avx2_psrai_w:
    case llvm::Intrinsic::x86_avx2_psrai_d:
    case llvm::Intrinsic::x86_sse2_psll_w:
    case llvm::Intrinsic::x86_sse2_psll_d:
    case llvm::Intrinsic::x86_sse2_psll_q:
    case llvm::Intrinsic::x86_sse2_pslli_w:
    case llvm::Intrinsic::x86_sse2_pslli_d:
    case llvm::Intrinsic::x86_sse2_pslli_q:
    case llvm::Intrinsic::x86_sse2_psrl_w:
    case llvm::Intrinsic::x86_sse2_psrl_d:
    case llvm::Intrinsic::x86_sse2_psrl_q:
    case llvm::Intrinsic::x86_sse2_psra_w:
    case llvm::Intrinsic::x86_sse2_psra_d:
    case llvm::Intrinsic::x86_sse2_psrli_w:
    case llvm::Intrinsic::x86_sse2_psrli_d:
    case llvm::Intrinsic::x86_sse2_psrli_q:
    case llvm::Intrinsic::x86_sse2_psrai_w:
    case llvm::Intrinsic::x86_sse2_psrai_d:
    case llvm::Intrinsic::x86_mmx_psll_w:
    case llvm::Intrinsic::x86_mmx_psll_d:
    case llvm::Intrinsic::x86_mmx_psll_q:
    case llvm::Intrinsic::x86_mmx_pslli_w:
    case llvm::Intrinsic::x86_mmx_pslli_d:
    case llvm::Intrinsic::x86_mmx_pslli_q:
    case llvm::Intrinsic::x86_mmx_psrl_w:
    case llvm::Intrinsic::x86_mmx_psrl_d:
    case llvm::Intrinsic::x86_mmx_psrl_q:
    case llvm::Intrinsic::x86_mmx_psra_w:
    case llvm::Intrinsic::x86_mmx_psra_d:
    case llvm::Intrinsic::x86_mmx_psrli_w:
    case llvm::Intrinsic::x86_mmx_psrli_d:
    case llvm::Intrinsic::x86_mmx_psrli_q:
    case llvm::Intrinsic::x86_mmx_psrai_w:
    case llvm::Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /* Variable */ false);
      break;
    case llvm::Intrinsic::x86_avx2_psllv_d:
    case llvm::Intrinsic::x86_avx2_psllv_d_256:
    case llvm::Intrinsic::x86_avx2_psllv_q:
    case llvm::Intrinsic::x86_avx2_psllv_q_256:
    case llvm::Intrinsic::x86_avx2_psrlv_d:
    case llvm::Intrinsic::x86_avx2_psrlv_d_256:
    case llvm::Intrinsic::x86_avx2_psrlv_q:
    case llvm::Intrinsic::x86_avx2_psrlv_q_256:
    case llvm::Intrinsic::x86_avx2_psrav_d:
    case llvm::Intrinsic::x86_avx2_psrav_d_256:
      handleVectorShiftIntrinsic(I, /* Variable */ true);
      break;

    case llvm::Intrinsic::x86_sse2_packsswb_128:
    case llvm::Intrinsic::x86_sse2_packssdw_128:
    case llvm::Intrinsic::x86_sse2_packuswb_128:
    case llvm::Intrinsic::x86_sse41_packusdw:
    case llvm::Intrinsic::x86_avx2_packsswb:
    case llvm::Intrinsic::x86_avx2_packssdw:
    case llvm::Intrinsic::x86_avx2_packuswb:
    case llvm::Intrinsic::x86_avx2_packusdw:
      handleVectorPackIntrinsic(I);
      break;

    case llvm::Intrinsic::x86_mmx_packsswb:
    case llvm::Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;

    case llvm::Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;

    case llvm::Intrinsic::x86_mmx_psad_bw:
    case llvm::Intrinsic::x86_sse2_psad_bw:
    case llvm::Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;

    case llvm::Intrinsic::x86_sse2_pmadd_wd:
    case llvm::Intrinsic::x86_avx2_pmadd_wd:
    case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I);
      break;

    case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 8);
      break;

    case llvm::Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 16);
      break;

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }
  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark
      // all outputs as clean. Note that any side effects of the inline asm
      // that are not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us.
      // To prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = nullptr;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      bool ArgIsInitialized = false;
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
        if (ArgOffset + Size > kParamTLSSize) break;
        unsigned ParamAlignment = CS.getParamAlignment(i + 1);
        unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.DL->getTypeAllocSize(A->getType());
        if (ArgOffset + Size > kParamTLSSize) break;
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
        Constant *Cst = dyn_cast<Constant>(ArgShadow);
        if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
      }
      if (MS.TrackOrigins && !ArgIsInitialized)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      (void)Store;
      assert(Size != 0 && Store != nullptr);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += RoundUpToAlignment(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }
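
    // At this point the argument shadows live in the __msan_param_tls
    // buffer, laid out sequentially with each slot rounded up to 8 bytes
    // (see the loop above); arguments that would overflow kParamTLSSize are
    // not written at all (the loop breaks).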
    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = nullptr;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertShadowCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      // FIXME: make it conditional if ClStoreCleanOrigin==0
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }
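
  // Illustrative sketch: the shadow of a phi mirrors the phi itself. For
  //   %x = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  // we create "%_msphi_s = phi i32 ..." here with no incoming values yet;
  // they are filled in later, once all operand shadows exist, from the
  // shadows of %a and %b.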
  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and are replaced
      // by the run-time with the stack origin ID on the first call.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
          createPrivateNonConstGlobalForString(*F.getParent(),
                                               StackDescription.str());

      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(&F, MS.IntptrTy));
    }
  }

  void visitSelectInst(SelectInst &I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();
    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    // Result shadow if condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned.
      // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.

      // Cast arguments to shadow-compatible type.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);

      // Result shadow if condition shadow is 1.
      Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      if (B->getType()->isVectorTy()) {
        Type *FlatTy = getShadowTyNoVec(B->getType());
        B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
                             ConstantInt::getNullValue(FlatTy));
        Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
                              ConstantInt::getNullValue(FlatTy));
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(
          &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
                               IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
                                                getOrigin(I.getFalseValue()))));
    }
  }
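
  // Illustrative example for the scalar case: for
  //   %a = select i1 %b, i8 %c, i8 %d
  // with %c = 0x0F (clean) and %d = 0x0F (clean) but %b poisoned, the shadow
  // is (c^d)|Sc|Sd = 0: both arms agree bit-for-bit, so %a is fully defined
  // even though the condition is not.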
  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "  AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "  ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "  AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << "  InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "  Res: " << *Res << "\n");
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertShadowCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
      VAArgOverflowSize(nullptr) {}

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
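
  // E.g. (illustrative): an i32 or i8* argument classifies as
  // AK_GeneralPurpose, a double or <4 x float> as AK_FloatingPoint, and an
  // i128 or a struct passed by value as AK_Memory; the real ABI rules have
  // more cases, hence "very rough approximation" above.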
  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned ArgNo = CS.getArgumentNo(ArgIt);
      bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
        assert(A->getType()->isPointerTy());
        Type *RealTy = A->getType()->getPointerElementType();
        uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
        Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
        OverflowOffset += RoundUpToAlignment(ArgSize, 8);
        IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
                         ArgSize, kShadowTLSAlignment);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *Base;
        switch (AK) {
        case AK_GeneralPurpose:
          Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory:
          uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
          Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
          OverflowOffset += RoundUpToAlignment(ArgSize, 8);
        }
        IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
      }
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }
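
  // For reference (standard AMD64 va_list layout, matching the magic offsets
  // used in finalizeInstrumentation below): the 24-byte __va_list_tag holds
  // gp_offset (+0), fp_offset (+4), overflow_arg_area (+8) and
  // reg_save_area (+16); the shadow copies below mirror both areas.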
  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize,
                       16);
    }
  }
};
/// \brief MIPS64-specific implementation of VarArgHelper.
struct VarArgMIPS64Helper : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
                     MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
      VAArgSize(nullptr) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      Value *Base;
      uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
#if defined(__MIPSEB__) || defined(MIPSEB)
      // Adjust the shadow for arguments with size < 8 to match the placement
      // of bits in a big-endian system.
      if (ArgSize < 8)
        VAArgOffset += (8 - ArgSize);
#endif
      Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
      VAArgOffset += ArgSize;
      VAArgOffset = RoundUpToAlignment(VAArgOffset, 8);
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
    // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating
    // a new class member; on MIPS64 it holds the total size of all VarArgs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */8, /* alignment */8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */8, /* alignment */8, false);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
    Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                    VAArgSize);

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                           Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
    }
  }
};

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64 and MIPS64. False positives
  // are possible on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
           TargetTriple.getArch() == llvm::Triple::mips64el)
    return new VarArgMIPS64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

} // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}