//===-- MemorySanitizer.cpp - detector of uninitialized reads ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
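///
/// For illustration (a hypothetical scenario, not code from this file): if
/// bytes 0-1 of a 4-byte region hold uninitialized data from allocation A
/// and bytes 2-3 hold uninitialized data from allocation B, the single
/// origin slot for the region keeps whichever origin was stored last, so a
/// report about byte 0 may name allocation B. This is the "last store wins"
/// rule described above.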
///
/// Atomic handling.
///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, an atomic
/// store to two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
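///
/// A sketch of the resulting instruction order for an atomic store (the
/// actual IR is emitted by materializeStores below; this is illustrative
/// only):
///
///   store i32 0, i32* %shadow_addr            ; clean shadow, emitted first
///   store atomic i32 %v, i32* %addr release   ; the application store
///
/// For an atomic load, the shadow load is emitted after the application
/// load, so a happens-before arc between the two application accesses also
/// orders the corresponding shadow accesses.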

//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "msan"

static const unsigned kOriginSize = 4;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

// These constants must be kept in sync with the ones in msan.h.
static const unsigned kParamTLSSize = 800;
static const unsigned kRetvalTLSSize = 800;

// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;

/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address operand of
// load or store. Such bugs are very rare, since a load from a garbage
// address typically results in SEGV, but they still happen (e.g. only the
// lower bits of the address are garbage, or the access happens early at
// program startup where malloc-ed memory is more likely to be zeroed).
// As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// This is an experiment to enable handling of cases where shadow is a
// non-zero compile-time constant. For some unexplainable reason they were
// silently ignored in the instrumentation.
static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
       cl::desc("Insert checks for constant shadow values"),
       cl::Hidden, cl::init(false));

static const char *const kMsanModuleCtorName = "msan.module_ctor";
static const char *const kMsanInitName = "__msan_init";

namespace {

// Memory map parameters used in application-to-shadow address calculation.
// Offset = (Addr & ~AndMask) ^ XorMask
// Shadow = ShadowBase + Offset
// Origin = OriginBase + Offset
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};

// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
  0x000080000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x000040000000,  // OriginBase
};

// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
  0x400000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x200000000000,  // OriginBase
#else
  0,               // AndMask (not used)
  0x500000000000,  // XorMask
  0,               // ShadowBase (not used)
  0x100000000000,  // OriginBase
#endif
};

// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
  0x004000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x002000000000,  // OriginBase
};

// ppc64 Linux
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
  0x200000000000,  // AndMask
  0x100000000000,  // XorMask
  0x080000000000,  // ShadowBase
  0x1C0000000000,  // OriginBase
};

// aarch64 Linux
static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
  0,               // AndMask (not used)
  0x06000000000,   // XorMask
  0,               // ShadowBase (not used)
  0x01000000000,   // OriginBase
};

// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
  0x000180000000,  // AndMask
  0x000040000000,  // XorMask
  0x000020000000,  // ShadowBase
  0x000700000000,  // OriginBase
};

// x86_64 FreeBSD
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
  0xc00000000000,  // AndMask
  0x200000000000,  // XorMask
  0x100000000000,  // ShadowBase
  0x380000000000,  // OriginBase
};

static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
  &Linux_I386_MemoryMapParams,
  &Linux_X86_64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
  nullptr,
  &Linux_MIPS64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
  nullptr,
  &Linux_PowerPC64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
  nullptr,
  &Linux_AArch64_MemoryMapParams,
};

static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
  &FreeBSD_I386_MemoryMapParams,
  &FreeBSD_X86_64_MemoryMapParams,
};
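
// A worked example of the mapping arithmetic above (hypothetical address,
// x86_64 Linux with the new mapping, i.e. XorMask = 0x500000000000):
//
//   Addr   = 0x700000001234
//   Offset = (Addr & ~0) ^ 0x500000000000 = 0x200000001234
//   Shadow = 0 + Offset                   = 0x200000001234
//   Origin = 0x100000000000 + Offset      = 0x300000001234
//            (then aligned down to 4 bytes by getOriginPtr below)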

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0)
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        WarningFn(nullptr) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Memory map parameters used in application-to-shadow calculation.
  const MemoryMapParams *MapParams;

  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;
  Function *MsanCtorFunction;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
  friend struct VarArgMIPS64Helper;
  friend struct VarArgAArch64Helper;
};
}  // anonymous namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
  return new MemorySanitizer(TrackOrigins);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}

/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), nullptr);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr);
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
      "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IntptrTy, IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MsanPoisonStackFn =
      M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr);
  MemmoveFn = M.getOrInsertFunction(
      "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MemcpyFn = M.getOrInsertFunction(
      "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MemsetFn = M.getOrInsertFunction(
      "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt32Ty(), IntptrTy, nullptr);

  // Create globals.
  RetvalTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
      M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_retval_origin_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
      M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
      nullptr, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
      M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_va_arg_overflow_size_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
      M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}
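
// For reference, the C prototypes the runtime is expected to provide for
// the declarations above look roughly like this (a sketch inferred from the
// getOrInsertFunction calls, not copied from the msan runtime headers):
//
//   void __msan_warning(void);
//   void __msan_warning_noreturn(void);
//   void __msan_maybe_warning_4(uint32_t shadow, uint32_t origin);
//   void __msan_maybe_store_origin_4(uint32_t shadow, void *addr,
//                                    uint32_t origin);
//   uint32_t __msan_chain_origin(uint32_t origin);
//   void *__msan_memcpy(void *dst, const void *src, uintptr_t n);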

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  auto &DL = M.getDataLayout();

  Triple TargetTriple(M.getTargetTriple());
  switch (TargetTriple.getOS()) {
  case Triple::FreeBSD:
    switch (TargetTriple.getArch()) {
    case Triple::x86_64:
      MapParams = FreeBSD_X86_MemoryMapParams.bits64;
      break;
    case Triple::x86:
      MapParams = FreeBSD_X86_MemoryMapParams.bits32;
      break;
    default:
      report_fatal_error("unsupported architecture");
    }
    break;
  case Triple::Linux:
    switch (TargetTriple.getArch()) {
    case Triple::x86_64:
      MapParams = Linux_X86_MemoryMapParams.bits64;
      break;
    case Triple::x86:
      MapParams = Linux_X86_MemoryMapParams.bits32;
      break;
    case Triple::mips64:
    case Triple::mips64el:
      MapParams = Linux_MIPS_MemoryMapParams.bits64;
      break;
    case Triple::ppc64:
    case Triple::ppc64le:
      MapParams = Linux_PowerPC_MemoryMapParams.bits64;
      break;
    case Triple::aarch64:
    case Triple::aarch64_be:
      MapParams = Linux_ARM_MemoryMapParams.bits64;
      break;
    default:
      report_fatal_error("unsupported architecture");
    }
    break;
  default:
    report_fatal_error("unsupported operating system");
  }

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  std::tie(MsanCtorFunction, std::ignore) =
      createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName,
                                          kMsanInitName,
                                          /*InitArgTypes=*/{},
                                          /*InitArgs=*/{});
  Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
  MsanCtorFunction->setComdat(MsanCtorComdat);

  appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
              dbgs() << "MemorySanitizer is not inserting checks into '"
                     << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    if (IntptrSize == kOriginSize) return Origin;
    assert(IntptrSize == kOriginSize * 2);
    Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
    return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
  }
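
  // For illustration: on a 64-bit target, originToIntptr replicates a
  // 4-byte origin id into both halves of an 8-byte word, so that a single
  // intptr-sized store paints two adjacent origin slots at once. E.g. a
  // hypothetical origin id 0xABCD1234 becomes 0xABCD1234ABCD1234.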

  /// \brief Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   unsigned Size, unsigned Alignment) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    assert(IntptrAlignment >= kMinOriginAlignment);
    assert(IntptrSize >= kOriginSize);

    unsigned Ofs = 0;
    unsigned CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr =
          IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
                       : IntptrOriginPtr;
        IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
        Ofs += IntptrSize / kOriginSize;
        CurrentAlignment = IntptrAlignment;
      }
    }

    for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
      Value *GEP =
          i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
      IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
      CurrentAlignment = kMinOriginAlignment;
    }
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
    if (Shadow->getType()->isAggregateType()) {
      paintOrigin(IRB, updateOrigin(Origin, IRB),
                  getOriginPtr(Addr, IRB, Alignment), StoreSize,
                  OriginAlignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
      if (ConstantShadow) {
        if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
          paintOrigin(IRB, updateOrigin(Origin, IRB),
                      getOriginPtr(Addr, IRB, Alignment), StoreSize,
                      OriginAlignment);
        return;
      }

      unsigned TypeSizeInBits =
          DL.getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall(Fn, {ConvertedShadow2,
                            IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                            Origin});
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
                    getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
                    OriginAlignment);
      }
    }
  }
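
  // A worked example for paintOrigin (hypothetical values, 64-bit target):
  // with Size = 12 and Alignment = 8, the first loop emits one 8-byte store
  // of the replicated origin (covering two 4-byte slots, so Ofs becomes 2),
  // and the second loop emits one 4-byte store for the remaining slot, i.e.
  // slots [0,1] and then slot [2] of the origin region.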

  void materializeStores(bool InstrumentWithCalls) {
    for (auto Inst : StoreList) {
      StoreInst &SI = *dyn_cast<StoreInst>(Inst);

      IRBuilder<> IRB(&SI);
      Value *Val = SI.getValueOperand();
      Value *Addr = SI.getPointerOperand();
      Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

      if (MS.TrackOrigins && !SI.isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
                    InstrumentWithCalls);
    }
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");

    Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
    if (ConstantShadow) {
      if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
        if (MS.TrackOrigins) {
          IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                          MS.OriginTLS);
        }
        IRB.CreateCall(MS.WarningFn, {});
        IRB.CreateCall(MS.EmptyAsm, {});
        // FIXME: Insert UnreachableInst if !ClKeepGoing?
        // This may invalidate some of the following checks and needs to be
        // done at the very end.
      }
      return;
    }

    const DataLayout &DL = OrigIns->getModule()->getDataLayout();

    unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0)});
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn, {});
      IRB.CreateCall(MS.EmptyAsm, {});
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized
    // later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    const DataLayout &DL = F.getParent()->getDataLayout();
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }
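
  // Examples of the shadow type mapping implemented above (illustrative,
  // derived from getShadowTy / getShadowTyNoVec):
  //   i32            -> i32             (integers map to themselves)
  //   float          -> i32             (sized non-integer -> iN of same size)
  //   <4 x float>    -> <4 x i32>       (per-element integer shadow)
  //   {i32, double}  -> {i32, i64}      (field-wise shadow struct)
  // and getShadowTyNoVec flattens <4 x i32> to i128 for comparisons.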

  /// \brief Compute the integer shadow offset that corresponds to a given
  /// application address.
  ///
  /// Offset = (Addr & ~AndMask) ^ XorMask
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);

    uint64_t AndMask = MS.MapParams->AndMask;
    if (AndMask)
      OffsetLong =
          IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));

    uint64_t XorMask = MS.MapParams->XorMask;
    if (XorMask)
      OffsetLong =
          IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
    return OffsetLong;
  }

  /// \brief Compute the shadow address that corresponds to a given
  /// application address.
  ///
  /// Shadow = ShadowBase + Offset
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
    uint64_t ShadowBase = MS.MapParams->ShadowBase;
    if (ShadowBase != 0)
      ShadowLong =
          IRB.CreateAdd(ShadowLong,
                        ConstantInt::get(MS.IntptrTy, ShadowBase));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given
  /// application address.
  ///
  /// OriginAddr = (OriginBase + Offset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
    Value *OriginLong = getShadowPtrOffset(Addr, IRB);
    uint64_t OriginBase = MS.MapParams->OriginBase;
    if (OriginBase != 0)
      OriginLong =
          IRB.CreateAdd(OriginLong,
                        ConstantInt::get(MS.IntptrTy, OriginBase));
    if (Alignment < kMinOriginAlignment) {
      uint64_t Mask = kMinOriginAlignment - 1;
      OriginLong = IRB.CreateAnd(OriginLong,
                                 ConstantInt::get(MS.IntptrTy, ~Mask));
    }
    return IRB.CreateIntToPtr(OriginLong,
                              PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS + ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }
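
  // For illustration, the ParamTLS layout for a hypothetical call
  // f(i32 a, double b, i8 c): each argument's shadow is placed at an offset
  // rounded up to kShadowTLSAlignment (8), so a is at offset 0, b at 8, and
  // c at 16, with kParamTLSSize (800 bytes) as the hard limit; arguments
  // past the limit get clean shadow (see the Overflow handling in getShadow
  // below).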

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    llvm_unreachable("Unexpected shadow type");
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (!PropagateShadow) return getCleanShadow(V);
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the
      // map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getParent()->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size =
            FArg.hasByValAttr()
                ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
                : DL.getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = DL.getABITypeAlignment(EltType);
            }
            if (Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
                  Constant::getNullValue(EntryIRB.getInt8Ty()), Size,
                  ArgAlign);
            } else {
              unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
              Value *Cpy = EntryIRB.CreateMemCpy(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                  CopyAlign);
              DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
              (void)Cpy;
            }
            *ShadowPtr = getCleanShadow(V);
          } else {
            if (Overflow) {
              // ParamTLS overflow.
              *ShadowPtr = getCleanShadow(V);
            } else {
              *ShadowPtr =
                  EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
            }
          }
          DEBUG(dbgs() << "  ARG: " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins && !Overflow) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          } else {
            setOrigin(A, getCleanOrigin());
          }
        }
        ArgOffset += alignTo(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (!PropagateShadow) return getCleanOrigin();
    if (isa<Constant>(V)) return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow) return;
      Origin = getOrigin(Val);
    } else {
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      if (!Shadow) return;
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    }
    insertShadowCheck(Shadow, Origin, OrigIns);
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
    case NotAtomic:
      return NotAtomic;
    case Unordered:
    case Monotonic:
    case Release:
      return Release;
    case Acquire:
    case AcquireRelease:
      return AcquireRelease;
    case SequentiallyConsistent:
      return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
    case NotAtomic:
      return NotAtomic;
    case Unordered:
    case Monotonic:
    case Acquire:
      return Acquire;
    case Release:
    case AcquireRelease:
      return AcquireRelease;
    case SequentiallyConsistent:
      return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (PropagateShadow && !I.getMetadata("nosanitize")) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        unsigned Alignment = I.getAlignment();
        unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
        setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
                                            OriginAlignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we cannot
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    // Special case: if this is the bitcast (there is exactly 1 allowed)
    // between a musttail call and a ret, don't instrument. New instructions
    // are not allowed after a musttail call.
    if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
        return;
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in the right argument affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "And" of 0 and a poisoned value results in unpoisoned value.
    // 1&1 => 1;  0&1 => 0;  p&1 => p;
    // 1&0 => 0;  0&0 => 0;  p&0 => 0;
    // 1&p => p;  0&p => 0;  p&p => p;
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in unpoisoned value.
    // 1|1 => 1;  0|1 => 1;  p|1 => 1;
    // 1|0 => 1;  0|0 => 0;  p|0 => p;
    // 1|p => 1;  0|p => p;  p|p => p;
    // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
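
  // A worked bit-level example of the AND rule (hypothetical values):
  // let V1 = 0b01 with S1 = 0b00 (fully defined), and let the right
  // operand's high bit be poisoned (S2 = 0b10). Then
  //   S = (S1 & S2) | (V1 & S2) | (S1 & V2) = 0b00 | 0b00 | 0b00 = 0b00,
  // i.e. the result is fully defined: the poisoned bit is ANDed with a
  // defined 0 and cannot influence the result. With V1 = 0b11 instead,
  // V1 & S2 = 0b10 and the high bit of the result is poisoned.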
1477 Combiner &Add(Value *OpShadow, Value *OpOrigin) { 1478 if (CombineShadow) { 1479 assert(OpShadow); 1480 if (!Shadow) 1481 Shadow = OpShadow; 1482 else { 1483 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType()); 1484 Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop"); 1485 } 1486 } 1487 1488 if (MSV->MS.TrackOrigins) { 1489 assert(OpOrigin); 1490 if (!Origin) { 1491 Origin = OpOrigin; 1492 } else { 1493 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin); 1494 // No point in adding something that might result in 0 origin value. 1495 if (!ConstOrigin || !ConstOrigin->isNullValue()) { 1496 Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB); 1497 Value *Cond = 1498 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow)); 1499 Origin = IRB.CreateSelect(Cond, OpOrigin, Origin); 1500 } 1501 } 1502 } 1503 return *this; 1504 } 1505 1506 /// \brief Add an application value to the mix. 1507 Combiner &Add(Value *V) { 1508 Value *OpShadow = MSV->getShadow(V); 1509 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr; 1510 return Add(OpShadow, OpOrigin); 1511 } 1512 1513 /// \brief Set the current combined values as the given instruction's shadow 1514 /// and origin. 1515 void Done(Instruction *I) { 1516 if (CombineShadow) { 1517 assert(Shadow); 1518 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I)); 1519 MSV->setShadow(I, Shadow); 1520 } 1521 if (MSV->MS.TrackOrigins) { 1522 assert(Origin); 1523 MSV->setOrigin(I, Origin); 1524 } 1525 } 1526 }; 1527 1528 typedef Combiner<true> ShadowAndOriginCombiner; 1529 typedef Combiner<false> OriginCombiner; 1530 1531 /// \brief Propagate origin for arbitrary operation. 1532 void setOriginForNaryOp(Instruction &I) { 1533 if (!MS.TrackOrigins) return; 1534 IRBuilder<> IRB(&I); 1535 OriginCombiner OC(this, IRB); 1536 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI) 1537 OC.Add(OI->get()); 1538 OC.Done(&I); 1539 } 1540 1541 size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) { 1542 assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) && 1543 "Vector of pointers is not a valid shadow type"); 1544 return Ty->isVectorTy() ? 1545 Ty->getVectorNumElements() * Ty->getScalarSizeInBits() : 1546 Ty->getPrimitiveSizeInBits(); 1547 } 1548 1549 /// \brief Cast between two shadow types, extending or truncating as 1550 /// necessary. 1551 Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy, 1552 bool Signed = false) { 1553 Type *srcTy = V->getType(); 1554 if (dstTy->isIntegerTy() && srcTy->isIntegerTy()) 1555 return IRB.CreateIntCast(V, dstTy, Signed); 1556 if (dstTy->isVectorTy() && srcTy->isVectorTy() && 1557 dstTy->getVectorNumElements() == srcTy->getVectorNumElements()) 1558 return IRB.CreateIntCast(V, dstTy, Signed); 1559 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy); 1560 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy); 1561 Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits)); 1562 Value *V2 = 1563 IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed); 1564 return IRB.CreateBitCast(V2, dstTy); 1565 // TODO: handle struct types. 1566 } 1567 1568 /// \brief Cast an application value to the type of its own shadow. 
1569 Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
1570   Type *ShadowTy = getShadowTy(V);
1571   if (V->getType() == ShadowTy)
1572     return V;
1573   if (V->getType()->isPtrOrPtrVectorTy())
1574     return IRB.CreatePtrToInt(V, ShadowTy);
1575   else
1576     return IRB.CreateBitCast(V, ShadowTy);
1577 }
1578
1579 /// \brief Propagate shadow for arbitrary operation.
1580 void handleShadowOr(Instruction &I) {
1581   IRBuilder<> IRB(&I);
1582   ShadowAndOriginCombiner SC(this, IRB);
1583   for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1584     SC.Add(OI->get());
1585   SC.Done(&I);
1586 }
1587
1588 // \brief Handle multiplication by constant.
1589 //
1590 // Handle a special case of multiplication by a constant that may have one or
1591 // more zeros in the lower bits. This makes the corresponding number of lower
1592 // bits of the result zero as well. We model it by shifting the other operand
1593 // shadow left by the required number of bits. Effectively, we transform
1594 // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
1595 // We use multiplication by 2**N instead of shift to cover the case of
1596 // multiplication by 0, which may occur in some elements of a vector operand.
1597 void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
1598                          Value *OtherArg) {
1599   Constant *ShadowMul;
1600   Type *Ty = ConstArg->getType();
1601   if (Ty->isVectorTy()) {
1602     unsigned NumElements = Ty->getVectorNumElements();
1603     Type *EltTy = Ty->getSequentialElementType();
1604     SmallVector<Constant *, 16> Elements;
1605     for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
1606       if (ConstantInt *Elt =
1607               dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
1608         APInt V = Elt->getValue();
1609         APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1610         Elements.push_back(ConstantInt::get(EltTy, V2));
1611       } else {
1612         Elements.push_back(ConstantInt::get(EltTy, 1));
1613       }
1614     }
1615     ShadowMul = ConstantVector::get(Elements);
1616   } else {
1617     if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
1618       APInt V = Elt->getValue();
1619       APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1620       ShadowMul = ConstantInt::get(Ty, V2);
1621     } else {
1622       ShadowMul = ConstantInt::get(Ty, 1);
1623     }
1624   }
1625
1626   IRBuilder<> IRB(&I);
1627   setShadow(&I,
1628             IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
1629   setOrigin(&I, getOrigin(OtherArg));
1630 }
1631
1632 void visitMul(BinaryOperator &I) {
1633   Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1634   Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1635   if (constOp0 && !constOp1)
1636     handleMulByConstant(I, constOp0, I.getOperand(1));
1637   else if (constOp1 && !constOp0)
1638     handleMulByConstant(I, constOp1, I.getOperand(0));
1639   else
1640     handleShadowOr(I);
1641 }
1642
1643 void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
1644 void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
1645 void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
1646 void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
1647 void visitSub(BinaryOperator &I) { handleShadowOr(I); }
1648 void visitXor(BinaryOperator &I) { handleShadowOr(I); }
1649
1650 void handleDiv(Instruction &I) {
1651   IRBuilder<> IRB(&I);
1652   // Strict on the second argument.
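  // A fully defined divisor is required because dividing by an uninitialized
  // value is an immediate bug rather than ordinary shadow propagation. As an
  // illustrative sketch, for "%q = udiv i32 %a, %b" the effect is:
  //   if shadow(%b) != 0, report a use of uninitialized value here;
  //   shadow(%q) = shadow(%a); origin(%q) = origin(%a).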
1653 insertShadowCheck(I.getOperand(1), &I); 1654 setShadow(&I, getShadow(&I, 0)); 1655 setOrigin(&I, getOrigin(&I, 0)); 1656 } 1657 1658 void visitUDiv(BinaryOperator &I) { handleDiv(I); } 1659 void visitSDiv(BinaryOperator &I) { handleDiv(I); } 1660 void visitFDiv(BinaryOperator &I) { handleDiv(I); } 1661 void visitURem(BinaryOperator &I) { handleDiv(I); } 1662 void visitSRem(BinaryOperator &I) { handleDiv(I); } 1663 void visitFRem(BinaryOperator &I) { handleDiv(I); } 1664 1665 /// \brief Instrument == and != comparisons. 1666 /// 1667 /// Sometimes the comparison result is known even if some of the bits of the 1668 /// arguments are not. 1669 void handleEqualityComparison(ICmpInst &I) { 1670 IRBuilder<> IRB(&I); 1671 Value *A = I.getOperand(0); 1672 Value *B = I.getOperand(1); 1673 Value *Sa = getShadow(A); 1674 Value *Sb = getShadow(B); 1675 1676 // Get rid of pointers and vectors of pointers. 1677 // For ints (and vectors of ints), types of A and Sa match, 1678 // and this is a no-op. 1679 A = IRB.CreatePointerCast(A, Sa->getType()); 1680 B = IRB.CreatePointerCast(B, Sb->getType()); 1681 1682 // A == B <==> (C = A^B) == 0 1683 // A != B <==> (C = A^B) != 0 1684 // Sc = Sa | Sb 1685 Value *C = IRB.CreateXor(A, B); 1686 Value *Sc = IRB.CreateOr(Sa, Sb); 1687 // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now) 1688 // Result is defined if one of the following is true 1689 // * there is a defined 1 bit in C 1690 // * C is fully defined 1691 // Si = !(C & ~Sc) && Sc 1692 Value *Zero = Constant::getNullValue(Sc->getType()); 1693 Value *MinusOne = Constant::getAllOnesValue(Sc->getType()); 1694 Value *Si = 1695 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero), 1696 IRB.CreateICmpEQ( 1697 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero)); 1698 Si->setName("_msprop_icmp"); 1699 setShadow(&I, Si); 1700 setOriginForNaryOp(I); 1701 } 1702 1703 /// \brief Build the lowest possible value of V, taking into account V's 1704 /// uninitialized bits. 1705 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa, 1706 bool isSigned) { 1707 if (isSigned) { 1708 // Split shadow into sign bit and other bits. 1709 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1); 1710 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits); 1711 // Maximise the undefined shadow bit, minimize other undefined bits. 1712 return 1713 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit); 1714 } else { 1715 // Minimize undefined bits. 1716 return IRB.CreateAnd(A, IRB.CreateNot(Sa)); 1717 } 1718 } 1719 1720 /// \brief Build the highest possible value of V, taking into account V's 1721 /// uninitialized bits. 1722 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa, 1723 bool isSigned) { 1724 if (isSigned) { 1725 // Split shadow into sign bit and other bits. 1726 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1); 1727 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits); 1728 // Minimise the undefined shadow bit, maximise other undefined bits. 1729 return 1730 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits); 1731 } else { 1732 // Maximize undefined bits. 1733 return IRB.CreateOr(A, Sa); 1734 } 1735 } 1736 1737 /// \brief Instrument relational comparisons. 1738 /// 1739 /// This function does exact shadow propagation for all relational 1740 /// comparisons of integers, pointers and vectors of those. 
1741 /// FIXME: output seems suboptimal when one of the operands is a constant 1742 void handleRelationalComparisonExact(ICmpInst &I) { 1743 IRBuilder<> IRB(&I); 1744 Value *A = I.getOperand(0); 1745 Value *B = I.getOperand(1); 1746 Value *Sa = getShadow(A); 1747 Value *Sb = getShadow(B); 1748 1749 // Get rid of pointers and vectors of pointers. 1750 // For ints (and vectors of ints), types of A and Sa match, 1751 // and this is a no-op. 1752 A = IRB.CreatePointerCast(A, Sa->getType()); 1753 B = IRB.CreatePointerCast(B, Sb->getType()); 1754 1755 // Let [a0, a1] be the interval of possible values of A, taking into account 1756 // its undefined bits. Let [b0, b1] be the interval of possible values of B. 1757 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0). 1758 bool IsSigned = I.isSigned(); 1759 Value *S1 = IRB.CreateICmp(I.getPredicate(), 1760 getLowestPossibleValue(IRB, A, Sa, IsSigned), 1761 getHighestPossibleValue(IRB, B, Sb, IsSigned)); 1762 Value *S2 = IRB.CreateICmp(I.getPredicate(), 1763 getHighestPossibleValue(IRB, A, Sa, IsSigned), 1764 getLowestPossibleValue(IRB, B, Sb, IsSigned)); 1765 Value *Si = IRB.CreateXor(S1, S2); 1766 setShadow(&I, Si); 1767 setOriginForNaryOp(I); 1768 } 1769 1770 /// \brief Instrument signed relational comparisons. 1771 /// 1772 /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest 1773 /// bit of the shadow. Everything else is delegated to handleShadowOr(). 1774 void handleSignedRelationalComparison(ICmpInst &I) { 1775 Constant *constOp; 1776 Value *op = nullptr; 1777 CmpInst::Predicate pre; 1778 if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) { 1779 op = I.getOperand(0); 1780 pre = I.getPredicate(); 1781 } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) { 1782 op = I.getOperand(1); 1783 pre = I.getSwappedPredicate(); 1784 } else { 1785 handleShadowOr(I); 1786 return; 1787 } 1788 1789 if ((constOp->isNullValue() && 1790 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) || 1791 (constOp->isAllOnesValue() && 1792 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) { 1793 IRBuilder<> IRB(&I); 1794 Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), 1795 "_msprop_icmp_s"); 1796 setShadow(&I, Shadow); 1797 setOrigin(&I, getOrigin(op)); 1798 } else { 1799 handleShadowOr(I); 1800 } 1801 } 1802 1803 void visitICmpInst(ICmpInst &I) { 1804 if (!ClHandleICmp) { 1805 handleShadowOr(I); 1806 return; 1807 } 1808 if (I.isEquality()) { 1809 handleEqualityComparison(I); 1810 return; 1811 } 1812 1813 assert(I.isRelational()); 1814 if (ClHandleICmpExact) { 1815 handleRelationalComparisonExact(I); 1816 return; 1817 } 1818 if (I.isSigned()) { 1819 handleSignedRelationalComparison(I); 1820 return; 1821 } 1822 1823 assert(I.isUnsigned()); 1824 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) { 1825 handleRelationalComparisonExact(I); 1826 return; 1827 } 1828 1829 handleShadowOr(I); 1830 } 1831 1832 void visitFCmpInst(FCmpInst &I) { 1833 handleShadowOr(I); 1834 } 1835 1836 void handleShift(BinaryOperator &I) { 1837 IRBuilder<> IRB(&I); 1838 // If any of the S2 bits are poisoned, the whole thing is poisoned. 1839 // Otherwise perform the same shift on S1. 
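  // Illustrative sketch for "%r = shl i32 %a, %b" (value names hypothetical):
  //   %s2conv = sext (icmp ne i32 Sb, 0) to i32  ; all-ones if %b is poisoned
  //   Sr = or i32 (shl i32 Sa, %b), %s2conv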
1840   Value *S1 = getShadow(&I, 0);
1841   Value *S2 = getShadow(&I, 1);
1842   Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1843                                  S2->getType());
1844   Value *V2 = I.getOperand(1);
1845   Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1846   setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1847   setOriginForNaryOp(I);
1848 }
1849
1850 void visitShl(BinaryOperator &I) { handleShift(I); }
1851 void visitAShr(BinaryOperator &I) { handleShift(I); }
1852 void visitLShr(BinaryOperator &I) { handleShift(I); }
1853
1854 /// \brief Instrument llvm.memmove
1855 ///
1856 /// At this point we don't know if llvm.memmove will be inlined or not.
1857 /// If we don't instrument it and it gets inlined,
1858 /// our interceptor will not kick in and we will lose the memmove.
1859 /// If we instrument the call here, but it does not get inlined,
1860 /// we will memmove the shadow twice, which is bad in case
1861 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1862 ///
1863 /// A similar situation exists for memcpy and memset.
1864 void visitMemMoveInst(MemMoveInst &I) {
1865   IRBuilder<> IRB(&I);
1866   IRB.CreateCall(
1867       MS.MemmoveFn,
1868       {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1869        IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1870        IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1871   I.eraseFromParent();
1872 }
1873
1874 // Similar to memmove: avoid copying shadow twice.
1875 // This is somewhat unfortunate as it may slow down small constant memcpys.
1876 // FIXME: consider doing manual inline for small constant sizes and proper
1877 // alignment.
1878 void visitMemCpyInst(MemCpyInst &I) {
1879   IRBuilder<> IRB(&I);
1880   IRB.CreateCall(
1881       MS.MemcpyFn,
1882       {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1883        IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1884        IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1885   I.eraseFromParent();
1886 }
1887
1888 // Same as memcpy.
1889 void visitMemSetInst(MemSetInst &I) {
1890   IRBuilder<> IRB(&I);
1891   IRB.CreateCall(
1892       MS.MemsetFn,
1893       {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1894        IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1895        IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1896   I.eraseFromParent();
1897 }
1898
1899 void visitVAStartInst(VAStartInst &I) {
1900   VAHelper->visitVAStartInst(I);
1901 }
1902
1903 void visitVACopyInst(VACopyInst &I) {
1904   VAHelper->visitVACopyInst(I);
1905 }
1906
1907 /// \brief Handle vector store-like intrinsics.
1908 ///
1909 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1910 /// has 1 pointer argument and 1 vector argument, returns void.
1911 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1912   IRBuilder<> IRB(&I);
1913   Value* Addr = I.getArgOperand(0);
1914   Value *Shadow = getShadow(&I, 1);
1915   Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1916
1917   // We don't know the pointer alignment (could be unaligned SSE store!).
1918   // Have to assume the worst case.
1919   IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1920
1921   if (ClCheckAccessAddress)
1922     insertShadowCheck(Addr, &I);
1923
1924   // FIXME: use ClStoreCleanOrigin
1925   // FIXME: factor out common code from materializeStores
1926   if (MS.TrackOrigins)
1927     IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
1928   return true;
1929 }
1930
1931 /// \brief Handle vector load-like intrinsics.
1932 /// 1933 /// Instrument intrinsics that look like a simple SIMD load: reads memory, 1934 /// has 1 pointer argument, returns a vector. 1935 bool handleVectorLoadIntrinsic(IntrinsicInst &I) { 1936 IRBuilder<> IRB(&I); 1937 Value *Addr = I.getArgOperand(0); 1938 1939 Type *ShadowTy = getShadowTy(&I); 1940 if (PropagateShadow) { 1941 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB); 1942 // We don't know the pointer alignment (could be unaligned SSE load!). 1943 // Have to assume to worst case. 1944 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld")); 1945 } else { 1946 setShadow(&I, getCleanShadow(&I)); 1947 } 1948 1949 if (ClCheckAccessAddress) 1950 insertShadowCheck(Addr, &I); 1951 1952 if (MS.TrackOrigins) { 1953 if (PropagateShadow) 1954 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1))); 1955 else 1956 setOrigin(&I, getCleanOrigin()); 1957 } 1958 return true; 1959 } 1960 1961 /// \brief Handle (SIMD arithmetic)-like intrinsics. 1962 /// 1963 /// Instrument intrinsics with any number of arguments of the same type, 1964 /// equal to the return type. The type should be simple (no aggregates or 1965 /// pointers; vectors are fine). 1966 /// Caller guarantees that this intrinsic does not access memory. 1967 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) { 1968 Type *RetTy = I.getType(); 1969 if (!(RetTy->isIntOrIntVectorTy() || 1970 RetTy->isFPOrFPVectorTy() || 1971 RetTy->isX86_MMXTy())) 1972 return false; 1973 1974 unsigned NumArgOperands = I.getNumArgOperands(); 1975 1976 for (unsigned i = 0; i < NumArgOperands; ++i) { 1977 Type *Ty = I.getArgOperand(i)->getType(); 1978 if (Ty != RetTy) 1979 return false; 1980 } 1981 1982 IRBuilder<> IRB(&I); 1983 ShadowAndOriginCombiner SC(this, IRB); 1984 for (unsigned i = 0; i < NumArgOperands; ++i) 1985 SC.Add(I.getArgOperand(i)); 1986 SC.Done(&I); 1987 1988 return true; 1989 } 1990 1991 /// \brief Heuristically instrument unknown intrinsics. 1992 /// 1993 /// The main purpose of this code is to do something reasonable with all 1994 /// random intrinsics we might encounter, most importantly - SIMD intrinsics. 1995 /// We recognize several classes of intrinsics by their argument types and 1996 /// ModRefBehaviour and apply special intrumentation when we are reasonably 1997 /// sure that we know what the intrinsic does. 1998 /// 1999 /// We special-case intrinsics where this approach fails. See llvm.bswap 2000 /// handling as an example of that. 2001 bool handleUnknownIntrinsic(IntrinsicInst &I) { 2002 unsigned NumArgOperands = I.getNumArgOperands(); 2003 if (NumArgOperands == 0) 2004 return false; 2005 2006 if (NumArgOperands == 2 && 2007 I.getArgOperand(0)->getType()->isPointerTy() && 2008 I.getArgOperand(1)->getType()->isVectorTy() && 2009 I.getType()->isVoidTy() && 2010 !I.onlyReadsMemory()) { 2011 // This looks like a vector store. 2012 return handleVectorStoreIntrinsic(I); 2013 } 2014 2015 if (NumArgOperands == 1 && 2016 I.getArgOperand(0)->getType()->isPointerTy() && 2017 I.getType()->isVectorTy() && 2018 I.onlyReadsMemory()) { 2019 // This looks like a vector load. 
2020     return handleVectorLoadIntrinsic(I);
2021   }
2022
2023   if (I.doesNotAccessMemory())
2024     if (maybeHandleSimpleNomemIntrinsic(I))
2025       return true;
2026
2027   // FIXME: detect and handle SSE maskstore/maskload
2028   return false;
2029 }
2030
2031 void handleBswap(IntrinsicInst &I) {
2032   IRBuilder<> IRB(&I);
2033   Value *Op = I.getArgOperand(0);
2034   Type *OpType = Op->getType();
2035   Function *BswapFunc = Intrinsic::getDeclaration(
2036       F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2037   setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2038   setOrigin(&I, getOrigin(Op));
2039 }
2040
2041 // \brief Instrument vector convert intrinsic.
2042 //
2043 // This function instruments intrinsics like cvtsi2ss:
2044 // %Out = int_xxx_cvtyyy(%ConvertOp)
2045 // or
2046 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2047 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
2048 // number of \p Out elements, and (if it has 2 arguments) copies the rest of
2049 // the elements from \p CopyOp.
2050 // In most cases conversion involves a floating-point value which may trigger
2051 // a hardware exception when not fully initialized. For this reason we require
2052 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2053 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2054 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2055 // return a fully initialized value.
2056 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2057   IRBuilder<> IRB(&I);
2058   Value *CopyOp, *ConvertOp;
2059
2060   switch (I.getNumArgOperands()) {
2061   case 3:
2062     assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2063   case 2:
2064     CopyOp = I.getArgOperand(0);
2065     ConvertOp = I.getArgOperand(1);
2066     break;
2067   case 1:
2068     ConvertOp = I.getArgOperand(0);
2069     CopyOp = nullptr;
2070     break;
2071   default:
2072     llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2073   }
2074
2075   // The first *NumUsedElements* elements of ConvertOp are converted to the
2076   // same number of output elements. The rest of the output is copied from
2077   // CopyOp, or (if not available) filled with zeroes.
2078   // Combine shadow for elements of ConvertOp that are used in this operation,
2079   // and insert a check.
2080   // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2081   // int->any conversion.
2082   Value *ConvertShadow = getShadow(ConvertOp);
2083   Value *AggShadow = nullptr;
2084   if (ConvertOp->getType()->isVectorTy()) {
2085     AggShadow = IRB.CreateExtractElement(
2086         ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2087     for (int i = 1; i < NumUsedElements; ++i) {
2088       Value *MoreShadow = IRB.CreateExtractElement(
2089           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2090       AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2091     }
2092   } else {
2093     AggShadow = ConvertShadow;
2094   }
2095   assert(AggShadow->getType()->isIntegerTy());
2096   insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2097
2098   // Build result shadow by zero-filling parts of CopyOp shadow that come from
2099   // ConvertOp.
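  // E.g. for "%Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)" with
  // NumUsedElements == 1 (an illustrative sketch): element 0 of the result
  // shadow is zeroed, since that lane was checked above, and the remaining
  // lanes keep the shadow of %CopyOp.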
2100   if (CopyOp) {
2101     assert(CopyOp->getType() == I.getType());
2102     assert(CopyOp->getType()->isVectorTy());
2103     Value *ResultShadow = getShadow(CopyOp);
2104     Type *EltTy = ResultShadow->getType()->getVectorElementType();
2105     for (int i = 0; i < NumUsedElements; ++i) {
2106       ResultShadow = IRB.CreateInsertElement(
2107           ResultShadow, ConstantInt::getNullValue(EltTy),
2108           ConstantInt::get(IRB.getInt32Ty(), i));
2109     }
2110     setShadow(&I, ResultShadow);
2111     setOrigin(&I, getOrigin(CopyOp));
2112   } else {
2113     setShadow(&I, getCleanShadow(&I));
2114     setOrigin(&I, getCleanOrigin());
2115   }
2116 }
2117
2118 // Given a scalar or vector, extract lower 64 bits (or less), and return all
2119 // zeroes if it is zero, and all ones otherwise.
2120 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2121   if (S->getType()->isVectorTy())
2122     S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2123   assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2124   Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2125   return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2126 }
2127
2128 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2129   Type *T = S->getType();
2130   assert(T->isVectorTy());
2131   Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2132   return IRB.CreateSExt(S2, T);
2133 }
2134
2135 // \brief Instrument vector shift intrinsic.
2136 //
2137 // This function instruments intrinsics like int_x86_avx2_psll_w.
2138 // Intrinsic shifts %In by %ShiftSize bits.
2139 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2140 // size, and the rest is ignored. Behavior is defined even if shift size is
2141 // greater than register (or field) width.
2142 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2143   assert(I.getNumArgOperands() == 2);
2144   IRBuilder<> IRB(&I);
2145   // If any of the S2 bits are poisoned, the whole thing is poisoned.
2146   // Otherwise perform the same shift on S1.
2147   Value *S1 = getShadow(&I, 0);
2148   Value *S2 = getShadow(&I, 1);
2149   Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2150                            : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2151   Value *V1 = I.getOperand(0);
2152   Value *V2 = I.getOperand(1);
2153   Value *Shift = IRB.CreateCall(I.getCalledValue(),
2154                                 {IRB.CreateBitCast(S1, V1->getType()), V2});
2155   Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2156   setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2157   setOriginForNaryOp(I);
2158 }
2159
2160 // \brief Get an X86_MMX-sized vector type.
2161 Type *getMMXVectorTy(unsigned EltSizeInBits) {
2162   const unsigned X86_MMXSizeInBits = 64;
2163   return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2164                          X86_MMXSizeInBits / EltSizeInBits);
2165 }
2166
2167 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
2168 // intrinsic.
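// E.g. x86_sse2_packuswb_128 maps to x86_sse2_packsswb_128: for shadow
// propagation only the packing shape matters, and the sext(Sx != 0) lanes
// fed to it are either 0 or -1, both of which survive signed saturation
// unchanged.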
2169 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2170   switch (id) {
2171   case llvm::Intrinsic::x86_sse2_packsswb_128:
2172   case llvm::Intrinsic::x86_sse2_packuswb_128:
2173     return llvm::Intrinsic::x86_sse2_packsswb_128;
2174
2175   case llvm::Intrinsic::x86_sse2_packssdw_128:
2176   case llvm::Intrinsic::x86_sse41_packusdw:
2177     return llvm::Intrinsic::x86_sse2_packssdw_128;
2178
2179   case llvm::Intrinsic::x86_avx2_packsswb:
2180   case llvm::Intrinsic::x86_avx2_packuswb:
2181     return llvm::Intrinsic::x86_avx2_packsswb;
2182
2183   case llvm::Intrinsic::x86_avx2_packssdw:
2184   case llvm::Intrinsic::x86_avx2_packusdw:
2185     return llvm::Intrinsic::x86_avx2_packssdw;
2186
2187   case llvm::Intrinsic::x86_mmx_packsswb:
2188   case llvm::Intrinsic::x86_mmx_packuswb:
2189     return llvm::Intrinsic::x86_mmx_packsswb;
2190
2191   case llvm::Intrinsic::x86_mmx_packssdw:
2192     return llvm::Intrinsic::x86_mmx_packssdw;
2193   default:
2194     llvm_unreachable("unexpected intrinsic id");
2195   }
2196 }
2197
2198 // \brief Instrument vector pack intrinsic.
2199 //
2200 // This function instruments intrinsics like x86_mmx_packsswb, which
2201 // pack elements of 2 input vectors into half as many bits with saturation.
2202 // Shadow is propagated with the signed variant of the same intrinsic applied
2203 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2204 // EltSizeInBits is used only for x86mmx arguments.
2205 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2206   assert(I.getNumArgOperands() == 2);
2207   bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2208   IRBuilder<> IRB(&I);
2209   Value *S1 = getShadow(&I, 0);
2210   Value *S2 = getShadow(&I, 1);
2211   assert(isX86_MMX || S1->getType()->isVectorTy());
2212
2213   // SExt and ICmpNE below must apply to individual elements of input vectors.
2214   // In case of x86mmx arguments, cast them to appropriate vector types and
2215   // back.
2216   Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2217   if (isX86_MMX) {
2218     S1 = IRB.CreateBitCast(S1, T);
2219     S2 = IRB.CreateBitCast(S2, T);
2220   }
2221   Value *S1_ext = IRB.CreateSExt(
2222       IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
2223   Value *S2_ext = IRB.CreateSExt(
2224       IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
2225   if (isX86_MMX) {
2226     Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2227     S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2228     S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2229   }
2230
2231   Function *ShadowFn = Intrinsic::getDeclaration(
2232       F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2233
2234   Value *S =
2235       IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2236   if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2237   setShadow(&I, S);
2238   setOriginForNaryOp(I);
2239 }
2240
2241 // \brief Instrument sum-of-absolute-differences intrinsic.
2242 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2243   const unsigned SignificantBitsPerResultElement = 16;
2244   bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2245   Type *ResTy = isX86_MMX ?
IntegerType::get(*MS.C, 64) : I.getType(); 2246 unsigned ZeroBitsPerResultElement = 2247 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement; 2248 2249 IRBuilder<> IRB(&I); 2250 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1)); 2251 S = IRB.CreateBitCast(S, ResTy); 2252 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)), 2253 ResTy); 2254 S = IRB.CreateLShr(S, ZeroBitsPerResultElement); 2255 S = IRB.CreateBitCast(S, getShadowTy(&I)); 2256 setShadow(&I, S); 2257 setOriginForNaryOp(I); 2258 } 2259 2260 // \brief Instrument multiply-add intrinsic. 2261 void handleVectorPmaddIntrinsic(IntrinsicInst &I, 2262 unsigned EltSizeInBits = 0) { 2263 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy(); 2264 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType(); 2265 IRBuilder<> IRB(&I); 2266 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1)); 2267 S = IRB.CreateBitCast(S, ResTy); 2268 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)), 2269 ResTy); 2270 S = IRB.CreateBitCast(S, getShadowTy(&I)); 2271 setShadow(&I, S); 2272 setOriginForNaryOp(I); 2273 } 2274 2275 void visitIntrinsicInst(IntrinsicInst &I) { 2276 switch (I.getIntrinsicID()) { 2277 case llvm::Intrinsic::bswap: 2278 handleBswap(I); 2279 break; 2280 case llvm::Intrinsic::x86_avx512_vcvtsd2usi64: 2281 case llvm::Intrinsic::x86_avx512_vcvtsd2usi32: 2282 case llvm::Intrinsic::x86_avx512_vcvtss2usi64: 2283 case llvm::Intrinsic::x86_avx512_vcvtss2usi32: 2284 case llvm::Intrinsic::x86_avx512_cvttss2usi64: 2285 case llvm::Intrinsic::x86_avx512_cvttss2usi: 2286 case llvm::Intrinsic::x86_avx512_cvttsd2usi64: 2287 case llvm::Intrinsic::x86_avx512_cvttsd2usi: 2288 case llvm::Intrinsic::x86_avx512_cvtusi2sd: 2289 case llvm::Intrinsic::x86_avx512_cvtusi2ss: 2290 case llvm::Intrinsic::x86_avx512_cvtusi642sd: 2291 case llvm::Intrinsic::x86_avx512_cvtusi642ss: 2292 case llvm::Intrinsic::x86_sse2_cvtsd2si64: 2293 case llvm::Intrinsic::x86_sse2_cvtsd2si: 2294 case llvm::Intrinsic::x86_sse2_cvtsd2ss: 2295 case llvm::Intrinsic::x86_sse2_cvtsi2sd: 2296 case llvm::Intrinsic::x86_sse2_cvtsi642sd: 2297 case llvm::Intrinsic::x86_sse2_cvtss2sd: 2298 case llvm::Intrinsic::x86_sse2_cvttsd2si64: 2299 case llvm::Intrinsic::x86_sse2_cvttsd2si: 2300 case llvm::Intrinsic::x86_sse_cvtsi2ss: 2301 case llvm::Intrinsic::x86_sse_cvtsi642ss: 2302 case llvm::Intrinsic::x86_sse_cvtss2si64: 2303 case llvm::Intrinsic::x86_sse_cvtss2si: 2304 case llvm::Intrinsic::x86_sse_cvttss2si64: 2305 case llvm::Intrinsic::x86_sse_cvttss2si: 2306 handleVectorConvertIntrinsic(I, 1); 2307 break; 2308 case llvm::Intrinsic::x86_sse2_cvtdq2pd: 2309 case llvm::Intrinsic::x86_sse2_cvtps2pd: 2310 case llvm::Intrinsic::x86_sse_cvtps2pi: 2311 case llvm::Intrinsic::x86_sse_cvttps2pi: 2312 handleVectorConvertIntrinsic(I, 2); 2313 break; 2314 case llvm::Intrinsic::x86_avx2_psll_w: 2315 case llvm::Intrinsic::x86_avx2_psll_d: 2316 case llvm::Intrinsic::x86_avx2_psll_q: 2317 case llvm::Intrinsic::x86_avx2_pslli_w: 2318 case llvm::Intrinsic::x86_avx2_pslli_d: 2319 case llvm::Intrinsic::x86_avx2_pslli_q: 2320 case llvm::Intrinsic::x86_avx2_psrl_w: 2321 case llvm::Intrinsic::x86_avx2_psrl_d: 2322 case llvm::Intrinsic::x86_avx2_psrl_q: 2323 case llvm::Intrinsic::x86_avx2_psra_w: 2324 case llvm::Intrinsic::x86_avx2_psra_d: 2325 case llvm::Intrinsic::x86_avx2_psrli_w: 2326 case llvm::Intrinsic::x86_avx2_psrli_d: 2327 case llvm::Intrinsic::x86_avx2_psrli_q: 2328 case llvm::Intrinsic::x86_avx2_psrai_w: 2329 case 
llvm::Intrinsic::x86_avx2_psrai_d: 2330 case llvm::Intrinsic::x86_sse2_psll_w: 2331 case llvm::Intrinsic::x86_sse2_psll_d: 2332 case llvm::Intrinsic::x86_sse2_psll_q: 2333 case llvm::Intrinsic::x86_sse2_pslli_w: 2334 case llvm::Intrinsic::x86_sse2_pslli_d: 2335 case llvm::Intrinsic::x86_sse2_pslli_q: 2336 case llvm::Intrinsic::x86_sse2_psrl_w: 2337 case llvm::Intrinsic::x86_sse2_psrl_d: 2338 case llvm::Intrinsic::x86_sse2_psrl_q: 2339 case llvm::Intrinsic::x86_sse2_psra_w: 2340 case llvm::Intrinsic::x86_sse2_psra_d: 2341 case llvm::Intrinsic::x86_sse2_psrli_w: 2342 case llvm::Intrinsic::x86_sse2_psrli_d: 2343 case llvm::Intrinsic::x86_sse2_psrli_q: 2344 case llvm::Intrinsic::x86_sse2_psrai_w: 2345 case llvm::Intrinsic::x86_sse2_psrai_d: 2346 case llvm::Intrinsic::x86_mmx_psll_w: 2347 case llvm::Intrinsic::x86_mmx_psll_d: 2348 case llvm::Intrinsic::x86_mmx_psll_q: 2349 case llvm::Intrinsic::x86_mmx_pslli_w: 2350 case llvm::Intrinsic::x86_mmx_pslli_d: 2351 case llvm::Intrinsic::x86_mmx_pslli_q: 2352 case llvm::Intrinsic::x86_mmx_psrl_w: 2353 case llvm::Intrinsic::x86_mmx_psrl_d: 2354 case llvm::Intrinsic::x86_mmx_psrl_q: 2355 case llvm::Intrinsic::x86_mmx_psra_w: 2356 case llvm::Intrinsic::x86_mmx_psra_d: 2357 case llvm::Intrinsic::x86_mmx_psrli_w: 2358 case llvm::Intrinsic::x86_mmx_psrli_d: 2359 case llvm::Intrinsic::x86_mmx_psrli_q: 2360 case llvm::Intrinsic::x86_mmx_psrai_w: 2361 case llvm::Intrinsic::x86_mmx_psrai_d: 2362 handleVectorShiftIntrinsic(I, /* Variable */ false); 2363 break; 2364 case llvm::Intrinsic::x86_avx2_psllv_d: 2365 case llvm::Intrinsic::x86_avx2_psllv_d_256: 2366 case llvm::Intrinsic::x86_avx2_psllv_q: 2367 case llvm::Intrinsic::x86_avx2_psllv_q_256: 2368 case llvm::Intrinsic::x86_avx2_psrlv_d: 2369 case llvm::Intrinsic::x86_avx2_psrlv_d_256: 2370 case llvm::Intrinsic::x86_avx2_psrlv_q: 2371 case llvm::Intrinsic::x86_avx2_psrlv_q_256: 2372 case llvm::Intrinsic::x86_avx2_psrav_d: 2373 case llvm::Intrinsic::x86_avx2_psrav_d_256: 2374 handleVectorShiftIntrinsic(I, /* Variable */ true); 2375 break; 2376 2377 case llvm::Intrinsic::x86_sse2_packsswb_128: 2378 case llvm::Intrinsic::x86_sse2_packssdw_128: 2379 case llvm::Intrinsic::x86_sse2_packuswb_128: 2380 case llvm::Intrinsic::x86_sse41_packusdw: 2381 case llvm::Intrinsic::x86_avx2_packsswb: 2382 case llvm::Intrinsic::x86_avx2_packssdw: 2383 case llvm::Intrinsic::x86_avx2_packuswb: 2384 case llvm::Intrinsic::x86_avx2_packusdw: 2385 handleVectorPackIntrinsic(I); 2386 break; 2387 2388 case llvm::Intrinsic::x86_mmx_packsswb: 2389 case llvm::Intrinsic::x86_mmx_packuswb: 2390 handleVectorPackIntrinsic(I, 16); 2391 break; 2392 2393 case llvm::Intrinsic::x86_mmx_packssdw: 2394 handleVectorPackIntrinsic(I, 32); 2395 break; 2396 2397 case llvm::Intrinsic::x86_mmx_psad_bw: 2398 case llvm::Intrinsic::x86_sse2_psad_bw: 2399 case llvm::Intrinsic::x86_avx2_psad_bw: 2400 handleVectorSadIntrinsic(I); 2401 break; 2402 2403 case llvm::Intrinsic::x86_sse2_pmadd_wd: 2404 case llvm::Intrinsic::x86_avx2_pmadd_wd: 2405 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128: 2406 case llvm::Intrinsic::x86_avx2_pmadd_ub_sw: 2407 handleVectorPmaddIntrinsic(I); 2408 break; 2409 2410 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw: 2411 handleVectorPmaddIntrinsic(I, 8); 2412 break; 2413 2414 case llvm::Intrinsic::x86_mmx_pmadd_wd: 2415 handleVectorPmaddIntrinsic(I, 16); 2416 break; 2417 2418 default: 2419 if (!handleUnknownIntrinsic(I)) 2420 visitInstruction(I); 2421 break; 2422 } 2423 } 2424 2425 void visitCallSite(CallSite CS) { 2426 Instruction &I = 
*CS.getInstruction(); 2427 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite"); 2428 if (CS.isCall()) { 2429 CallInst *Call = cast<CallInst>(&I); 2430 2431 // For inline asm, do the usual thing: check argument shadow and mark all 2432 // outputs as clean. Note that any side effects of the inline asm that are 2433 // not immediately visible in its constraints are not handled. 2434 if (Call->isInlineAsm()) { 2435 visitInstruction(I); 2436 return; 2437 } 2438 2439 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere"); 2440 2441 // We are going to insert code that relies on the fact that the callee 2442 // will become a non-readonly function after it is instrumented by us. To 2443 // prevent this code from being optimized out, mark that function 2444 // non-readonly in advance. 2445 if (Function *Func = Call->getCalledFunction()) { 2446 // Clear out readonly/readnone attributes. 2447 AttrBuilder B; 2448 B.addAttribute(Attribute::ReadOnly) 2449 .addAttribute(Attribute::ReadNone); 2450 Func->removeAttributes(AttributeSet::FunctionIndex, 2451 AttributeSet::get(Func->getContext(), 2452 AttributeSet::FunctionIndex, 2453 B)); 2454 } 2455 } 2456 IRBuilder<> IRB(&I); 2457 2458 unsigned ArgOffset = 0; 2459 DEBUG(dbgs() << " CallSite: " << I << "\n"); 2460 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); 2461 ArgIt != End; ++ArgIt) { 2462 Value *A = *ArgIt; 2463 unsigned i = ArgIt - CS.arg_begin(); 2464 if (!A->getType()->isSized()) { 2465 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); 2466 continue; 2467 } 2468 unsigned Size = 0; 2469 Value *Store = nullptr; 2470 // Compute the Shadow for arg even if it is ByVal, because 2471 // in that case getShadow() will copy the actual arg shadow to 2472 // __msan_param_tls. 2473 Value *ArgShadow = getShadow(A); 2474 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset); 2475 DEBUG(dbgs() << " Arg#" << i << ": " << *A << 2476 " Shadow: " << *ArgShadow << "\n"); 2477 bool ArgIsInitialized = false; 2478 const DataLayout &DL = F.getParent()->getDataLayout(); 2479 if (CS.paramHasAttr(i + 1, Attribute::ByVal)) { 2480 assert(A->getType()->isPointerTy() && 2481 "ByVal argument is not a pointer!"); 2482 Size = DL.getTypeAllocSize(A->getType()->getPointerElementType()); 2483 if (ArgOffset + Size > kParamTLSSize) break; 2484 unsigned ParamAlignment = CS.getParamAlignment(i + 1); 2485 unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment); 2486 Store = IRB.CreateMemCpy(ArgShadowBase, 2487 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB), 2488 Size, Alignment); 2489 } else { 2490 Size = DL.getTypeAllocSize(A->getType()); 2491 if (ArgOffset + Size > kParamTLSSize) break; 2492 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, 2493 kShadowTLSAlignment); 2494 Constant *Cst = dyn_cast<Constant>(ArgShadow); 2495 if (Cst && Cst->isNullValue()) ArgIsInitialized = true; 2496 } 2497 if (MS.TrackOrigins && !ArgIsInitialized) 2498 IRB.CreateStore(getOrigin(A), 2499 getOriginPtrForArgument(A, IRB, ArgOffset)); 2500 (void)Store; 2501 assert(Size != 0 && Store != nullptr); 2502 DEBUG(dbgs() << " Param:" << *Store << "\n"); 2503 ArgOffset += alignTo(Size, 8); 2504 } 2505 DEBUG(dbgs() << " done with call args\n"); 2506 2507 FunctionType *FT = 2508 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0)); 2509 if (FT->isVarArg()) { 2510 VAHelper->visitCallSite(CS, IRB); 2511 } 2512 2513 // Now, get the shadow for the RetVal. 
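    // Sketch of the emitted IR around a call (illustrative only):
    //   store <clean shadow>, @__msan_retval_tls ; conservative pre-call store
    //   %call = call i32 @f(...)
    //   %_msret = load @__msan_retval_tls        ; shadow written by callee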
2514 if (!I.getType()->isSized()) return; 2515 // Don't emit the epilogue for musttail call returns. 2516 if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return; 2517 IRBuilder<> IRBBefore(&I); 2518 // Until we have full dynamic coverage, make sure the retval shadow is 0. 2519 Value *Base = getShadowPtrForRetval(&I, IRBBefore); 2520 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment); 2521 BasicBlock::iterator NextInsn; 2522 if (CS.isCall()) { 2523 NextInsn = ++I.getIterator(); 2524 assert(NextInsn != I.getParent()->end()); 2525 } else { 2526 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest(); 2527 if (!NormalDest->getSinglePredecessor()) { 2528 // FIXME: this case is tricky, so we are just conservative here. 2529 // Perhaps we need to split the edge between this BB and NormalDest, 2530 // but a naive attempt to use SplitEdge leads to a crash. 2531 setShadow(&I, getCleanShadow(&I)); 2532 setOrigin(&I, getCleanOrigin()); 2533 return; 2534 } 2535 NextInsn = NormalDest->getFirstInsertionPt(); 2536 assert(NextInsn != NormalDest->end() && 2537 "Could not find insertion point for retval shadow load"); 2538 } 2539 IRBuilder<> IRBAfter(&*NextInsn); 2540 Value *RetvalShadow = 2541 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter), 2542 kShadowTLSAlignment, "_msret"); 2543 setShadow(&I, RetvalShadow); 2544 if (MS.TrackOrigins) 2545 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter))); 2546 } 2547 2548 bool isAMustTailRetVal(Value *RetVal) { 2549 if (auto *I = dyn_cast<BitCastInst>(RetVal)) { 2550 RetVal = I->getOperand(0); 2551 } 2552 if (auto *I = dyn_cast<CallInst>(RetVal)) { 2553 return I->isMustTailCall(); 2554 } 2555 return false; 2556 } 2557 2558 void visitReturnInst(ReturnInst &I) { 2559 IRBuilder<> IRB(&I); 2560 Value *RetVal = I.getReturnValue(); 2561 if (!RetVal) return; 2562 // Don't emit the epilogue for musttail call returns. 
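    // (A musttail callee has already stored the correct retval shadow to TLS,
    // and no instructions may be inserted between the musttail call, the
    // optional bitcast and the ret; see visitBitCastInst above.)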
2563 if (isAMustTailRetVal(RetVal)) return; 2564 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB); 2565 if (CheckReturnValue) { 2566 insertShadowCheck(RetVal, &I); 2567 Value *Shadow = getCleanShadow(RetVal); 2568 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); 2569 } else { 2570 Value *Shadow = getShadow(RetVal); 2571 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); 2572 // FIXME: make it conditional if ClStoreCleanOrigin==0 2573 if (MS.TrackOrigins) 2574 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB)); 2575 } 2576 } 2577 2578 void visitPHINode(PHINode &I) { 2579 IRBuilder<> IRB(&I); 2580 if (!PropagateShadow) { 2581 setShadow(&I, getCleanShadow(&I)); 2582 setOrigin(&I, getCleanOrigin()); 2583 return; 2584 } 2585 2586 ShadowPHINodes.push_back(&I); 2587 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(), 2588 "_msphi_s")); 2589 if (MS.TrackOrigins) 2590 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), 2591 "_msphi_o")); 2592 } 2593 2594 void visitAllocaInst(AllocaInst &I) { 2595 setShadow(&I, getCleanShadow(&I)); 2596 setOrigin(&I, getCleanOrigin()); 2597 IRBuilder<> IRB(I.getNextNode()); 2598 const DataLayout &DL = F.getParent()->getDataLayout(); 2599 uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType()); 2600 if (PoisonStack && ClPoisonStackWithCall) { 2601 IRB.CreateCall(MS.MsanPoisonStackFn, 2602 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), 2603 ConstantInt::get(MS.IntptrTy, Size)}); 2604 } else { 2605 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB); 2606 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0); 2607 IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment()); 2608 } 2609 2610 if (PoisonStack && MS.TrackOrigins) { 2611 SmallString<2048> StackDescriptionStorage; 2612 raw_svector_ostream StackDescription(StackDescriptionStorage); 2613 // We create a string with a description of the stack allocation and 2614 // pass it into __msan_set_alloca_origin. 2615 // It will be printed by the run-time if stack-originated UMR is found. 2616 // The first 4 bytes of the string are set to '----' and will be replaced 2617 // by __msan_va_arg_overflow_size_tls at the first call. 2618 StackDescription << "----" << I.getName() << "@" << F.getName(); 2619 Value *Descr = 2620 createPrivateNonConstGlobalForString(*F.getParent(), 2621 StackDescription.str()); 2622 2623 IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn, 2624 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), 2625 ConstantInt::get(MS.IntptrTy, Size), 2626 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()), 2627 IRB.CreatePointerCast(&F, MS.IntptrTy)}); 2628 } 2629 } 2630 2631 void visitSelectInst(SelectInst& I) { 2632 IRBuilder<> IRB(&I); 2633 // a = select b, c, d 2634 Value *B = I.getCondition(); 2635 Value *C = I.getTrueValue(); 2636 Value *D = I.getFalseValue(); 2637 Value *Sb = getShadow(B); 2638 Value *Sc = getShadow(C); 2639 Value *Sd = getShadow(D); 2640 2641 // Result shadow if condition shadow is 0. 2642 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd); 2643 Value *Sa1; 2644 if (I.getType()->isAggregateType()) { 2645 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do 2646 // an extra "select". This results in much more compact IR. 2647 // Sa = select Sb, poisoned, (select b, Sc, Sd) 2648 Sa1 = getPoisonedShadow(getShadowTy(I.getType())); 2649 } else { 2650 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? 
Sc : Sd ] 2651 // If Sb (condition is poisoned), look for bits in c and d that are equal 2652 // and both unpoisoned. 2653 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd. 2654 2655 // Cast arguments to shadow-compatible type. 2656 C = CreateAppToShadowCast(IRB, C); 2657 D = CreateAppToShadowCast(IRB, D); 2658 2659 // Result shadow if condition shadow is 1. 2660 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd)); 2661 } 2662 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select"); 2663 setShadow(&I, Sa); 2664 if (MS.TrackOrigins) { 2665 // Origins are always i32, so any vector conditions must be flattened. 2666 // FIXME: consider tracking vector origins for app vectors? 2667 if (B->getType()->isVectorTy()) { 2668 Type *FlatTy = getShadowTyNoVec(B->getType()); 2669 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy), 2670 ConstantInt::getNullValue(FlatTy)); 2671 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy), 2672 ConstantInt::getNullValue(FlatTy)); 2673 } 2674 // a = select b, c, d 2675 // Oa = Sb ? Ob : (b ? Oc : Od) 2676 setOrigin( 2677 &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()), 2678 IRB.CreateSelect(B, getOrigin(I.getTrueValue()), 2679 getOrigin(I.getFalseValue())))); 2680 } 2681 } 2682 2683 void visitLandingPadInst(LandingPadInst &I) { 2684 // Do nothing. 2685 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1 2686 setShadow(&I, getCleanShadow(&I)); 2687 setOrigin(&I, getCleanOrigin()); 2688 } 2689 2690 void visitCatchSwitchInst(CatchSwitchInst &I) { 2691 setShadow(&I, getCleanShadow(&I)); 2692 setOrigin(&I, getCleanOrigin()); 2693 } 2694 2695 void visitFuncletPadInst(FuncletPadInst &I) { 2696 setShadow(&I, getCleanShadow(&I)); 2697 setOrigin(&I, getCleanOrigin()); 2698 } 2699 2700 void visitGetElementPtrInst(GetElementPtrInst &I) { 2701 handleShadowOr(I); 2702 } 2703 2704 void visitExtractValueInst(ExtractValueInst &I) { 2705 IRBuilder<> IRB(&I); 2706 Value *Agg = I.getAggregateOperand(); 2707 DEBUG(dbgs() << "ExtractValue: " << I << "\n"); 2708 Value *AggShadow = getShadow(Agg); 2709 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); 2710 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); 2711 DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); 2712 setShadow(&I, ResShadow); 2713 setOriginForNaryOp(I); 2714 } 2715 2716 void visitInsertValueInst(InsertValueInst &I) { 2717 IRBuilder<> IRB(&I); 2718 DEBUG(dbgs() << "InsertValue: " << I << "\n"); 2719 Value *AggShadow = getShadow(I.getAggregateOperand()); 2720 Value *InsShadow = getShadow(I.getInsertedValueOperand()); 2721 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); 2722 DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); 2723 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); 2724 DEBUG(dbgs() << " Res: " << *Res << "\n"); 2725 setShadow(&I, Res); 2726 setOriginForNaryOp(I); 2727 } 2728 2729 void dumpInst(Instruction &I) { 2730 if (CallInst *CI = dyn_cast<CallInst>(&I)) { 2731 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n"; 2732 } else { 2733 errs() << "ZZZ " << I.getOpcodeName() << "\n"; 2734 } 2735 errs() << "QQQ " << I << "\n"; 2736 } 2737 2738 void visitResumeInst(ResumeInst &I) { 2739 DEBUG(dbgs() << "Resume: " << I << "\n"); 2740 // Nothing to do here. 2741 } 2742 2743 void visitCleanupReturnInst(CleanupReturnInst &CRI) { 2744 DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n"); 2745 // Nothing to do here. 
2746 } 2747 2748 void visitCatchReturnInst(CatchReturnInst &CRI) { 2749 DEBUG(dbgs() << "CatchReturn: " << CRI << "\n"); 2750 // Nothing to do here. 2751 } 2752 2753 void visitInstruction(Instruction &I) { 2754 // Everything else: stop propagating and check for poisoned shadow. 2755 if (ClDumpStrictInstructions) 2756 dumpInst(I); 2757 DEBUG(dbgs() << "DEFAULT: " << I << "\n"); 2758 for (size_t i = 0, n = I.getNumOperands(); i < n; i++) 2759 insertShadowCheck(I.getOperand(i), &I); 2760 setShadow(&I, getCleanShadow(&I)); 2761 setOrigin(&I, getCleanOrigin()); 2762 } 2763 }; 2764 2765 /// \brief AMD64-specific implementation of VarArgHelper. 2766 struct VarArgAMD64Helper : public VarArgHelper { 2767 // An unfortunate workaround for asymmetric lowering of va_arg stuff. 2768 // See a comment in visitCallSite for more details. 2769 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7 2770 static const unsigned AMD64FpEndOffset = 176; 2771 2772 Function &F; 2773 MemorySanitizer &MS; 2774 MemorySanitizerVisitor &MSV; 2775 Value *VAArgTLSCopy; 2776 Value *VAArgOverflowSize; 2777 2778 SmallVector<CallInst*, 16> VAStartInstrumentationList; 2779 2780 VarArgAMD64Helper(Function &F, MemorySanitizer &MS, 2781 MemorySanitizerVisitor &MSV) 2782 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr), 2783 VAArgOverflowSize(nullptr) {} 2784 2785 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory }; 2786 2787 ArgKind classifyArgument(Value* arg) { 2788 // A very rough approximation of X86_64 argument classification rules. 2789 Type *T = arg->getType(); 2790 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy()) 2791 return AK_FloatingPoint; 2792 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) 2793 return AK_GeneralPurpose; 2794 if (T->isPointerTy()) 2795 return AK_GeneralPurpose; 2796 return AK_Memory; 2797 } 2798 2799 // For VarArg functions, store the argument shadow in an ABI-specific format 2800 // that corresponds to va_list layout. 2801 // We do this because Clang lowers va_arg in the frontend, and this pass 2802 // only sees the low level code that deals with va_list internals. 2803 // A much easier alternative (provided that Clang emits va_arg instructions) 2804 // would have been to associate each live instance of va_list with a copy of 2805 // MSanParamTLS, and extract shadow on va_arg() call in the argument list 2806 // order. 2807 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override { 2808 unsigned GpOffset = 0; 2809 unsigned FpOffset = AMD64GpEndOffset; 2810 unsigned OverflowOffset = AMD64FpEndOffset; 2811 const DataLayout &DL = F.getParent()->getDataLayout(); 2812 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); 2813 ArgIt != End; ++ArgIt) { 2814 Value *A = *ArgIt; 2815 unsigned ArgNo = CS.getArgumentNo(ArgIt); 2816 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal); 2817 if (IsByVal) { 2818 // ByVal arguments always go to the overflow area. 
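        // E.g. for "call void (i32, ...) @f(i32 0, %struct.S* byval %p)" (an
        // illustrative signature), the shadow of *%p is copied byte-for-byte
        // into the overflow area of the va_arg TLS, not a register save slot.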
2819 assert(A->getType()->isPointerTy()); 2820 Type *RealTy = A->getType()->getPointerElementType(); 2821 uint64_t ArgSize = DL.getTypeAllocSize(RealTy); 2822 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); 2823 OverflowOffset += alignTo(ArgSize, 8); 2824 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB), 2825 ArgSize, kShadowTLSAlignment); 2826 } else { 2827 ArgKind AK = classifyArgument(A); 2828 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset) 2829 AK = AK_Memory; 2830 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset) 2831 AK = AK_Memory; 2832 Value *Base; 2833 switch (AK) { 2834 case AK_GeneralPurpose: 2835 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset); 2836 GpOffset += 8; 2837 break; 2838 case AK_FloatingPoint: 2839 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset); 2840 FpOffset += 16; 2841 break; 2842 case AK_Memory: 2843 uint64_t ArgSize = DL.getTypeAllocSize(A->getType()); 2844 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset); 2845 OverflowOffset += alignTo(ArgSize, 8); 2846 } 2847 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); 2848 } 2849 } 2850 Constant *OverflowSize = 2851 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset); 2852 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS); 2853 } 2854 2855 /// \brief Compute the shadow address for a given va_arg. 2856 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, 2857 int ArgOffset) { 2858 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); 2859 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); 2860 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0), 2861 "_msarg"); 2862 } 2863 2864 void visitVAStartInst(VAStartInst &I) override { 2865 if (F.getCallingConv() == CallingConv::X86_64_Win64) 2866 return; 2867 IRBuilder<> IRB(&I); 2868 VAStartInstrumentationList.push_back(&I); 2869 Value *VAListTag = I.getArgOperand(0); 2870 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); 2871 2872 // Unpoison the whole __va_list_tag. 2873 // FIXME: magic ABI constants. 2874 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), 2875 /* size */24, /* alignment */8, false); 2876 } 2877 2878 void visitVACopyInst(VACopyInst &I) override { 2879 if (F.getCallingConv() == CallingConv::X86_64_Win64) 2880 return; 2881 IRBuilder<> IRB(&I); 2882 Value *VAListTag = I.getArgOperand(0); 2883 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); 2884 2885 // Unpoison the whole __va_list_tag. 2886 // FIXME: magic ABI constants. 2887 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), 2888 /* size */24, /* alignment */8, false); 2889 } 2890 2891 void finalizeInstrumentation() override { 2892 assert(!VAArgOverflowSize && !VAArgTLSCopy && 2893 "finalizeInstrumentation called twice"); 2894 if (!VAStartInstrumentationList.empty()) { 2895 // If there is a va_start in this function, make a backup copy of 2896 // va_arg_tls somewhere in the function entry block. 2897 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); 2898 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); 2899 Value *CopySize = 2900 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), 2901 VAArgOverflowSize); 2902 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); 2903 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); 2904 } 2905 2906 // Instrument va_start. 
2907   // Copy va_list shadow from the backup copy of the TLS contents.
2908   for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
2909     CallInst *OrigInst = VAStartInstrumentationList[i];
2910     IRBuilder<> IRB(OrigInst->getNextNode());
2911     Value *VAListTag = OrigInst->getArgOperand(0);
2912
2913     Value *RegSaveAreaPtrPtr =
2914         IRB.CreateIntToPtr(
2915             IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2916                           ConstantInt::get(MS.IntptrTy, 16)),
2917             Type::getInt64PtrTy(*MS.C));
2918     Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
2919     Value *RegSaveAreaShadowPtr =
2920         MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
2921     IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
2922                      AMD64FpEndOffset, 16);
2923
2924     Value *OverflowArgAreaPtrPtr =
2925         IRB.CreateIntToPtr(
2926             IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2927                           ConstantInt::get(MS.IntptrTy, 8)),
2928             Type::getInt64PtrTy(*MS.C));
2929     Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
2930     Value *OverflowArgAreaShadowPtr =
2931         MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
2932     Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
2933                                            AMD64FpEndOffset);
2934     IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
2935   }
2936 }
2937 };
2938
2939 /// \brief MIPS64-specific implementation of VarArgHelper.
2940 struct VarArgMIPS64Helper : public VarArgHelper {
2941   Function &F;
2942   MemorySanitizer &MS;
2943   MemorySanitizerVisitor &MSV;
2944   Value *VAArgTLSCopy;
2945   Value *VAArgSize;
2946
2947   SmallVector<CallInst*, 16> VAStartInstrumentationList;
2948
2949   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
2950                      MemorySanitizerVisitor &MSV)
2951     : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2952       VAArgSize(nullptr) {}
2953
2954   void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2955     unsigned VAArgOffset = 0;
2956     const DataLayout &DL = F.getParent()->getDataLayout();
2957     for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
2958          ArgIt != End; ++ArgIt) {
2959       Value *A = *ArgIt;
2960       Value *Base;
2961       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
2962 #if defined(__MIPSEB__) || defined(MIPSEB)
2963       // Adjust the shadow for arguments with size < 8 to match the placement
2964       // of bits in a big-endian system.
2965       if (ArgSize < 8)
2966         VAArgOffset += (8 - ArgSize);
2967 #endif
2968       Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
2969       VAArgOffset += ArgSize;
2970       VAArgOffset = alignTo(VAArgOffset, 8);
2971       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
2972     }
2973
2974     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
2975     // Here we use VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
2976     // a new class member; i.e. it holds the total size of all VarArgs.
2977     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
2978   }
2979
2980   /// \brief Compute the shadow address for a given va_arg.
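  /// (Roughly &__msan_va_arg_tls[ArgOffset]: the result is VAArgTLS plus
  /// ArgOffset, cast back to a pointer to the shadow type. This describes the
  /// computation below, not a separate runtime call.)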

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, /* alignment */ 8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, /* alignment */ 8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
    Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                    VAArgSize);

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                           Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
    }
  }
};
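
// All of the getShadowPtrForVAArgument() helpers compute the same address;
// in C-like pseudocode (a sketch: __msan_va_arg_tls names the runtime TLS
// array that MS.VAArgTLS refers to):
//
//   shadow = (ShadowTy *)((char *)__msan_va_arg_tls + ArgOffset);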

/// \brief AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelper {
  static const unsigned kAArch64GrArgSize = 56;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset + 8;
  static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
                                             + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
      VAArgOverflowSize(nullptr) {}

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value* arg) {
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
        || (T->isPointerTy()))
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // The instrumentation stores the argument shadow in a non-ABI-specific
  // format because it does not know which arguments are named (Clang, as in
  // the x86_64 case, lowers va_arg in the frontend, so this pass only sees
  // the low-level code that manipulates the va_list internals).
  // The first seven GR registers are saved in the first 56 bytes of the
  // va_arg TLS array, followed by the first 8 FP/SIMD registers, and then
  // the remaining arguments.
  // Using constant offsets within the va_arg TLS array allows fast copying
  // in the finalize instrumentation.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getParent()->getDataLayout();
    for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
        GrOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
        VrOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
        OverflowOffset += alignTo(ArgSize, 8);
        break;
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
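
  // Worked example (a sketch): given a call such as foo(fmt, 1L, 2.0, agg)
  // where agg is an aggregate passed directly by value at the IR level, the
  // loop above stores the shadow of 1L at TLS offset 0 (GrOffset advances
  // 0 -> 8), the shadow of 2.0 at offset 64 (VrOffset advances 64 -> 80),
  // and the shadow of agg starting at offset 192 (AArch64VAEndOffset),
  // rounded up to an 8-byte multiple.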

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants (size of va_list).
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 32, /* alignment */ 8, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants (size of va_list).
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 32, /* alignment */ 8, false);
  }

  // Retrieve a va_list field of 'void*' size.
  Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
      IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        Type::getInt64PtrTy(*MS.C));
    return IRB.CreateLoad(SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size.
  Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
      IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        Type::getInt32PtrTy(*MS.C));
    Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
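
  // For reference, the byte offsets passed to the helpers above correspond
  // to the AAPCS64 va_list layout (a sketch; the struct itself is not
  // visible to this pass):
  //
  //   typedef struct va_list {
  //     void *__stack;    /* byte offset  0: next stacked argument */
  //     void *__gr_top;   /* byte offset  8: end of the GR save area */
  //     void *__vr_top;   /* byte offset 16: end of the VR save area */
  //     int   __gr_offs;  /* byte offset 24: negative offset from __gr_top */
  //     int   __vr_offs;  /* byte offset 28: negative offset from __vr_top */
  //   } va_list;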

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start, copy va_list shadow from the backup copy of
    // the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());

      Value *VAListTag = OrigInst->getArgOperand(0);

      // The variadic ABI for AArch64 creates two areas to save the incoming
      // argument registers (one for the 64-bit general-purpose registers
      // xn..x7 and another for the 128-bit FP/SIMD registers vn..v7, where n
      // is the number of named register arguments).
      // We then need to propagate the shadow arguments to both regions,
      // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
      // The remaining arguments get their shadow propagated at 'va::__stack'.
      // One caveat: only the unnamed arguments need to be propagated, yet the
      // call site instrumentation saves the shadow of *all* arguments. So to
      // copy the shadow values from the va_arg TLS array we need to adjust
      // the offsets into both the GR and VR regions based on the
      // __{gr,vr}_offs values (which are set according to the incoming named
      // arguments).

      // Read the stack pointer from the va_list.
      Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);

      // Read both the __gr_top and __gr_off and add them up.
      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);

      Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);

      // Read both the __vr_top and __vr_off and add them up.
      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);

      Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);

      // We do not know how many named arguments were used, and at the call
      // site all the arguments were saved. Since __gr_offs is defined as
      // '0 - ((8 - named_gr) * 8)', the idea is to propagate only the
      // variadic arguments' shadow by skipping the bytes of shadow that
      // correspond to the named arguments.
      Value *GrRegSaveAreaShadowPtrOff =
        IRB.CreateAdd(GrArgSize, GrOffSaveArea);

      Value *GrRegSaveAreaShadowPtr =
        MSV.getShadowPtr(GrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);

      Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                                              GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, GrSrcPtr, GrCopySize, 8);

      // Again, but for FP/SIMD values.
      Value *VrRegSaveAreaShadowPtrOff =
        IRB.CreateAdd(VrArgSize, VrOffSaveArea);

      Value *VrRegSaveAreaShadowPtr =
        MSV.getShadowPtr(VrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);

      Value *VrSrcPtr = IRB.CreateInBoundsGEP(
        IRB.getInt8Ty(),
        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                              IRB.getInt32(AArch64VrBegOffset)),
        VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, VrSrcPtr, VrCopySize, 8);

      // And finally for the remaining arguments.
      Value *StackSaveAreaShadowPtr =
        MSV.getShadowPtr(StackSaveAreaPtr, IRB.getInt8Ty(), IRB);

      Value *StackSrcPtr =
        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                              IRB.getInt32(AArch64VAEndOffset));

      IRB.CreateMemCpy(StackSaveAreaShadowPtr, StackSrcPtr,
                       VAArgOverflowSize, 16);
    }
  }
};
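
// Worked example for the GR copy in finalizeInstrumentation() above (a
// sketch): with one named general-purpose argument, va_start sets
// __gr_offs = 0 - ((8 - 1) * 8) = -56, so GrRegSaveAreaShadowPtrOff is
// 56 + (-56) = 0 and GrCopySize is 56 - 0 = 56 bytes; the shadow of all
// seven potentially-variadic x registers is copied into the shadow of the
// register save area.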

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64, MIPS64 and AArch64.
  // False positives are possible on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
           TargetTriple.getArch() == llvm::Triple::mips64el)
    return new VarArgMIPS64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::aarch64)
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

} // anonymous namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  if (&F == MsanCtorFunction)
    return false;
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out the readonly/readnone attributes: instrumented functions read
  // and write shadow and origin state, so these attributes no longer hold.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}
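
// Typical usage (a sketch, assuming the legacy pass manager and the
// createMemorySanitizerPass() factory declared in
// llvm/Transforms/Instrumentation.h):
//
//   legacy::PassManager PM;
//   PM.add(createMemorySanitizerPass(/*TrackOrigins=*/0));
//   PM.run(M);
//
// Clang schedules the pass this way when given -fsanitize=memory.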