1 //===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
11 /// reads.
12 ///
13 /// The algorithm of the tool is similar to Memcheck
14 /// (http://goo.gl/QKbem). We associate a few shadow bits with every
15 /// byte of the application memory, poison the shadow of the malloc-ed
16 /// or alloca-ed memory, load the shadow bits on every memory read,
17 /// propagate the shadow bits through some of the arithmetic
18 /// instructions (including MOV), store the shadow bits on every memory
19 /// write, and report a bug on certain other instructions (e.g. JMP) if
20 /// the associated shadow is poisoned.
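///
/// For example, for a simple copy `b = a` followed by a branch on `b`, the
/// instrumented code conceptually looks like this (a sketch of the idea, not
/// the exact IR this pass emits):
///
///   shadow(b) = shadow(a);     // propagate shadow through the copy
///   b = a;                     // original application code
///   ...
///   if (shadow(b) != 0)        // before the branch that uses b
///     __msan_warning();        // report a use of an uninitialized value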
21 ///
22 /// But there are differences too. The first and the major one:
23 /// compiler instrumentation instead of binary instrumentation. This
24 /// gives us much better register allocation, possible compiler
25 /// optimizations, and a fast start-up. But it also brings a major issue:
26 /// msan needs to see all program events, including system
27 /// calls and reads/writes in system libraries, so we either need to
28 /// compile *everything* with msan or use a binary translation
29 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
33 /// shadow updates (Memcheck is single-threaded so races are not a
34 /// concern there; Memcheck uses 2 shadow bits per byte, with slow-path
35 /// storage that uses 8 bits per byte).
36 ///
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
38 ///
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
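///
/// For example, around a call site the pass conceptually emits (a sketch;
/// the names match the TLS globals declared below in initializeCallbacks):
///
///   __msan_param_tls[...] = shadow(arg0), shadow(arg1), ...;  // caller side
///   r = callee(arg0, arg1);
///   shadow(r) = __msan_retval_tls;                            // caller side
///
/// while an instrumented callee reads its argument shadow from
/// __msan_param_tls and writes its return value shadow to __msan_retval_tls
/// just before returning.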
44 ///
45 ///                           Origin tracking.
46 ///
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
50 ///
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
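///
/// For example, for `c = a + b` the origin propagation is conceptually
/// (a sketch):
///
///   shadow(c) = shadow(a) | shadow(b);
///   origin(c) = (shadow(b) != 0) ? origin(b) : origin(a);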
56 ///
57 /// Every aligned group of 4 consecutive bytes of application memory has one
58 /// origin value associated with it. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61 /// practice.
62 ///
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
64 /// avoids storing origin to memory when a fully initialized value is stored.
65 /// This way it avoids needlessly overwriting the origin of the 4-byte region
66 /// on a short (i.e. 1-byte) clean store, and it is also good for performance.
67 ///
68 ///                            Atomic handling.
69 ///
70 /// Ideally, every atomic store of an application value should update the
71 /// corresponding shadow location in an atomic way. Unfortunately, an atomic
72 /// store to two disjoint locations cannot be done without severe slowdown.
73 ///
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
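///
/// For example, for a relaxed (monotonic) atomic store/load pair the
/// instrumentation is conceptually (a sketch):
///
///   store shadow(p) = 0;            // always-clean shadow, before the store
///   atomic-store-release p = v;     // ordering strengthened to release
///   ...
///   v2 = atomic-load-acquire p;     // ordering strengthened to acquire
///   shadow(v2) = load shadow(p);    // shadow loaded after the app load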
83 ///
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
87 /// after the app operation. Computers don't work this way. The current
88 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
90 /// clean shadow.
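///
/// For example, for `old = atomicrmw add p, x` the pass conceptually emits
/// (a sketch):
///
///   store shadow(p) = 0;          // store part: always-clean shadow
///   old = atomicrmw add p, x;
///   shadow(old) = 0;              // load part ignored: result assumed clean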
91 
92 //===----------------------------------------------------------------------===//
93 
94 #include "llvm/ADT/DepthFirstIterator.h"
95 #include "llvm/ADT/SmallString.h"
96 #include "llvm/ADT/SmallVector.h"
97 #include "llvm/ADT/StringExtras.h"
98 #include "llvm/ADT/Triple.h"
99 #include "llvm/IR/DataLayout.h"
100 #include "llvm/IR/Function.h"
101 #include "llvm/IR/IRBuilder.h"
102 #include "llvm/IR/InlineAsm.h"
103 #include "llvm/IR/InstVisitor.h"
104 #include "llvm/IR/IntrinsicInst.h"
105 #include "llvm/IR/LLVMContext.h"
106 #include "llvm/IR/MDBuilder.h"
107 #include "llvm/IR/Module.h"
108 #include "llvm/IR/Type.h"
109 #include "llvm/IR/ValueMap.h"
110 #include "llvm/Support/CommandLine.h"
111 #include "llvm/Support/Debug.h"
112 #include "llvm/Support/raw_ostream.h"
113 #include "llvm/Transforms/Instrumentation.h"
114 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
115 #include "llvm/Transforms/Utils/Local.h"
116 #include "llvm/Transforms/Utils/ModuleUtils.h"
117 
118 using namespace llvm;
119 
120 #define DEBUG_TYPE "msan"
121 
122 static const unsigned kOriginSize = 4;
123 static const unsigned kMinOriginAlignment = 4;
124 static const unsigned kShadowTLSAlignment = 8;
125 
126 // These constants must be kept in sync with the ones in msan.h.
127 static const unsigned kParamTLSSize = 800;
128 static const unsigned kRetvalTLSSize = 800;
129 
130 // Access sizes are powers of two: 1, 2, 4, 8.
131 static const size_t kNumberOfAccessSizes = 4;
132 
133 /// \brief Track origins of uninitialized values.
134 ///
135 /// Adds a section to the MemorySanitizer report that points to the allocation
136 /// (stack or heap) the uninitialized bits came from originally.
137 static cl::opt<int> ClTrackOrigins("msan-track-origins",
138        cl::desc("Track origins (allocation sites) of poisoned memory"),
139        cl::Hidden, cl::init(0));
140 static cl::opt<bool> ClKeepGoing("msan-keep-going",
141        cl::desc("keep going after reporting a UMR"),
142        cl::Hidden, cl::init(false));
143 static cl::opt<bool> ClPoisonStack("msan-poison-stack",
144        cl::desc("poison uninitialized stack variables"),
145        cl::Hidden, cl::init(true));
146 static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
147        cl::desc("poison uninitialized stack variables with a call"),
148        cl::Hidden, cl::init(false));
149 static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
150        cl::desc("poison uninitialized stack variables with the given pattern"),
151        cl::Hidden, cl::init(0xff));
152 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
153        cl::desc("poison undef temps"),
154        cl::Hidden, cl::init(true));
155 
156 static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
157        cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
158        cl::Hidden, cl::init(true));
159 
160 static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
161        cl::desc("exact handling of relational integer ICmp"),
162        cl::Hidden, cl::init(false));
163 
164 // This flag controls whether we check the shadow of the address
165 // operand of a load or store. Such bugs are very rare, since a load from
166 // a garbage address typically results in SEGV, but they still happen
167 // (e.g. only the lower bits of the address are garbage, or the access
168 // happens early at program startup, when malloc-ed memory is more likely
169 // to be zeroed). As of 2012-08-28 this flag adds a 20% slowdown.
170 static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
171        cl::desc("report accesses through a pointer which has poisoned shadow"),
172        cl::Hidden, cl::init(true));
173 
174 static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
175        cl::desc("print out instructions with default strict semantics"),
176        cl::Hidden, cl::init(false));
177 
178 static cl::opt<int> ClInstrumentationWithCallThreshold(
179     "msan-instrumentation-with-call-threshold",
180     cl::desc(
181         "If the function being instrumented requires more than "
182         "this number of checks and origin stores, use callbacks instead of "
183         "inline checks (-1 means never use callbacks)."),
184     cl::Hidden, cl::init(3500));
185 
186 // This is an experiment to enable handling of cases where the shadow is a
187 // non-zero compile-time constant. For some unexplained reason, such shadows
188 // were silently ignored by the instrumentation.
189 static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
190        cl::desc("Insert checks for constant shadow values"),
191        cl::Hidden, cl::init(false));
192 
193 // This is off by default because of a bug in gold:
194 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
195 static cl::opt<bool> ClWithComdat("msan-with-comdat",
196        cl::desc("Place MSan constructors in comdat sections"),
197        cl::Hidden, cl::init(false));
198 
199 static const char *const kMsanModuleCtorName = "msan.module_ctor";
200 static const char *const kMsanInitName = "__msan_init";
201 
202 namespace {
203 
204 // Memory map parameters used in application-to-shadow address calculation.
205 // Offset = (Addr & ~AndMask) ^ XorMask
206 // Shadow = ShadowBase + Offset
207 // Origin = OriginBase + Offset
208 struct MemoryMapParams {
209   uint64_t AndMask;
210   uint64_t XorMask;
211   uint64_t ShadowBase;
212   uint64_t OriginBase;
213 };
214 
215 struct PlatformMemoryMapParams {
216   const MemoryMapParams *bits32;
217   const MemoryMapParams *bits64;
218 };
219 
220 // i386 Linux
221 static const MemoryMapParams Linux_I386_MemoryMapParams = {
222   0x000080000000,  // AndMask
223   0,               // XorMask (not used)
224   0,               // ShadowBase (not used)
225   0x000040000000,  // OriginBase
226 };
227 
228 // x86_64 Linux
229 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
230 #ifdef MSAN_LINUX_X86_64_OLD_MAPPING
231   0x400000000000,  // AndMask
232   0,               // XorMask (not used)
233   0,               // ShadowBase (not used)
234   0x200000000000,  // OriginBase
235 #else
236   0,               // AndMask (not used)
237   0x500000000000,  // XorMask
238   0,               // ShadowBase (not used)
239   0x100000000000,  // OriginBase
240 #endif
241 };
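// For example, with the (non-old) x86_64 Linux parameters above, the mapping
// works out to (a worked sketch):
//   Offset = Addr ^ 0x500000000000   (AndMask is unused)
//   Shadow = Offset                  (ShadowBase is 0)
//   Origin = 0x100000000000 + Offset
// so Addr = 0x700000001000 gives Shadow = 0x200000001000 and
// Origin = 0x300000001000.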
242 
243 // mips64 Linux
244 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
245   0x004000000000,  // AndMask
246   0,               // XorMask (not used)
247   0,               // ShadowBase (not used)
248   0x002000000000,  // OriginBase
249 };
250 
251 // ppc64 Linux
252 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
253   0x200000000000,  // AndMask
254   0x100000000000,  // XorMask
255   0x080000000000,  // ShadowBase
256   0x1C0000000000,  // OriginBase
257 };
258 
259 // aarch64 Linux
260 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
261   0,               // AndMask (not used)
262   0x06000000000,   // XorMask
263   0,               // ShadowBase (not used)
264   0x01000000000,   // OriginBase
265 };
266 
267 // i386 FreeBSD
268 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
269   0x000180000000,  // AndMask
270   0x000040000000,  // XorMask
271   0x000020000000,  // ShadowBase
272   0x000700000000,  // OriginBase
273 };
274 
275 // x86_64 FreeBSD
276 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
277   0xc00000000000,  // AndMask
278   0x200000000000,  // XorMask
279   0x100000000000,  // ShadowBase
280   0x380000000000,  // OriginBase
281 };
282 
283 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
284   &Linux_I386_MemoryMapParams,
285   &Linux_X86_64_MemoryMapParams,
286 };
287 
288 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
289   nullptr,
290   &Linux_MIPS64_MemoryMapParams,
291 };
292 
293 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
294   nullptr,
295   &Linux_PowerPC64_MemoryMapParams,
296 };
297 
298 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
299   nullptr,
300   &Linux_AArch64_MemoryMapParams,
301 };
302 
303 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
304   &FreeBSD_I386_MemoryMapParams,
305   &FreeBSD_X86_64_MemoryMapParams,
306 };
307 
308 /// \brief An instrumentation pass implementing detection of uninitialized
309 /// reads.
310 ///
311 /// MemorySanitizer: instruments the code in a module to find
312 /// uninitialized reads.
313 class MemorySanitizer : public FunctionPass {
314  public:
315   MemorySanitizer(int TrackOrigins = 0)
316       : FunctionPass(ID),
317         TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
318         WarningFn(nullptr) {}
319   const char *getPassName() const override { return "MemorySanitizer"; }
320   bool runOnFunction(Function &F) override;
321   bool doInitialization(Module &M) override;
322   static char ID;  // Pass identification, replacement for typeid.
323 
324  private:
325   void initializeCallbacks(Module &M);
326 
327   /// \brief Track origins (allocation points) of uninitialized values.
328   int TrackOrigins;
329 
330   LLVMContext *C;
331   Type *IntptrTy;
332   Type *OriginTy;
333   /// \brief Thread-local shadow storage for function parameters.
334   GlobalVariable *ParamTLS;
335   /// \brief Thread-local origin storage for function parameters.
336   GlobalVariable *ParamOriginTLS;
337   /// \brief Thread-local shadow storage for function return value.
338   GlobalVariable *RetvalTLS;
339   /// \brief Thread-local origin storage for function return value.
340   GlobalVariable *RetvalOriginTLS;
341   /// \brief Thread-local shadow storage for in-register va_arg function
342   /// parameters (x86_64-specific).
343   GlobalVariable *VAArgTLS;
344   /// \brief Thread-local storage for the size of the va_arg overflow area
345   /// (x86_64-specific).
346   GlobalVariable *VAArgOverflowSizeTLS;
347   /// \brief Thread-local space used to pass origin value to the UMR reporting
348   /// function.
349   GlobalVariable *OriginTLS;
350 
351   /// \brief The run-time callback to print a warning.
352   Value *WarningFn;
353   // These arrays are indexed by log2(AccessSize).
354   Value *MaybeWarningFn[kNumberOfAccessSizes];
355   Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
356 
357   /// \brief Run-time helper that generates a new origin value for a stack
358   /// allocation.
359   Value *MsanSetAllocaOrigin4Fn;
360   /// \brief Run-time helper that poisons stack on function entry.
361   Value *MsanPoisonStackFn;
362   /// \brief Run-time helper that records a store (or any event) of an
363   /// uninitialized value and returns an updated origin id encoding this info.
364   Value *MsanChainOriginFn;
365   /// \brief MSan runtime replacements for memmove, memcpy and memset.
366   Value *MemmoveFn, *MemcpyFn, *MemsetFn;
367 
368   /// \brief Memory map parameters used in application-to-shadow calculation.
369   const MemoryMapParams *MapParams;
370 
371   MDNode *ColdCallWeights;
372   /// \brief Branch weights for origin store.
373   MDNode *OriginStoreWeights;
374   /// \brief An empty volatile inline asm that prevents callback merge.
375   InlineAsm *EmptyAsm;
376   Function *MsanCtorFunction;
377 
378   friend struct MemorySanitizerVisitor;
379   friend struct VarArgAMD64Helper;
380   friend struct VarArgMIPS64Helper;
381   friend struct VarArgAArch64Helper;
382   friend struct VarArgPowerPC64Helper;
383 };
384 } // anonymous namespace
385 
386 char MemorySanitizer::ID = 0;
387 INITIALIZE_PASS(MemorySanitizer, "msan",
388                 "MemorySanitizer: detects uninitialized reads.",
389                 false, false)
390 
391 FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
392   return new MemorySanitizer(TrackOrigins);
393 }
394 
395 /// \brief Create a non-const global initialized with the given string.
396 ///
397 /// Creates a writable global for Str so that we can pass it to the
398 /// run-time lib. The runtime uses the first 4 bytes of the string to store the
399 /// frame ID, so the string needs to be mutable.
400 static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
401                                                             StringRef Str) {
402   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
403   return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
404                             GlobalValue::PrivateLinkage, StrConst, "");
405 }
406 
407 /// \brief Insert extern declarations of runtime-provided functions and globals.
408 void MemorySanitizer::initializeCallbacks(Module &M) {
409   // Only do this once.
410   if (WarningFn)
411     return;
412 
413   IRBuilder<> IRB(*C);
414   // Create the callback.
415   // FIXME: this function should have "Cold" calling conv,
416   // which is not yet implemented.
417   StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
418                                         : "__msan_warning_noreturn";
419   WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr);
420 
421   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
422        AccessSizeIndex++) {
423     unsigned AccessSize = 1 << AccessSizeIndex;
424     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
425     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
426         FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
427         IRB.getInt32Ty(), nullptr);
428 
429     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
430     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
431         FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
432         IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr);
433   }
434 
435   MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
436     "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
437     IRB.getInt8PtrTy(), IntptrTy, nullptr);
438   MsanPoisonStackFn =
439       M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
440                             IRB.getInt8PtrTy(), IntptrTy, nullptr);
441   MsanChainOriginFn = M.getOrInsertFunction(
442     "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr);
443   MemmoveFn = M.getOrInsertFunction(
444     "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
445     IRB.getInt8PtrTy(), IntptrTy, nullptr);
446   MemcpyFn = M.getOrInsertFunction(
447     "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
448     IntptrTy, nullptr);
449   MemsetFn = M.getOrInsertFunction(
450     "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
451     IntptrTy, nullptr);
452 
453   // Create globals.
454   RetvalTLS = new GlobalVariable(
455     M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
456     GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
457     GlobalVariable::InitialExecTLSModel);
458   RetvalOriginTLS = new GlobalVariable(
459     M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
460     "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
461 
462   ParamTLS = new GlobalVariable(
463     M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
464     GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
465     GlobalVariable::InitialExecTLSModel);
466   ParamOriginTLS = new GlobalVariable(
467     M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
468     GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
469     nullptr, GlobalVariable::InitialExecTLSModel);
470 
471   VAArgTLS = new GlobalVariable(
472     M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
473     GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
474     GlobalVariable::InitialExecTLSModel);
475   VAArgOverflowSizeTLS = new GlobalVariable(
476     M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
477     "__msan_va_arg_overflow_size_tls", nullptr,
478     GlobalVariable::InitialExecTLSModel);
479   OriginTLS = new GlobalVariable(
480     M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
481     "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
482 
483   // We insert an empty inline asm after __msan_warning* to avoid callback merge.
484   EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
485                             StringRef(""), StringRef(""),
486                             /*hasSideEffects=*/true);
487 }
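
// The declarations created above correspond, roughly, to the following C
// prototypes in the MSan runtime (a hedged sketch; the runtime headers in
// compiler-rt are authoritative):
//   void __msan_warning_noreturn(void);
//   void __msan_maybe_warning_4(uint32_t value_shadow, uint32_t origin);
//   void *__msan_memcpy(void *dst, const void *src, uintptr_t n);
//   extern __thread uint64_t __msan_param_tls[100];  // kParamTLSSize bytes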
488 
489 /// \brief Module-level initialization.
490 ///
491 /// Inserts a call to __msan_init into the module's constructor list.
492 bool MemorySanitizer::doInitialization(Module &M) {
493   auto &DL = M.getDataLayout();
494 
495   Triple TargetTriple(M.getTargetTriple());
496   switch (TargetTriple.getOS()) {
497     case Triple::FreeBSD:
498       switch (TargetTriple.getArch()) {
499         case Triple::x86_64:
500           MapParams = FreeBSD_X86_MemoryMapParams.bits64;
501           break;
502         case Triple::x86:
503           MapParams = FreeBSD_X86_MemoryMapParams.bits32;
504           break;
505         default:
506           report_fatal_error("unsupported architecture");
507       }
508       break;
509     case Triple::Linux:
510       switch (TargetTriple.getArch()) {
511         case Triple::x86_64:
512           MapParams = Linux_X86_MemoryMapParams.bits64;
513           break;
514         case Triple::x86:
515           MapParams = Linux_X86_MemoryMapParams.bits32;
516           break;
517         case Triple::mips64:
518         case Triple::mips64el:
519           MapParams = Linux_MIPS_MemoryMapParams.bits64;
520           break;
521         case Triple::ppc64:
522         case Triple::ppc64le:
523           MapParams = Linux_PowerPC_MemoryMapParams.bits64;
524           break;
525         case Triple::aarch64:
526         case Triple::aarch64_be:
527           MapParams = Linux_ARM_MemoryMapParams.bits64;
528           break;
529         default:
530           report_fatal_error("unsupported architecture");
531       }
532       break;
533     default:
534       report_fatal_error("unsupported operating system");
535   }
536 
537   C = &(M.getContext());
538   IRBuilder<> IRB(*C);
539   IntptrTy = IRB.getIntPtrTy(DL);
540   OriginTy = IRB.getInt32Ty();
541 
542   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
543   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
544 
545   std::tie(MsanCtorFunction, std::ignore) =
546       createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
547                                           /*InitArgTypes=*/{},
548                                           /*InitArgs=*/{});
549   if (ClWithComdat) {
550     Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
551     MsanCtorFunction->setComdat(MsanCtorComdat);
552     appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
553   } else {
554     appendToGlobalCtors(M, MsanCtorFunction, 0);
555   }
556 
557 
558   if (TrackOrigins)
559     new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
560                        IRB.getInt32(TrackOrigins), "__msan_track_origins");
561 
562   if (ClKeepGoing)
563     new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
564                        IRB.getInt32(ClKeepGoing), "__msan_keep_going");
565 
566   return true;
567 }
568 
569 namespace {
570 
571 /// \brief A helper class that handles instrumentation of VarArg
572 /// functions on a particular platform.
573 ///
574 /// Implementations are expected to insert the instrumentation
575 /// necessary to propagate argument shadow through VarArg function
576 /// calls. Visit* methods are called during an InstVisitor pass over
577 /// the function, and should avoid creating new basic blocks. A new
578 /// instance of this class is created for each instrumented function.
579 struct VarArgHelper {
580   /// \brief Visit a CallSite.
581   virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
582 
583   /// \brief Visit a va_start call.
584   virtual void visitVAStartInst(VAStartInst &I) = 0;
585 
586   /// \brief Visit a va_copy call.
587   virtual void visitVACopyInst(VACopyInst &I) = 0;
588 
589   /// \brief Finalize function instrumentation.
590   ///
591   /// This method is called after visiting all interesting (see above)
592   /// instructions in a function.
593   virtual void finalizeInstrumentation() = 0;
594 
595   virtual ~VarArgHelper() {}
596 };
597 
598 struct MemorySanitizerVisitor;
599 
600 VarArgHelper*
601 CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
602                    MemorySanitizerVisitor &Visitor);
603 
604 unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
605   if (TypeSize <= 8) return 0;
606   return Log2_32_Ceil(TypeSize / 8);
607 }
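
// For example, TypeSizeToSizeIndex above maps bit sizes 8 -> 0, 16 -> 1,
// 32 -> 2 and 64 -> 3; together with kNumberOfAccessSizes this selects among
// the __msan_maybe_*_1/2/4/8 callbacks declared in initializeCallbacks().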
608 
609 /// This class does all the work for a given function. Store and Load
610 /// instructions store and load corresponding shadow and origin
611 /// values. Most instructions propagate shadow from arguments to their
612 /// return values. Certain instructions (most importantly, BranchInst)
613 /// test their argument shadow and print reports (with a runtime call) if it's
614 /// non-zero.
615 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
616   Function &F;
617   MemorySanitizer &MS;
618   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
619   ValueMap<Value*, Value*> ShadowMap, OriginMap;
620   std::unique_ptr<VarArgHelper> VAHelper;
621 
622   // The following flags disable parts of MSan instrumentation based on
623   // blacklist contents and command-line options.
624   bool InsertChecks;
625   bool PropagateShadow;
626   bool PoisonStack;
627   bool PoisonUndef;
628   bool CheckReturnValue;
629 
630   struct ShadowOriginAndInsertPoint {
631     Value *Shadow;
632     Value *Origin;
633     Instruction *OrigIns;
634     ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
635       : Shadow(S), Origin(O), OrigIns(I) { }
636   };
637   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
638   SmallVector<Instruction*, 16> StoreList;
639 
640   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
641       : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
642     bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
643     InsertChecks = SanitizeFunction;
644     PropagateShadow = SanitizeFunction;
645     PoisonStack = SanitizeFunction && ClPoisonStack;
646     PoisonUndef = SanitizeFunction && ClPoisonUndef;
647     // FIXME: Consider using SpecialCaseList to specify a list of functions that
648     // must always return fully initialized values. For now, we hardcode "main".
649     CheckReturnValue = SanitizeFunction && (F.getName() == "main");
650 
651     DEBUG(if (!InsertChecks)
652           dbgs() << "MemorySanitizer is not inserting checks into '"
653                  << F.getName() << "'\n");
654   }
655 
656   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
657     if (MS.TrackOrigins <= 1) return V;
658     return IRB.CreateCall(MS.MsanChainOriginFn, V);
659   }
660 
661   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
662     const DataLayout &DL = F.getParent()->getDataLayout();
663     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
664     if (IntptrSize == kOriginSize) return Origin;
665     assert(IntptrSize == kOriginSize * 2);
666     Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
667     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
668   }
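
  // For example, with a 64-bit intptr, originToIntptr above turns origin
  // 0xABCD1234 into 0xABCD1234ABCD1234, so paintOrigin below can fill two
  // 4-byte origin slots with a single 8-byte store (a sketch of the intent).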
669 
670   /// \brief Fill memory range with the given origin value.
671   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
672                    unsigned Size, unsigned Alignment) {
673     const DataLayout &DL = F.getParent()->getDataLayout();
674     unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
675     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
676     assert(IntptrAlignment >= kMinOriginAlignment);
677     assert(IntptrSize >= kOriginSize);
678 
679     unsigned Ofs = 0;
680     unsigned CurrentAlignment = Alignment;
681     if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
682       Value *IntptrOrigin = originToIntptr(IRB, Origin);
683       Value *IntptrOriginPtr =
684           IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
685       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
686         Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
687                        : IntptrOriginPtr;
688         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
689         Ofs += IntptrSize / kOriginSize;
690         CurrentAlignment = IntptrAlignment;
691       }
692     }
693 
694     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
695       Value *GEP =
696           i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
697       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
698       CurrentAlignment = kMinOriginAlignment;
699     }
700   }
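
  // Worked example for paintOrigin above (assuming a 64-bit intptr with
  // 8-byte ABI alignment): with Size = 16 and Alignment = 8 it emits two
  // 8-byte stores of the packed origin; with Alignment = 4 it falls back to
  // four 4-byte origin stores.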
701 
702   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
703                    unsigned Alignment, bool AsCall) {
704     const DataLayout &DL = F.getParent()->getDataLayout();
705     unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
706     unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
707     if (Shadow->getType()->isAggregateType()) {
708       paintOrigin(IRB, updateOrigin(Origin, IRB),
709                   getOriginPtr(Addr, IRB, Alignment), StoreSize,
710                   OriginAlignment);
711     } else {
712       Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
713       Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
714       if (ConstantShadow) {
715         if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
716           paintOrigin(IRB, updateOrigin(Origin, IRB),
717                       getOriginPtr(Addr, IRB, Alignment), StoreSize,
718                       OriginAlignment);
719         return;
720       }
721 
722       unsigned TypeSizeInBits =
723           DL.getTypeSizeInBits(ConvertedShadow->getType());
724       unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
725       if (AsCall && SizeIndex < kNumberOfAccessSizes) {
726         Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
727         Value *ConvertedShadow2 = IRB.CreateZExt(
728             ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
729         IRB.CreateCall(Fn, {ConvertedShadow2,
730                             IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
731                             Origin});
732       } else {
733         Value *Cmp = IRB.CreateICmpNE(
734             ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
735         Instruction *CheckTerm = SplitBlockAndInsertIfThen(
736             Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
737         IRBuilder<> IRBNew(CheckTerm);
738         paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
739                     getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
740                     OriginAlignment);
741       }
742     }
743   }
744 
745   void materializeStores(bool InstrumentWithCalls) {
746     for (auto Inst : StoreList) {
747       StoreInst &SI = *dyn_cast<StoreInst>(Inst);
748 
749       IRBuilder<> IRB(&SI);
750       Value *Val = SI.getValueOperand();
751       Value *Addr = SI.getPointerOperand();
752       Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
753       Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
754 
755       StoreInst *NewSI =
756           IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
757       DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
758       (void)NewSI;
759 
760       if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);
761 
762       if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
763 
764       if (MS.TrackOrigins && !SI.isAtomic())
765         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
766                     InstrumentWithCalls);
767     }
768   }
769 
770   void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
771                            bool AsCall) {
772     IRBuilder<> IRB(OrigIns);
773     DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
774     Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
775     DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
776 
777     Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
778     if (ConstantShadow) {
779       if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
780         if (MS.TrackOrigins) {
781           IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
782                           MS.OriginTLS);
783         }
784         IRB.CreateCall(MS.WarningFn, {});
785         IRB.CreateCall(MS.EmptyAsm, {});
786         // FIXME: Insert UnreachableInst if !ClKeepGoing?
787         // This may invalidate some of the following checks and needs to be done
788         // at the very end.
789       }
790       return;
791     }
792 
793     const DataLayout &DL = OrigIns->getModule()->getDataLayout();
794 
795     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
796     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
797     if (AsCall && SizeIndex < kNumberOfAccessSizes) {
798       Value *Fn = MS.MaybeWarningFn[SizeIndex];
799       Value *ConvertedShadow2 =
800           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
801       IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
802                                                 ? Origin
803                                                 : (Value *)IRB.getInt32(0)});
804     } else {
805       Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
806                                     getCleanShadow(ConvertedShadow), "_mscmp");
807       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
808           Cmp, OrigIns,
809           /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);
810 
811       IRB.SetInsertPoint(CheckTerm);
812       if (MS.TrackOrigins) {
813         IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
814                         MS.OriginTLS);
815       }
816       IRB.CreateCall(MS.WarningFn, {});
817       IRB.CreateCall(MS.EmptyAsm, {});
818       DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
819     }
820   }
821 
822   void materializeChecks(bool InstrumentWithCalls) {
823     for (const auto &ShadowData : InstrumentationList) {
824       Instruction *OrigIns = ShadowData.OrigIns;
825       Value *Shadow = ShadowData.Shadow;
826       Value *Origin = ShadowData.Origin;
827       materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
828     }
829     DEBUG(dbgs() << "DONE:\n" << F);
830   }
831 
832   /// \brief Add MemorySanitizer instrumentation to a function.
833   bool runOnFunction() {
834     MS.initializeCallbacks(*F.getParent());
835 
836     // In the presence of unreachable blocks, we may see Phi nodes with
837     // incoming values from such blocks. Since InstVisitor skips unreachable
838     // blocks, those values will not have any shadow associated with them.
839     // It's easier to remove unreachable blocks than to deal with missing shadow.
840     removeUnreachableBlocks(F);
841 
842     // Iterate all BBs in depth-first order and create shadow instructions
843     // for all instructions (where applicable).
844     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
845     for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
846       visit(*BB);
847 
848 
849     // Finalize PHI nodes.
850     for (PHINode *PN : ShadowPHINodes) {
851       PHINode *PNS = cast<PHINode>(getShadow(PN));
852       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
853       size_t NumValues = PN->getNumIncomingValues();
854       for (size_t v = 0; v < NumValues; v++) {
855         PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
856         if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
857       }
858     }
859 
860     VAHelper->finalizeInstrumentation();
861 
862     bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
863                                InstrumentationList.size() + StoreList.size() >
864                                    (unsigned)ClInstrumentationWithCallThreshold;
865 
866     // Delayed instrumentation of StoreInst.
867     // This may add new checks to be inserted later.
868     materializeStores(InstrumentWithCalls);
869 
870     // Insert shadow value checks.
871     materializeChecks(InstrumentWithCalls);
872 
873     return true;
874   }
875 
876   /// \brief Compute the shadow type that corresponds to a given Value.
877   Type *getShadowTy(Value *V) {
878     return getShadowTy(V->getType());
879   }
880 
881   /// \brief Compute the shadow type that corresponds to a given Type.
882   Type *getShadowTy(Type *OrigTy) {
883     if (!OrigTy->isSized()) {
884       return nullptr;
885     }
886     // For integer type, shadow is the same as the original type.
887     // This may return weird-sized types like i1.
888     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
889       return IT;
890     const DataLayout &DL = F.getParent()->getDataLayout();
891     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
892       uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
893       return VectorType::get(IntegerType::get(*MS.C, EltSize),
894                              VT->getNumElements());
895     }
896     if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
897       return ArrayType::get(getShadowTy(AT->getElementType()),
898                             AT->getNumElements());
899     }
900     if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
901       SmallVector<Type*, 4> Elements;
902       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
903         Elements.push_back(getShadowTy(ST->getElementType(i)));
904       StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
905       DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
906       return Res;
907     }
908     uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
909     return IntegerType::get(*MS.C, TypeSize);
910   }
911 
912   /// \brief Flatten a vector type.
913   Type *getShadowTyNoVec(Type *ty) {
914     if (VectorType *vt = dyn_cast<VectorType>(ty))
915       return IntegerType::get(*MS.C, vt->getBitWidth());
916     return ty;
917   }
918 
919   /// \brief Convert a shadow value to its flattened variant.
920   Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
921     Type *Ty = V->getType();
922     Type *NoVecTy = getShadowTyNoVec(Ty);
923     if (Ty == NoVecTy) return V;
924     return IRB.CreateBitCast(V, NoVecTy);
925   }
926 
927   /// \brief Compute the integer shadow offset that corresponds to a given
928   /// application address.
929   ///
930   /// Offset = (Addr & ~AndMask) ^ XorMask
931   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
932     Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
933 
934     uint64_t AndMask = MS.MapParams->AndMask;
935     if (AndMask)
936       OffsetLong =
937           IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
938 
939     uint64_t XorMask = MS.MapParams->XorMask;
940     if (XorMask)
941       OffsetLong =
942           IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
943     return OffsetLong;
944   }
945 
946   /// \brief Compute the shadow address that corresponds to a given application
947   /// address.
948   ///
949   /// Shadow = ShadowBase + Offset
950   Value *getShadowPtr(Value *Addr, Type *ShadowTy,
951                       IRBuilder<> &IRB) {
952     Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
953     uint64_t ShadowBase = MS.MapParams->ShadowBase;
954     if (ShadowBase != 0)
955       ShadowLong =
956         IRB.CreateAdd(ShadowLong,
957                       ConstantInt::get(MS.IntptrTy, ShadowBase));
958     return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
959   }
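
  // For the default x86_64 Linux mapping, getShadowPtr above emits roughly
  // (a sketch):
  //   %offset = xor i64 (ptrtoint %addr), 0x500000000000
  //   %shadow_ptr = inttoptr i64 %offset to <shadow type>*
  // (the add of ShadowBase is skipped because it is 0 on that target).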
960 
961   /// \brief Compute the origin address that corresponds to a given application
962   /// address.
963   ///
964   /// OriginAddr = (OriginBase + Offset) & ~3ULL
965   Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
966     Value *OriginLong = getShadowPtrOffset(Addr, IRB);
967     uint64_t OriginBase = MS.MapParams->OriginBase;
968     if (OriginBase != 0)
969       OriginLong =
970         IRB.CreateAdd(OriginLong,
971                       ConstantInt::get(MS.IntptrTy, OriginBase));
972     if (Alignment < kMinOriginAlignment) {
973       uint64_t Mask = kMinOriginAlignment - 1;
974       OriginLong = IRB.CreateAnd(OriginLong,
975                                  ConstantInt::get(MS.IntptrTy, ~Mask));
976     }
977     return IRB.CreateIntToPtr(OriginLong,
978                               PointerType::get(IRB.getInt32Ty(), 0));
979   }
980 
981   /// \brief Compute the shadow address for a given function argument.
982   ///
983   /// Shadow = ParamTLS+ArgOffset.
984   Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
985                                  int ArgOffset) {
986     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
987     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
988     return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
989                               "_msarg");
990   }
991 
992   /// \brief Compute the origin address for a given function argument.
993   Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
994                                  int ArgOffset) {
995     if (!MS.TrackOrigins) return nullptr;
996     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
997     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
998     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
999                               "_msarg_o");
1000   }
1001 
1002   /// \brief Compute the shadow address for a retval.
1003   Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1004     Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
1005     return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1006                               "_msret");
1007   }
1008 
1009   /// \brief Compute the origin address for a retval.
1010   Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1011     // We keep a single origin for the entire retval. Might be too optimistic.
1012     return MS.RetvalOriginTLS;
1013   }
1014 
1015   /// \brief Set SV to be the shadow value for V.
1016   void setShadow(Value *V, Value *SV) {
1017     assert(!ShadowMap.count(V) && "Values may only have one shadow");
1018     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1019   }
1020 
1021   /// \brief Set Origin to be the origin value for V.
1022   void setOrigin(Value *V, Value *Origin) {
1023     if (!MS.TrackOrigins) return;
1024     assert(!OriginMap.count(V) && "Values may only have one origin");
1025     DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
1026     OriginMap[V] = Origin;
1027   }
1028 
1029   /// \brief Create a clean shadow value for a given value.
1030   ///
1031   /// Clean shadow (all zeroes) means all bits of the value are defined
1032   /// (initialized).
1033   Constant *getCleanShadow(Value *V) {
1034     Type *ShadowTy = getShadowTy(V);
1035     if (!ShadowTy)
1036       return nullptr;
1037     return Constant::getNullValue(ShadowTy);
1038   }
1039 
1040   /// \brief Create a dirty shadow of a given shadow type.
1041   Constant *getPoisonedShadow(Type *ShadowTy) {
1042     assert(ShadowTy);
1043     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1044       return Constant::getAllOnesValue(ShadowTy);
1045     if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1046       SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1047                                       getPoisonedShadow(AT->getElementType()));
1048       return ConstantArray::get(AT, Vals);
1049     }
1050     if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1051       SmallVector<Constant *, 4> Vals;
1052       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1053         Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1054       return ConstantStruct::get(ST, Vals);
1055     }
1056     llvm_unreachable("Unexpected shadow type");
1057   }
1058 
1059   /// \brief Create a dirty shadow for a given value.
1060   Constant *getPoisonedShadow(Value *V) {
1061     Type *ShadowTy = getShadowTy(V);
1062     if (!ShadowTy)
1063       return nullptr;
1064     return getPoisonedShadow(ShadowTy);
1065   }
1066 
1067   /// \brief Create a clean (zero) origin.
1068   Value *getCleanOrigin() {
1069     return Constant::getNullValue(MS.OriginTy);
1070   }
1071 
1072   /// \brief Get the shadow value for a given Value.
1073   ///
1074   /// This function either returns the value set earlier with setShadow,
1075   /// or extracts it from ParamTLS (for function arguments).
1076   Value *getShadow(Value *V) {
1077     if (!PropagateShadow) return getCleanShadow(V);
1078     if (Instruction *I = dyn_cast<Instruction>(V)) {
1079       // For instructions the shadow is already stored in the map.
1080       Value *Shadow = ShadowMap[V];
1081       if (!Shadow) {
1082         DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1083         (void)I;
1084         assert(Shadow && "No shadow for a value");
1085       }
1086       return Shadow;
1087     }
1088     if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1089       Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1090       DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1091       (void)U;
1092       return AllOnes;
1093     }
1094     if (Argument *A = dyn_cast<Argument>(V)) {
1095       // For arguments we compute the shadow on demand and store it in the map.
1096       Value **ShadowPtr = &ShadowMap[V];
1097       if (*ShadowPtr)
1098         return *ShadowPtr;
1099       Function *F = A->getParent();
1100       IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
1101       unsigned ArgOffset = 0;
1102       const DataLayout &DL = F->getParent()->getDataLayout();
1103       for (auto &FArg : F->args()) {
1104         if (!FArg.getType()->isSized()) {
1105           DEBUG(dbgs() << "Arg is not sized\n");
1106           continue;
1107         }
1108         unsigned Size =
1109             FArg.hasByValAttr()
1110                 ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
1111                 : DL.getTypeAllocSize(FArg.getType());
1112         if (A == &FArg) {
1113           bool Overflow = ArgOffset + Size > kParamTLSSize;
1114           Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1115           if (FArg.hasByValAttr()) {
1116             // ByVal pointer itself has clean shadow. We copy the actual
1117             // argument shadow to the underlying memory.
1118             // Figure out maximal valid memcpy alignment.
1119             unsigned ArgAlign = FArg.getParamAlignment();
1120             if (ArgAlign == 0) {
1121               Type *EltType = A->getType()->getPointerElementType();
1122               ArgAlign = DL.getABITypeAlignment(EltType);
1123             }
1124             if (Overflow) {
1125               // ParamTLS overflow.
1126               EntryIRB.CreateMemSet(
1127                   getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
1128                   Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign);
1129             } else {
1130               unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1131               Value *Cpy = EntryIRB.CreateMemCpy(
1132                   getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
1133                   CopyAlign);
1134               DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
1135               (void)Cpy;
1136             }
1137             *ShadowPtr = getCleanShadow(V);
1138           } else {
1139             if (Overflow) {
1140               // ParamTLS overflow.
1141               *ShadowPtr = getCleanShadow(V);
1142             } else {
1143               *ShadowPtr =
1144                   EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
1145             }
1146           }
1147           DEBUG(dbgs() << "  ARG:    "  << FArg << " ==> " <<
1148                 **ShadowPtr << "\n");
1149           if (MS.TrackOrigins && !Overflow) {
1150             Value *OriginPtr =
1151                 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1152             setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
1153           } else {
1154             setOrigin(A, getCleanOrigin());
1155           }
1156         }
1157         ArgOffset += alignTo(Size, kShadowTLSAlignment);
1158       }
1159       assert(*ShadowPtr && "Could not find shadow for an argument");
1160       return *ShadowPtr;
1161     }
1162     // For everything else the shadow is zero.
1163     return getCleanShadow(V);
1164   }
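
  // For example, for `void f(int a, long b)` the argument shadow is laid out
  // in __msan_param_tls at kShadowTLSAlignment-sized slots: shadow(a) at
  // offset 0 and shadow(b) at offset 8 (a sketch; each slot occupies
  // alignTo(Size, kShadowTLSAlignment) bytes).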
1165 
1166   /// \brief Get the shadow for i-th argument of the instruction I.
1167   Value *getShadow(Instruction *I, int i) {
1168     return getShadow(I->getOperand(i));
1169   }
1170 
1171   /// \brief Get the origin for a value.
1172   Value *getOrigin(Value *V) {
1173     if (!MS.TrackOrigins) return nullptr;
1174     if (!PropagateShadow) return getCleanOrigin();
1175     if (isa<Constant>(V)) return getCleanOrigin();
1176     assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1177            "Unexpected value type in getOrigin()");
1178     Value *Origin = OriginMap[V];
1179     assert(Origin && "Missing origin");
1180     return Origin;
1181   }
1182 
1183   /// \brief Get the origin for i-th argument of the instruction I.
1184   Value *getOrigin(Instruction *I, int i) {
1185     return getOrigin(I->getOperand(i));
1186   }
1187 
1188   /// \brief Remember the place where a shadow check should be inserted.
1189   ///
1190   /// This location will later be instrumented with a check that will print a
1191   /// UMR warning at run time if the shadow value is not 0.
1192   void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1193     assert(Shadow);
1194     if (!InsertChecks) return;
1195 #ifndef NDEBUG
1196     Type *ShadowTy = Shadow->getType();
1197     assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1198            "Can only insert checks for integer and vector shadow types");
1199 #endif
1200     InstrumentationList.push_back(
1201         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1202   }
1203 
1204   /// \brief Remember the place where a shadow check should be inserted.
1205   ///
1206   /// This location will later be instrumented with a check that will print a
1207   /// UMR warning at run time if the value is not fully defined.
1208   void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1209     assert(Val);
1210     Value *Shadow, *Origin;
1211     if (ClCheckConstantShadow) {
1212       Shadow = getShadow(Val);
1213       if (!Shadow) return;
1214       Origin = getOrigin(Val);
1215     } else {
1216       Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1217       if (!Shadow) return;
1218       Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1219     }
1220     insertShadowCheck(Shadow, Origin, OrigIns);
1221   }
1222 
1223   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1224     switch (a) {
1225       case AtomicOrdering::NotAtomic:
1226         return AtomicOrdering::NotAtomic;
1227       case AtomicOrdering::Unordered:
1228       case AtomicOrdering::Monotonic:
1229       case AtomicOrdering::Release:
1230         return AtomicOrdering::Release;
1231       case AtomicOrdering::Acquire:
1232       case AtomicOrdering::AcquireRelease:
1233         return AtomicOrdering::AcquireRelease;
1234       case AtomicOrdering::SequentiallyConsistent:
1235         return AtomicOrdering::SequentiallyConsistent;
1236     }
1237     llvm_unreachable("Unknown ordering");
1238   }
1239 
1240   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1241     switch (a) {
1242       case AtomicOrdering::NotAtomic:
1243         return AtomicOrdering::NotAtomic;
1244       case AtomicOrdering::Unordered:
1245       case AtomicOrdering::Monotonic:
1246       case AtomicOrdering::Acquire:
1247         return AtomicOrdering::Acquire;
1248       case AtomicOrdering::Release:
1249       case AtomicOrdering::AcquireRelease:
1250         return AtomicOrdering::AcquireRelease;
1251       case AtomicOrdering::SequentiallyConsistent:
1252         return AtomicOrdering::SequentiallyConsistent;
1253     }
1254     llvm_unreachable("Unknown ordering");
1255   }
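
  // For example, a monotonic app load is strengthened to acquire and a
  // monotonic app store to release, so the plain shadow load that follows an
  // atomic app load (and the plain shadow store that precedes an atomic app
  // store) are ordered as described in the "Atomic handling" section of the
  // file comment.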
1256 
1257   // ------------------- Visitors.
1258 
1259   /// \brief Instrument LoadInst
1260   ///
1261   /// Loads the corresponding shadow and (optionally) origin.
1262   /// Optionally, checks that the load address is fully defined.
1263   void visitLoadInst(LoadInst &I) {
1264     assert(I.getType()->isSized() && "Load type must have size");
1265     IRBuilder<> IRB(I.getNextNode());
1266     Type *ShadowTy = getShadowTy(&I);
1267     Value *Addr = I.getPointerOperand();
1268     if (PropagateShadow && !I.getMetadata("nosanitize")) {
1269       Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1270       setShadow(&I,
1271                 IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
1272     } else {
1273       setShadow(&I, getCleanShadow(&I));
1274     }
1275 
1276     if (ClCheckAccessAddress)
1277       insertShadowCheck(I.getPointerOperand(), &I);
1278 
1279     if (I.isAtomic())
1280       I.setOrdering(addAcquireOrdering(I.getOrdering()));
1281 
1282     if (MS.TrackOrigins) {
1283       if (PropagateShadow) {
1284         unsigned Alignment = I.getAlignment();
1285         unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1286         setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
1287                                             OriginAlignment));
1288       } else {
1289         setOrigin(&I, getCleanOrigin());
1290       }
1291     }
1292   }
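
  // For example, `%x = load i32, i32* %p` becomes, roughly (a sketch):
  //   %x = load i32, i32* %p
  //   %sp = <shadow address computed from %p>
  //   %_msld = load i32, i32* %sp        ; shadow of %x
  // with the origin loaded similarly when origin tracking is enabled, and a
  // check of %p's own shadow queued if -msan-check-access-address is set.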
1293 
1294   /// \brief Instrument StoreInst
1295   ///
1296   /// Stores the corresponding shadow and (optionally) origin.
1297   /// Optionally, checks that the store address is fully defined.
1298   void visitStoreInst(StoreInst &I) {
1299     StoreList.push_back(&I);
1300   }
1301 
1302   void handleCASOrRMW(Instruction &I) {
1303     assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1304 
1305     IRBuilder<> IRB(&I);
1306     Value *Addr = I.getOperand(0);
1307     Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);
1308 
1309     if (ClCheckAccessAddress)
1310       insertShadowCheck(Addr, &I);
1311 
1312     // Only test the conditional argument of the cmpxchg instruction.
1313     // The other argument can potentially be uninitialized, but we cannot
1314     // detect this situation reliably without possible false positives.
1315     if (isa<AtomicCmpXchgInst>(I))
1316       insertShadowCheck(I.getOperand(1), &I);
1317 
1318     IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
1319 
1320     setShadow(&I, getCleanShadow(&I));
1321     setOrigin(&I, getCleanOrigin());
1322   }
1323 
1324   void visitAtomicRMWInst(AtomicRMWInst &I) {
1325     handleCASOrRMW(I);
1326     I.setOrdering(addReleaseOrdering(I.getOrdering()));
1327   }
1328 
1329   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
1330     handleCASOrRMW(I);
1331     I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
1332   }
1333 
1334   // Vector manipulation.
1335   void visitExtractElementInst(ExtractElementInst &I) {
1336     insertShadowCheck(I.getOperand(1), &I);
1337     IRBuilder<> IRB(&I);
1338     setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
1339               "_msprop"));
1340     setOrigin(&I, getOrigin(&I, 0));
1341   }
1342 
1343   void visitInsertElementInst(InsertElementInst &I) {
1344     insertShadowCheck(I.getOperand(2), &I);
1345     IRBuilder<> IRB(&I);
1346     setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1347               I.getOperand(2), "_msprop"));
1348     setOriginForNaryOp(I);
1349   }
1350 
1351   void visitShuffleVectorInst(ShuffleVectorInst &I) {
1352     insertShadowCheck(I.getOperand(2), &I);
1353     IRBuilder<> IRB(&I);
1354     setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1355               I.getOperand(2), "_msprop"));
1356     setOriginForNaryOp(I);
1357   }
1358 
1359   // Casts.
1360   void visitSExtInst(SExtInst &I) {
1361     IRBuilder<> IRB(&I);
1362     setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
1363     setOrigin(&I, getOrigin(&I, 0));
1364   }
1365 
1366   void visitZExtInst(ZExtInst &I) {
1367     IRBuilder<> IRB(&I);
1368     setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
1369     setOrigin(&I, getOrigin(&I, 0));
1370   }
1371 
1372   void visitTruncInst(TruncInst &I) {
1373     IRBuilder<> IRB(&I);
1374     setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
1375     setOrigin(&I, getOrigin(&I, 0));
1376   }
1377 
1378   void visitBitCastInst(BitCastInst &I) {
1379     // Special case: if this is the bitcast (there is exactly 1 allowed) between
1380     // a musttail call and a ret, don't instrument. New instructions are not
1381     // allowed after a musttail call.
1382     if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
1383       if (CI->isMustTailCall())
1384         return;
1385     IRBuilder<> IRB(&I);
1386     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1387     setOrigin(&I, getOrigin(&I, 0));
1388   }
1389 
1390   void visitPtrToIntInst(PtrToIntInst &I) {
1391     IRBuilder<> IRB(&I);
1392     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1393              "_msprop_ptrtoint"));
1394     setOrigin(&I, getOrigin(&I, 0));
1395   }
1396 
1397   void visitIntToPtrInst(IntToPtrInst &I) {
1398     IRBuilder<> IRB(&I);
1399     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1400              "_msprop_inttoptr"));
1401     setOrigin(&I, getOrigin(&I, 0));
1402   }
1403 
1404   void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
1405   void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
1406   void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
1407   void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
1408   void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
1409   void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
1410 
1411   /// \brief Propagate shadow for bitwise AND.
1412   ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in the right argument affects the resulting shadow.
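  ///
  /// Worked example (illustrative): with 2-bit values, let the left argument
  /// A = 0b10 with shadow Sa = 0b01 (bit 0 of A is undefined) and let the
  /// right argument B = 0b00 be fully defined (Sb = 0). Then A & B is 0
  /// whatever the undefined bit holds, and the formula below agrees:
  ///   S = (Sa & Sb) | (A & Sb) | (Sa & B) = 0 | 0 | 0  (fully defined).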
1416   void visitAnd(BinaryOperator &I) {
1417     IRBuilder<> IRB(&I);
1418     //  "And" of 0 and a poisoned value results in unpoisoned value.
1419     //  1&1 => 1;     0&1 => 0;     p&1 => p;
1420     //  1&0 => 0;     0&0 => 0;     p&0 => 0;
1421     //  1&p => p;     0&p => 0;     p&p => p;
1422     //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
1423     Value *S1 = getShadow(&I, 0);
1424     Value *S2 = getShadow(&I, 1);
1425     Value *V1 = I.getOperand(0);
1426     Value *V2 = I.getOperand(1);
1427     if (V1->getType() != S1->getType()) {
1428       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1429       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1430     }
1431     Value *S1S2 = IRB.CreateAnd(S1, S2);
1432     Value *V1S2 = IRB.CreateAnd(V1, S2);
1433     Value *S1V2 = IRB.CreateAnd(S1, V2);
1434     setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1435     setOriginForNaryOp(I);
1436   }
1437 
1438   void visitOr(BinaryOperator &I) {
1439     IRBuilder<> IRB(&I);
1440     //  "Or" of 1 and a poisoned value results in unpoisoned value.
1441     //  1|1 => 1;     0|1 => 1;     p|1 => 1;
1442     //  1|0 => 1;     0|0 => 0;     p|0 => p;
1443     //  1|p => 1;     0|p => p;     p|p => p;
1444     //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
1445     Value *S1 = getShadow(&I, 0);
1446     Value *S2 = getShadow(&I, 1);
1447     Value *V1 = IRB.CreateNot(I.getOperand(0));
1448     Value *V2 = IRB.CreateNot(I.getOperand(1));
1449     if (V1->getType() != S1->getType()) {
1450       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1451       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1452     }
1453     Value *S1S2 = IRB.CreateAnd(S1, S2);
1454     Value *V1S2 = IRB.CreateAnd(V1, S2);
1455     Value *S1V2 = IRB.CreateAnd(S1, V2);
1456     setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1457     setOriginForNaryOp(I);
1458   }
1459 
1460   /// \brief Default propagation of shadow and/or origin.
1461   ///
1462   /// This class implements the general case of shadow propagation, used in all
1463   /// cases where we don't know and/or don't care about what the operation
1464   /// actually does. It converts all input shadow values to a common type
1465   /// (extending or truncating as necessary), and bitwise OR's them.
1466   ///
1467   /// This is much cheaper than inserting checks (i.e. requiring inputs to be
1468   /// fully initialized), and less prone to false positives.
1469   ///
1470   /// This class also implements the general case of origin propagation. For a
1471   /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
1473   /// rightmost of them is picked. It does not matter which one is picked if all
1474   /// arguments are initialized.
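  ///
  /// A typical (illustrative) use, mirroring handleShadowOr() below:
  /// \code
  ///   IRBuilder<> IRB(&I);
  ///   ShadowAndOriginCombiner SC(this, IRB);
  ///   for (Value *Op : I.operand_values())
  ///     SC.Add(Op);
  ///   SC.Done(&I);
  /// \endcode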
1475   template <bool CombineShadow>
1476   class Combiner {
1477     Value *Shadow;
1478     Value *Origin;
1479     IRBuilder<> &IRB;
1480     MemorySanitizerVisitor *MSV;
1481 
1482   public:
1483     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
1484       Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}
1485 
1486     /// \brief Add a pair of shadow and origin values to the mix.
1487     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
1488       if (CombineShadow) {
1489         assert(OpShadow);
1490         if (!Shadow)
1491           Shadow = OpShadow;
1492         else {
1493           OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
1494           Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
1495         }
1496       }
1497 
1498       if (MSV->MS.TrackOrigins) {
1499         assert(OpOrigin);
1500         if (!Origin) {
1501           Origin = OpOrigin;
1502         } else {
1503           Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in a null origin.
1505           if (!ConstOrigin || !ConstOrigin->isNullValue()) {
1506             Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
1507             Value *Cond =
1508                 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
1509             Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
1510           }
1511         }
1512       }
1513       return *this;
1514     }
1515 
1516     /// \brief Add an application value to the mix.
1517     Combiner &Add(Value *V) {
1518       Value *OpShadow = MSV->getShadow(V);
1519       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
1520       return Add(OpShadow, OpOrigin);
1521     }
1522 
1523     /// \brief Set the current combined values as the given instruction's shadow
1524     /// and origin.
1525     void Done(Instruction *I) {
1526       if (CombineShadow) {
1527         assert(Shadow);
1528         Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
1529         MSV->setShadow(I, Shadow);
1530       }
1531       if (MSV->MS.TrackOrigins) {
1532         assert(Origin);
1533         MSV->setOrigin(I, Origin);
1534       }
1535     }
1536   };
1537 
1538   typedef Combiner<true> ShadowAndOriginCombiner;
1539   typedef Combiner<false> OriginCombiner;
1540 
1541   /// \brief Propagate origin for arbitrary operation.
1542   void setOriginForNaryOp(Instruction &I) {
1543     if (!MS.TrackOrigins) return;
1544     IRBuilder<> IRB(&I);
1545     OriginCombiner OC(this, IRB);
1546     for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1547       OC.Add(OI->get());
1548     OC.Done(&I);
1549   }
1550 
1551   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
1552     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
1553            "Vector of pointers is not a valid shadow type");
1554     return Ty->isVectorTy() ?
1555       Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
1556       Ty->getPrimitiveSizeInBits();
1557   }
1558 
1559   /// \brief Cast between two shadow types, extending or truncating as
1560   /// necessary.
1561   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
1562                           bool Signed = false) {
1563     Type *srcTy = V->getType();
1564     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
1565       return IRB.CreateIntCast(V, dstTy, Signed);
1566     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
1567         dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
1568       return IRB.CreateIntCast(V, dstTy, Signed);
1569     size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
1570     size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
1571     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
1572     Value *V2 =
1573       IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
1574     return IRB.CreateBitCast(V2, dstTy);
1575     // TODO: handle struct types.
1576   }
1577 
1578   /// \brief Cast an application value to the type of its own shadow.
1579   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
1580     Type *ShadowTy = getShadowTy(V);
1581     if (V->getType() == ShadowTy)
1582       return V;
1583     if (V->getType()->isPtrOrPtrVectorTy())
1584       return IRB.CreatePtrToInt(V, ShadowTy);
1585     else
1586       return IRB.CreateBitCast(V, ShadowTy);
1587   }
1588 
1589   /// \brief Propagate shadow for arbitrary operation.
1590   void handleShadowOr(Instruction &I) {
1591     IRBuilder<> IRB(&I);
1592     ShadowAndOriginCombiner SC(this, IRB);
1593     for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1594       SC.Add(OI->get());
1595     SC.Done(&I);
1596   }
1597 
1598   // \brief Handle multiplication by constant.
1599   //
1600   // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes the corresponding number of lower
  // bits of the result zero as well. We model it by shifting the other operand
1603   // shadow left by the required number of bits. Effectively, we transform
1604   // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
1605   // We use multiplication by 2**N instead of shift to cover the case of
1606   // multiplication by 0, which may occur in some elements of a vector operand.
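  //
  // For example (illustrative): multiplying X by 12 = 3 * 2**2 forces the two
  // lowest bits of the result to zero, so the result shadow is modeled as
  // Sx * 4 (i.e. Sx << 2). For a constant 0 lane, 2**N (with N equal to the
  // bit width) comes out as 0, so that lane's shadow is cleared, which is
  // correct since X * 0 is always 0.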
1607   void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
1608                            Value *OtherArg) {
1609     Constant *ShadowMul;
1610     Type *Ty = ConstArg->getType();
1611     if (Ty->isVectorTy()) {
1612       unsigned NumElements = Ty->getVectorNumElements();
1613       Type *EltTy = Ty->getSequentialElementType();
1614       SmallVector<Constant *, 16> Elements;
1615       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
1616         if (ConstantInt *Elt =
1617                 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
1618           APInt V = Elt->getValue();
1619           APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1620           Elements.push_back(ConstantInt::get(EltTy, V2));
1621         } else {
1622           Elements.push_back(ConstantInt::get(EltTy, 1));
1623         }
1624       }
1625       ShadowMul = ConstantVector::get(Elements);
1626     } else {
1627       if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
1628         APInt V = Elt->getValue();
1629         APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1630         ShadowMul = ConstantInt::get(Ty, V2);
1631       } else {
1632         ShadowMul = ConstantInt::get(Ty, 1);
1633       }
1634     }
1635 
1636     IRBuilder<> IRB(&I);
1637     setShadow(&I,
1638               IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
1639     setOrigin(&I, getOrigin(OtherArg));
1640   }
1641 
1642   void visitMul(BinaryOperator &I) {
1643     Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1644     Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1645     if (constOp0 && !constOp1)
1646       handleMulByConstant(I, constOp0, I.getOperand(1));
1647     else if (constOp1 && !constOp0)
1648       handleMulByConstant(I, constOp1, I.getOperand(0));
1649     else
1650       handleShadowOr(I);
1651   }
1652 
1653   void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
1654   void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
1655   void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
1656   void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
1657   void visitSub(BinaryOperator &I) { handleShadowOr(I); }
1658   void visitXor(BinaryOperator &I) { handleShadowOr(I); }
1659 
1660   void handleDiv(Instruction &I) {
1661     IRBuilder<> IRB(&I);
1662     // Strict on the second argument.
1663     insertShadowCheck(I.getOperand(1), &I);
1664     setShadow(&I, getShadow(&I, 0));
1665     setOrigin(&I, getOrigin(&I, 0));
1666   }
1667 
1668   void visitUDiv(BinaryOperator &I) { handleDiv(I); }
1669   void visitSDiv(BinaryOperator &I) { handleDiv(I); }
1670   void visitFDiv(BinaryOperator &I) { handleDiv(I); }
1671   void visitURem(BinaryOperator &I) { handleDiv(I); }
1672   void visitSRem(BinaryOperator &I) { handleDiv(I); }
1673   void visitFRem(BinaryOperator &I) { handleDiv(I); }
1674 
1675   /// \brief Instrument == and != comparisons.
1676   ///
1677   /// Sometimes the comparison result is known even if some of the bits of the
1678   /// arguments are not.
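  ///
  /// Worked example (illustrative): let A = 0b?1 (bit 1 undefined, bit 0 a
  /// defined 1) and let B = 0b00 be fully defined. Bit 0 of C = A ^ B is a
  /// defined 1, so A != B no matter what the undefined bit is, and the
  /// comparison result is reported as fully initialized.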
1679   void handleEqualityComparison(ICmpInst &I) {
1680     IRBuilder<> IRB(&I);
1681     Value *A = I.getOperand(0);
1682     Value *B = I.getOperand(1);
1683     Value *Sa = getShadow(A);
1684     Value *Sb = getShadow(B);
1685 
1686     // Get rid of pointers and vectors of pointers.
1687     // For ints (and vectors of ints), types of A and Sa match,
1688     // and this is a no-op.
1689     A = IRB.CreatePointerCast(A, Sa->getType());
1690     B = IRB.CreatePointerCast(B, Sb->getType());
1691 
1692     // A == B  <==>  (C = A^B) == 0
1693     // A != B  <==>  (C = A^B) != 0
1694     // Sc = Sa | Sb
1695     Value *C = IRB.CreateXor(A, B);
1696     Value *Sc = IRB.CreateOr(Sa, Sb);
1697     // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
1698     // Result is defined if one of the following is true
1699     // * there is a defined 1 bit in C
1700     // * C is fully defined
1701     // Si = !(C & ~Sc) && Sc
1702     Value *Zero = Constant::getNullValue(Sc->getType());
1703     Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1704     Value *Si =
1705       IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1706                     IRB.CreateICmpEQ(
1707                       IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1708     Si->setName("_msprop_icmp");
1709     setShadow(&I, Si);
1710     setOriginForNaryOp(I);
1711   }
1712 
1713   /// \brief Build the lowest possible value of V, taking into account V's
1714   ///        uninitialized bits.
1715   Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1716                                 bool isSigned) {
1717     if (isSigned) {
1718       // Split shadow into sign bit and other bits.
1719       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1720       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined shadow bit, minimize other undefined bits.
1722       return
1723         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1724     } else {
1725       // Minimize undefined bits.
1726       return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1727     }
1728   }
1729 
1730   /// \brief Build the highest possible value of V, taking into account V's
1731   ///        uninitialized bits.
1732   Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1733                                 bool isSigned) {
1734     if (isSigned) {
1735       // Split shadow into sign bit and other bits.
1736       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1737       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined shadow bit, maximize other undefined bits.
1739       return
1740         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1741     } else {
1742       // Maximize undefined bits.
1743       return IRB.CreateOr(A, Sa);
1744     }
1745   }
1746 
1747   /// \brief Instrument relational comparisons.
1748   ///
1749   /// This function does exact shadow propagation for all relational
1750   /// comparisons of integers, pointers and vectors of those.
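  ///
  /// For example (illustrative), for an unsigned "A < B": if the highest
  /// value A can take (undefined bits maximized) is still below the lowest
  /// value B can take (undefined bits minimized), then A < B holds for every
  /// possible setting of the undefined bits, both comparisons below agree,
  /// and the result shadow is clean.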
1751   /// FIXME: output seems suboptimal when one of the operands is a constant
1752   void handleRelationalComparisonExact(ICmpInst &I) {
1753     IRBuilder<> IRB(&I);
1754     Value *A = I.getOperand(0);
1755     Value *B = I.getOperand(1);
1756     Value *Sa = getShadow(A);
1757     Value *Sb = getShadow(B);
1758 
1759     // Get rid of pointers and vectors of pointers.
1760     // For ints (and vectors of ints), types of A and Sa match,
1761     // and this is a no-op.
1762     A = IRB.CreatePointerCast(A, Sa->getType());
1763     B = IRB.CreatePointerCast(B, Sb->getType());
1764 
1765     // Let [a0, a1] be the interval of possible values of A, taking into account
1766     // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1767     // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
1768     bool IsSigned = I.isSigned();
1769     Value *S1 = IRB.CreateICmp(I.getPredicate(),
1770                                getLowestPossibleValue(IRB, A, Sa, IsSigned),
1771                                getHighestPossibleValue(IRB, B, Sb, IsSigned));
1772     Value *S2 = IRB.CreateICmp(I.getPredicate(),
1773                                getHighestPossibleValue(IRB, A, Sa, IsSigned),
1774                                getLowestPossibleValue(IRB, B, Sb, IsSigned));
1775     Value *Si = IRB.CreateXor(S1, S2);
1776     setShadow(&I, Si);
1777     setOriginForNaryOp(I);
1778   }
1779 
1780   /// \brief Instrument signed relational comparisons.
1781   ///
1782   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
1783   /// bit of the shadow. Everything else is delegated to handleShadowOr().
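  ///
  /// E.g. (illustrative) for "x < 0" only the sign bit of x matters, so the
  /// result is poisoned exactly when the sign bit of x's shadow is set; this
  /// is what the (shadow-of-x <s 0) comparison below computes.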
1784   void handleSignedRelationalComparison(ICmpInst &I) {
1785     Constant *constOp;
1786     Value *op = nullptr;
1787     CmpInst::Predicate pre;
1788     if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
1789       op = I.getOperand(0);
1790       pre = I.getPredicate();
1791     } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
1792       op = I.getOperand(1);
1793       pre = I.getSwappedPredicate();
1794     } else {
1795       handleShadowOr(I);
1796       return;
1797     }
1798 
1799     if ((constOp->isNullValue() &&
1800          (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
1801         (constOp->isAllOnesValue() &&
1802          (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
1803       IRBuilder<> IRB(&I);
1804       Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
1805                                         "_msprop_icmp_s");
1806       setShadow(&I, Shadow);
1807       setOrigin(&I, getOrigin(op));
1808     } else {
1809       handleShadowOr(I);
1810     }
1811   }
1812 
1813   void visitICmpInst(ICmpInst &I) {
1814     if (!ClHandleICmp) {
1815       handleShadowOr(I);
1816       return;
1817     }
1818     if (I.isEquality()) {
1819       handleEqualityComparison(I);
1820       return;
1821     }
1822 
1823     assert(I.isRelational());
1824     if (ClHandleICmpExact) {
1825       handleRelationalComparisonExact(I);
1826       return;
1827     }
1828     if (I.isSigned()) {
1829       handleSignedRelationalComparison(I);
1830       return;
1831     }
1832 
1833     assert(I.isUnsigned());
1834     if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
1835       handleRelationalComparisonExact(I);
1836       return;
1837     }
1838 
1839     handleShadowOr(I);
1840   }
1841 
1842   void visitFCmpInst(FCmpInst &I) {
1843     handleShadowOr(I);
1844   }
1845 
1846   void handleShift(BinaryOperator &I) {
1847     IRBuilder<> IRB(&I);
1848     // If any of the S2 bits are poisoned, the whole thing is poisoned.
1849     // Otherwise perform the same shift on S1.
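    // E.g. (illustrative) for "shl %x, %n": if any bit of %n's shadow is set
    // (per element, for vectors), S2Conv becomes all-ones and poisons the
    // whole result; otherwise the result shadow is just %x's shadow shifted
    // left by %n.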
1850     Value *S1 = getShadow(&I, 0);
1851     Value *S2 = getShadow(&I, 1);
1852     Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1853                                    S2->getType());
1854     Value *V2 = I.getOperand(1);
1855     Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1856     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1857     setOriginForNaryOp(I);
1858   }
1859 
1860   void visitShl(BinaryOperator &I) { handleShift(I); }
1861   void visitAShr(BinaryOperator &I) { handleShift(I); }
1862   void visitLShr(BinaryOperator &I) { handleShift(I); }
1863 
1864   /// \brief Instrument llvm.memmove
1865   ///
1866   /// At this point we don't know if llvm.memmove will be inlined or not.
1867   /// If we don't instrument it and it gets inlined,
1868   /// our interceptor will not kick in and we will lose the memmove.
1869   /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in the case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
1872   ///
1873   /// Similar situation exists for memcpy and memset.
1874   void visitMemMoveInst(MemMoveInst &I) {
1875     IRBuilder<> IRB(&I);
1876     IRB.CreateCall(
1877         MS.MemmoveFn,
1878         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1879          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1880          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1881     I.eraseFromParent();
1882   }
1883 
1884   // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
1886   // FIXME: consider doing manual inline for small constant sizes and proper
1887   // alignment.
1888   void visitMemCpyInst(MemCpyInst &I) {
1889     IRBuilder<> IRB(&I);
1890     IRB.CreateCall(
1891         MS.MemcpyFn,
1892         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1893          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1894          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1895     I.eraseFromParent();
1896   }
1897 
1898   // Same as memcpy.
1899   void visitMemSetInst(MemSetInst &I) {
1900     IRBuilder<> IRB(&I);
1901     IRB.CreateCall(
1902         MS.MemsetFn,
1903         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1904          IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1905          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1906     I.eraseFromParent();
1907   }
1908 
1909   void visitVAStartInst(VAStartInst &I) {
1910     VAHelper->visitVAStartInst(I);
1911   }
1912 
1913   void visitVACopyInst(VACopyInst &I) {
1914     VAHelper->visitVACopyInst(I);
1915   }
1916 
1917   /// \brief Handle vector store-like intrinsics.
1918   ///
1919   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1920   /// has 1 pointer argument and 1 vector argument, returns void.
1921   bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1922     IRBuilder<> IRB(&I);
1923     Value* Addr = I.getArgOperand(0);
1924     Value *Shadow = getShadow(&I, 1);
1925     Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1926 
1927     // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
1929     IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1930 
1931     if (ClCheckAccessAddress)
1932       insertShadowCheck(Addr, &I);
1933 
1934     // FIXME: use ClStoreCleanOrigin
1935     // FIXME: factor out common code from materializeStores
1936     if (MS.TrackOrigins)
1937       IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
1938     return true;
1939   }
1940 
1941   /// \brief Handle vector load-like intrinsics.
1942   ///
1943   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1944   /// has 1 pointer argument, returns a vector.
1945   bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1946     IRBuilder<> IRB(&I);
1947     Value *Addr = I.getArgOperand(0);
1948 
1949     Type *ShadowTy = getShadowTy(&I);
1950     if (PropagateShadow) {
1951       Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1952       // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
1954       setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1955     } else {
1956       setShadow(&I, getCleanShadow(&I));
1957     }
1958 
1959     if (ClCheckAccessAddress)
1960       insertShadowCheck(Addr, &I);
1961 
1962     if (MS.TrackOrigins) {
1963       if (PropagateShadow)
1964         setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
1965       else
1966         setOrigin(&I, getCleanOrigin());
1967     }
1968     return true;
1969   }
1970 
1971   /// \brief Handle (SIMD arithmetic)-like intrinsics.
1972   ///
1973   /// Instrument intrinsics with any number of arguments of the same type,
1974   /// equal to the return type. The type should be simple (no aggregates or
1975   /// pointers; vectors are fine).
1976   /// Caller guarantees that this intrinsic does not access memory.
1977   bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1978     Type *RetTy = I.getType();
1979     if (!(RetTy->isIntOrIntVectorTy() ||
1980           RetTy->isFPOrFPVectorTy() ||
1981           RetTy->isX86_MMXTy()))
1982       return false;
1983 
1984     unsigned NumArgOperands = I.getNumArgOperands();
1985 
1986     for (unsigned i = 0; i < NumArgOperands; ++i) {
1987       Type *Ty = I.getArgOperand(i)->getType();
1988       if (Ty != RetTy)
1989         return false;
1990     }
1991 
1992     IRBuilder<> IRB(&I);
1993     ShadowAndOriginCombiner SC(this, IRB);
1994     for (unsigned i = 0; i < NumArgOperands; ++i)
1995       SC.Add(I.getArgOperand(i));
1996     SC.Done(&I);
1997 
1998     return true;
1999   }
2000 
2001   /// \brief Heuristically instrument unknown intrinsics.
2002   ///
2003   /// The main purpose of this code is to do something reasonable with all
2004   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2005   /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
2007   /// sure that we know what the intrinsic does.
2008   ///
2009   /// We special-case intrinsics where this approach fails. See llvm.bswap
2010   /// handling as an example of that.
2011   bool handleUnknownIntrinsic(IntrinsicInst &I) {
2012     unsigned NumArgOperands = I.getNumArgOperands();
2013     if (NumArgOperands == 0)
2014       return false;
2015 
2016     if (NumArgOperands == 2 &&
2017         I.getArgOperand(0)->getType()->isPointerTy() &&
2018         I.getArgOperand(1)->getType()->isVectorTy() &&
2019         I.getType()->isVoidTy() &&
2020         !I.onlyReadsMemory()) {
2021       // This looks like a vector store.
2022       return handleVectorStoreIntrinsic(I);
2023     }
2024 
2025     if (NumArgOperands == 1 &&
2026         I.getArgOperand(0)->getType()->isPointerTy() &&
2027         I.getType()->isVectorTy() &&
2028         I.onlyReadsMemory()) {
2029       // This looks like a vector load.
2030       return handleVectorLoadIntrinsic(I);
2031     }
2032 
2033     if (I.doesNotAccessMemory())
2034       if (maybeHandleSimpleNomemIntrinsic(I))
2035         return true;
2036 
2037     // FIXME: detect and handle SSE maskstore/maskload
2038     return false;
2039   }
2040 
2041   void handleBswap(IntrinsicInst &I) {
2042     IRBuilder<> IRB(&I);
2043     Value *Op = I.getArgOperand(0);
2044     Type *OpType = Op->getType();
2045     Function *BswapFunc = Intrinsic::getDeclaration(
2046       F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2047     setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2048     setOrigin(&I, getOrigin(Op));
2049   }
2050 
  // \brief Instrument vector convert intrinsic.
2052   //
2053   // This function instruments intrinsics like cvtsi2ss:
2054   // %Out = int_xxx_cvtyyy(%ConvertOp)
2055   // or
2056   // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // The intrinsic converts \p NumUsedElements elements of \p ConvertOp to the
  // same number of \p Out elements, and (if it has 2 arguments) copies the
  // rest of the elements from \p CopyOp.
  // In most cases conversion involves a floating-point value which may trigger
  // a hardware exception when not fully initialized. For this reason we require
2062   // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2063   // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2064   // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2065   // return a fully initialized value.
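  //
  // For example (illustrative), for a cvtsd2ss-style intrinsic with
  // NumUsedElements == 1: a check is inserted on element 0 of ConvertOp's
  // shadow, element 0 of the result shadow is set to zero, and the remaining
  // elements inherit CopyOp's shadow unchanged.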
2066   void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2067     IRBuilder<> IRB(&I);
2068     Value *CopyOp, *ConvertOp;
2069 
2070     switch (I.getNumArgOperands()) {
2071     case 3:
      assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
      // FALLTHROUGH
2073     case 2:
2074       CopyOp = I.getArgOperand(0);
2075       ConvertOp = I.getArgOperand(1);
2076       break;
2077     case 1:
2078       ConvertOp = I.getArgOperand(0);
2079       CopyOp = nullptr;
2080       break;
2081     default:
2082       llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2083     }
2084 
2085     // The first *NumUsedElements* elements of ConvertOp are converted to the
2086     // same number of output elements. The rest of the output is copied from
2087     // CopyOp, or (if not available) filled with zeroes.
2088     // Combine shadow for elements of ConvertOp that are used in this operation,
2089     // and insert a check.
2090     // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2091     // int->any conversion.
2092     Value *ConvertShadow = getShadow(ConvertOp);
2093     Value *AggShadow = nullptr;
2094     if (ConvertOp->getType()->isVectorTy()) {
2095       AggShadow = IRB.CreateExtractElement(
2096           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2097       for (int i = 1; i < NumUsedElements; ++i) {
2098         Value *MoreShadow = IRB.CreateExtractElement(
2099             ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2100         AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2101       }
2102     } else {
2103       AggShadow = ConvertShadow;
2104     }
2105     assert(AggShadow->getType()->isIntegerTy());
2106     insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2107 
2108     // Build result shadow by zero-filling parts of CopyOp shadow that come from
2109     // ConvertOp.
2110     if (CopyOp) {
2111       assert(CopyOp->getType() == I.getType());
2112       assert(CopyOp->getType()->isVectorTy());
2113       Value *ResultShadow = getShadow(CopyOp);
2114       Type *EltTy = ResultShadow->getType()->getVectorElementType();
2115       for (int i = 0; i < NumUsedElements; ++i) {
2116         ResultShadow = IRB.CreateInsertElement(
2117             ResultShadow, ConstantInt::getNullValue(EltTy),
2118             ConstantInt::get(IRB.getInt32Ty(), i));
2119       }
2120       setShadow(&I, ResultShadow);
2121       setOrigin(&I, getOrigin(CopyOp));
2122     } else {
2123       setShadow(&I, getCleanShadow(&I));
2124       setOrigin(&I, getCleanOrigin());
2125     }
2126   }
2127 
  // Given a scalar or vector, extract the lower 64 bits (or fewer), and
  // return all zeroes if it is zero, and all ones otherwise.
2130   Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2131     if (S->getType()->isVectorTy())
2132       S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2133     assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2134     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2135     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2136   }
2137 
2138   // Given a vector, extract its first element, and return all
2139   // zeroes if it is zero, and all ones otherwise.
2140   Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2141     Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2142     Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2143     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2144   }
2145 
2146   Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2147     Type *T = S->getType();
2148     assert(T->isVectorTy());
2149     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2150     return IRB.CreateSExt(S2, T);
2151   }
2152 
  // \brief Instrument vector shift intrinsic.
2154   //
2155   // This function instruments intrinsics like int_x86_avx2_psll_w.
  // The intrinsic shifts %In by %ShiftSize bits.
  // %ShiftSize may be a vector. In that case the lower 64 bits determine the
  // shift size, and the rest is ignored. Behavior is defined even if the shift
  // size is greater than the register (or field) width.
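  //
  // For example (illustrative), for psll.w: if the (lower 64 bits of the)
  // shift-size operand's shadow is non-zero, the entire result shadow becomes
  // all ones; otherwise the first operand's shadow is shifted by the same,
  // fully defined, amount by calling the very same intrinsic on the shadow.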
2160   void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2161     assert(I.getNumArgOperands() == 2);
2162     IRBuilder<> IRB(&I);
2163     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2164     // Otherwise perform the same shift on S1.
2165     Value *S1 = getShadow(&I, 0);
2166     Value *S2 = getShadow(&I, 1);
2167     Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2168                              : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2169     Value *V1 = I.getOperand(0);
2170     Value *V2 = I.getOperand(1);
2171     Value *Shift = IRB.CreateCall(I.getCalledValue(),
2172                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
2173     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2174     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2175     setOriginForNaryOp(I);
2176   }
2177 
2178   // \brief Get an X86_MMX-sized vector type.
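  // E.g. getMMXVectorTy(16) is <4 x i16> (illustrative; 64 / EltSizeInBits
  // elements of EltSizeInBits bits each).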
2179   Type *getMMXVectorTy(unsigned EltSizeInBits) {
2180     const unsigned X86_MMXSizeInBits = 64;
2181     return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2182                            X86_MMXSizeInBits / EltSizeInBits);
2183   }
2184 
2185   // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
2186   // intrinsic.
2187   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2188     switch (id) {
2189       case llvm::Intrinsic::x86_sse2_packsswb_128:
2190       case llvm::Intrinsic::x86_sse2_packuswb_128:
2191         return llvm::Intrinsic::x86_sse2_packsswb_128;
2192 
2193       case llvm::Intrinsic::x86_sse2_packssdw_128:
2194       case llvm::Intrinsic::x86_sse41_packusdw:
2195         return llvm::Intrinsic::x86_sse2_packssdw_128;
2196 
2197       case llvm::Intrinsic::x86_avx2_packsswb:
2198       case llvm::Intrinsic::x86_avx2_packuswb:
2199         return llvm::Intrinsic::x86_avx2_packsswb;
2200 
2201       case llvm::Intrinsic::x86_avx2_packssdw:
2202       case llvm::Intrinsic::x86_avx2_packusdw:
2203         return llvm::Intrinsic::x86_avx2_packssdw;
2204 
2205       case llvm::Intrinsic::x86_mmx_packsswb:
2206       case llvm::Intrinsic::x86_mmx_packuswb:
2207         return llvm::Intrinsic::x86_mmx_packsswb;
2208 
2209       case llvm::Intrinsic::x86_mmx_packssdw:
2210         return llvm::Intrinsic::x86_mmx_packssdw;
2211       default:
2212         llvm_unreachable("unexpected intrinsic id");
2213     }
2214   }
2215 
  // \brief Instrument vector pack intrinsic.
2217   //
  // This function instruments intrinsics like x86_mmx_packsswb, which pack
  // elements of 2 input vectors into half as many bits with saturation.
2220   // Shadow is propagated with the signed variant of the same intrinsic applied
2221   // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2222   // EltSizeInBits is used only for x86mmx arguments.
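  //
  // For example (illustrative), for packsswb: each input element whose shadow
  // is non-zero is first widened to an all-ones (-1) element; the signed
  // saturating pack keeps -1 as -1, so any poisoned input element yields a
  // fully poisoned output element, while clean (zero) elements stay clean.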
2223   void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2224     assert(I.getNumArgOperands() == 2);
2225     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2226     IRBuilder<> IRB(&I);
2227     Value *S1 = getShadow(&I, 0);
2228     Value *S2 = getShadow(&I, 1);
2229     assert(isX86_MMX || S1->getType()->isVectorTy());
2230 
2231     // SExt and ICmpNE below must apply to individual elements of input vectors.
2232     // In case of x86mmx arguments, cast them to appropriate vector types and
2233     // back.
2234     Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2235     if (isX86_MMX) {
2236       S1 = IRB.CreateBitCast(S1, T);
2237       S2 = IRB.CreateBitCast(S2, T);
2238     }
2239     Value *S1_ext = IRB.CreateSExt(
2240         IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
2241     Value *S2_ext = IRB.CreateSExt(
2242         IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
2243     if (isX86_MMX) {
2244       Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2245       S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2246       S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2247     }
2248 
2249     Function *ShadowFn = Intrinsic::getDeclaration(
2250         F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2251 
2252     Value *S =
2253         IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2254     if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2255     setShadow(&I, S);
2256     setOriginForNaryOp(I);
2257   }
2258 
  // \brief Instrument sum-of-absolute-differences intrinsic.
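  //
  // For example (illustrative), psadbw results fit in 16 bits per element, so
  // after OR-ing the input shadows and widening any poison to all-ones, the
  // shadow is shifted right so that only the low 16 bits of each result
  // element can end up poisoned.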
2260   void handleVectorSadIntrinsic(IntrinsicInst &I) {
2261     const unsigned SignificantBitsPerResultElement = 16;
2262     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2263     Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2264     unsigned ZeroBitsPerResultElement =
2265         ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2266 
2267     IRBuilder<> IRB(&I);
2268     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2269     S = IRB.CreateBitCast(S, ResTy);
2270     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2271                        ResTy);
2272     S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2273     S = IRB.CreateBitCast(S, getShadowTy(&I));
2274     setShadow(&I, S);
2275     setOriginForNaryOp(I);
2276   }
2277 
2278   // \brief Instrument multiply-add intrinsic.
2279   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2280                                   unsigned EltSizeInBits = 0) {
2281     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2282     Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2283     IRBuilder<> IRB(&I);
2284     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2285     S = IRB.CreateBitCast(S, ResTy);
2286     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2287                        ResTy);
2288     S = IRB.CreateBitCast(S, getShadowTy(&I));
2289     setShadow(&I, S);
2290     setOriginForNaryOp(I);
2291   }
2292 
2293   // \brief Instrument compare-packed intrinsic.
2294   // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
2295   // all-ones shadow.
2296   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
2297     IRBuilder<> IRB(&I);
2298     Type *ResTy = getShadowTy(&I);
2299     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2300     Value *S = IRB.CreateSExt(
2301         IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
2302     setShadow(&I, S);
2303     setOriginForNaryOp(I);
2304   }
2305 
2306   // \brief Instrument compare-scalar intrinsic.
2307   // This handles both cmp* intrinsics which return the result in the first
2308   // element of a vector, and comi* which return the result as i32.
2309   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
2310     IRBuilder<> IRB(&I);
2311     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2312     Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2313     setShadow(&I, S);
2314     setOriginForNaryOp(I);
2315   }
2316 
2317   void visitIntrinsicInst(IntrinsicInst &I) {
2318     switch (I.getIntrinsicID()) {
2319     case llvm::Intrinsic::bswap:
2320       handleBswap(I);
2321       break;
2322     case llvm::Intrinsic::x86_avx512_vcvtsd2usi64:
2323     case llvm::Intrinsic::x86_avx512_vcvtsd2usi32:
2324     case llvm::Intrinsic::x86_avx512_vcvtss2usi64:
2325     case llvm::Intrinsic::x86_avx512_vcvtss2usi32:
2326     case llvm::Intrinsic::x86_avx512_cvttss2usi64:
2327     case llvm::Intrinsic::x86_avx512_cvttss2usi:
2328     case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
2329     case llvm::Intrinsic::x86_avx512_cvttsd2usi:
2330     case llvm::Intrinsic::x86_avx512_cvtusi2sd:
2331     case llvm::Intrinsic::x86_avx512_cvtusi2ss:
2332     case llvm::Intrinsic::x86_avx512_cvtusi642sd:
2333     case llvm::Intrinsic::x86_avx512_cvtusi642ss:
2334     case llvm::Intrinsic::x86_sse2_cvtsd2si64:
2335     case llvm::Intrinsic::x86_sse2_cvtsd2si:
2336     case llvm::Intrinsic::x86_sse2_cvtsd2ss:
2337     case llvm::Intrinsic::x86_sse2_cvtsi2sd:
2338     case llvm::Intrinsic::x86_sse2_cvtsi642sd:
2339     case llvm::Intrinsic::x86_sse2_cvtss2sd:
2340     case llvm::Intrinsic::x86_sse2_cvttsd2si64:
2341     case llvm::Intrinsic::x86_sse2_cvttsd2si:
2342     case llvm::Intrinsic::x86_sse_cvtsi2ss:
2343     case llvm::Intrinsic::x86_sse_cvtsi642ss:
2344     case llvm::Intrinsic::x86_sse_cvtss2si64:
2345     case llvm::Intrinsic::x86_sse_cvtss2si:
2346     case llvm::Intrinsic::x86_sse_cvttss2si64:
2347     case llvm::Intrinsic::x86_sse_cvttss2si:
2348       handleVectorConvertIntrinsic(I, 1);
2349       break;
2350     case llvm::Intrinsic::x86_sse_cvtps2pi:
2351     case llvm::Intrinsic::x86_sse_cvttps2pi:
2352       handleVectorConvertIntrinsic(I, 2);
2353       break;
2354     case llvm::Intrinsic::x86_avx2_psll_w:
2355     case llvm::Intrinsic::x86_avx2_psll_d:
2356     case llvm::Intrinsic::x86_avx2_psll_q:
2357     case llvm::Intrinsic::x86_avx2_pslli_w:
2358     case llvm::Intrinsic::x86_avx2_pslli_d:
2359     case llvm::Intrinsic::x86_avx2_pslli_q:
2360     case llvm::Intrinsic::x86_avx2_psrl_w:
2361     case llvm::Intrinsic::x86_avx2_psrl_d:
2362     case llvm::Intrinsic::x86_avx2_psrl_q:
2363     case llvm::Intrinsic::x86_avx2_psra_w:
2364     case llvm::Intrinsic::x86_avx2_psra_d:
2365     case llvm::Intrinsic::x86_avx2_psrli_w:
2366     case llvm::Intrinsic::x86_avx2_psrli_d:
2367     case llvm::Intrinsic::x86_avx2_psrli_q:
2368     case llvm::Intrinsic::x86_avx2_psrai_w:
2369     case llvm::Intrinsic::x86_avx2_psrai_d:
2370     case llvm::Intrinsic::x86_sse2_psll_w:
2371     case llvm::Intrinsic::x86_sse2_psll_d:
2372     case llvm::Intrinsic::x86_sse2_psll_q:
2373     case llvm::Intrinsic::x86_sse2_pslli_w:
2374     case llvm::Intrinsic::x86_sse2_pslli_d:
2375     case llvm::Intrinsic::x86_sse2_pslli_q:
2376     case llvm::Intrinsic::x86_sse2_psrl_w:
2377     case llvm::Intrinsic::x86_sse2_psrl_d:
2378     case llvm::Intrinsic::x86_sse2_psrl_q:
2379     case llvm::Intrinsic::x86_sse2_psra_w:
2380     case llvm::Intrinsic::x86_sse2_psra_d:
2381     case llvm::Intrinsic::x86_sse2_psrli_w:
2382     case llvm::Intrinsic::x86_sse2_psrli_d:
2383     case llvm::Intrinsic::x86_sse2_psrli_q:
2384     case llvm::Intrinsic::x86_sse2_psrai_w:
2385     case llvm::Intrinsic::x86_sse2_psrai_d:
2386     case llvm::Intrinsic::x86_mmx_psll_w:
2387     case llvm::Intrinsic::x86_mmx_psll_d:
2388     case llvm::Intrinsic::x86_mmx_psll_q:
2389     case llvm::Intrinsic::x86_mmx_pslli_w:
2390     case llvm::Intrinsic::x86_mmx_pslli_d:
2391     case llvm::Intrinsic::x86_mmx_pslli_q:
2392     case llvm::Intrinsic::x86_mmx_psrl_w:
2393     case llvm::Intrinsic::x86_mmx_psrl_d:
2394     case llvm::Intrinsic::x86_mmx_psrl_q:
2395     case llvm::Intrinsic::x86_mmx_psra_w:
2396     case llvm::Intrinsic::x86_mmx_psra_d:
2397     case llvm::Intrinsic::x86_mmx_psrli_w:
2398     case llvm::Intrinsic::x86_mmx_psrli_d:
2399     case llvm::Intrinsic::x86_mmx_psrli_q:
2400     case llvm::Intrinsic::x86_mmx_psrai_w:
2401     case llvm::Intrinsic::x86_mmx_psrai_d:
2402       handleVectorShiftIntrinsic(I, /* Variable */ false);
2403       break;
2404     case llvm::Intrinsic::x86_avx2_psllv_d:
2405     case llvm::Intrinsic::x86_avx2_psllv_d_256:
2406     case llvm::Intrinsic::x86_avx2_psllv_q:
2407     case llvm::Intrinsic::x86_avx2_psllv_q_256:
2408     case llvm::Intrinsic::x86_avx2_psrlv_d:
2409     case llvm::Intrinsic::x86_avx2_psrlv_d_256:
2410     case llvm::Intrinsic::x86_avx2_psrlv_q:
2411     case llvm::Intrinsic::x86_avx2_psrlv_q_256:
2412     case llvm::Intrinsic::x86_avx2_psrav_d:
2413     case llvm::Intrinsic::x86_avx2_psrav_d_256:
2414       handleVectorShiftIntrinsic(I, /* Variable */ true);
2415       break;
2416 
2417     case llvm::Intrinsic::x86_sse2_packsswb_128:
2418     case llvm::Intrinsic::x86_sse2_packssdw_128:
2419     case llvm::Intrinsic::x86_sse2_packuswb_128:
2420     case llvm::Intrinsic::x86_sse41_packusdw:
2421     case llvm::Intrinsic::x86_avx2_packsswb:
2422     case llvm::Intrinsic::x86_avx2_packssdw:
2423     case llvm::Intrinsic::x86_avx2_packuswb:
2424     case llvm::Intrinsic::x86_avx2_packusdw:
2425       handleVectorPackIntrinsic(I);
2426       break;
2427 
2428     case llvm::Intrinsic::x86_mmx_packsswb:
2429     case llvm::Intrinsic::x86_mmx_packuswb:
2430       handleVectorPackIntrinsic(I, 16);
2431       break;
2432 
2433     case llvm::Intrinsic::x86_mmx_packssdw:
2434       handleVectorPackIntrinsic(I, 32);
2435       break;
2436 
2437     case llvm::Intrinsic::x86_mmx_psad_bw:
2438     case llvm::Intrinsic::x86_sse2_psad_bw:
2439     case llvm::Intrinsic::x86_avx2_psad_bw:
2440       handleVectorSadIntrinsic(I);
2441       break;
2442 
2443     case llvm::Intrinsic::x86_sse2_pmadd_wd:
2444     case llvm::Intrinsic::x86_avx2_pmadd_wd:
2445     case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
2446     case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
2447       handleVectorPmaddIntrinsic(I);
2448       break;
2449 
2450     case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
2451       handleVectorPmaddIntrinsic(I, 8);
2452       break;
2453 
2454     case llvm::Intrinsic::x86_mmx_pmadd_wd:
2455       handleVectorPmaddIntrinsic(I, 16);
2456       break;
2457 
2458     case llvm::Intrinsic::x86_sse_cmp_ss:
2459     case llvm::Intrinsic::x86_sse2_cmp_sd:
2460     case llvm::Intrinsic::x86_sse_comieq_ss:
2461     case llvm::Intrinsic::x86_sse_comilt_ss:
2462     case llvm::Intrinsic::x86_sse_comile_ss:
2463     case llvm::Intrinsic::x86_sse_comigt_ss:
2464     case llvm::Intrinsic::x86_sse_comige_ss:
2465     case llvm::Intrinsic::x86_sse_comineq_ss:
2466     case llvm::Intrinsic::x86_sse_ucomieq_ss:
2467     case llvm::Intrinsic::x86_sse_ucomilt_ss:
2468     case llvm::Intrinsic::x86_sse_ucomile_ss:
2469     case llvm::Intrinsic::x86_sse_ucomigt_ss:
2470     case llvm::Intrinsic::x86_sse_ucomige_ss:
2471     case llvm::Intrinsic::x86_sse_ucomineq_ss:
2472     case llvm::Intrinsic::x86_sse2_comieq_sd:
2473     case llvm::Intrinsic::x86_sse2_comilt_sd:
2474     case llvm::Intrinsic::x86_sse2_comile_sd:
2475     case llvm::Intrinsic::x86_sse2_comigt_sd:
2476     case llvm::Intrinsic::x86_sse2_comige_sd:
2477     case llvm::Intrinsic::x86_sse2_comineq_sd:
2478     case llvm::Intrinsic::x86_sse2_ucomieq_sd:
2479     case llvm::Intrinsic::x86_sse2_ucomilt_sd:
2480     case llvm::Intrinsic::x86_sse2_ucomile_sd:
2481     case llvm::Intrinsic::x86_sse2_ucomigt_sd:
2482     case llvm::Intrinsic::x86_sse2_ucomige_sd:
2483     case llvm::Intrinsic::x86_sse2_ucomineq_sd:
2484       handleVectorCompareScalarIntrinsic(I);
2485       break;
2486 
2487     case llvm::Intrinsic::x86_sse_cmp_ps:
2488     case llvm::Intrinsic::x86_sse2_cmp_pd:
2489       // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
      // generates reasonable-looking IR that fails in the backend with "Do not
2491       // know how to split the result of this operator!".
2492       handleVectorComparePackedIntrinsic(I);
2493       break;
2494 
2495     default:
2496       if (!handleUnknownIntrinsic(I))
2497         visitInstruction(I);
2498       break;
2499     }
2500   }
2501 
2502   void visitCallSite(CallSite CS) {
2503     Instruction &I = *CS.getInstruction();
2504     assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
2505     if (CS.isCall()) {
2506       CallInst *Call = cast<CallInst>(&I);
2507 
2508       // For inline asm, do the usual thing: check argument shadow and mark all
2509       // outputs as clean. Note that any side effects of the inline asm that are
2510       // not immediately visible in its constraints are not handled.
2511       if (Call->isInlineAsm()) {
2512         visitInstruction(I);
2513         return;
2514       }
2515 
2516       assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
2517 
2518       // We are going to insert code that relies on the fact that the callee
2519       // will become a non-readonly function after it is instrumented by us. To
2520       // prevent this code from being optimized out, mark that function
2521       // non-readonly in advance.
2522       if (Function *Func = Call->getCalledFunction()) {
2523         // Clear out readonly/readnone attributes.
2524         AttrBuilder B;
2525         B.addAttribute(Attribute::ReadOnly)
2526           .addAttribute(Attribute::ReadNone);
2527         Func->removeAttributes(AttributeSet::FunctionIndex,
2528                                AttributeSet::get(Func->getContext(),
2529                                                  AttributeSet::FunctionIndex,
2530                                                  B));
2531       }
2532     }
2533     IRBuilder<> IRB(&I);
2534 
2535     unsigned ArgOffset = 0;
2536     DEBUG(dbgs() << "  CallSite: " << I << "\n");
2537     for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2538          ArgIt != End; ++ArgIt) {
2539       Value *A = *ArgIt;
2540       unsigned i = ArgIt - CS.arg_begin();
2541       if (!A->getType()->isSized()) {
2542         DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
2543         continue;
2544       }
2545       unsigned Size = 0;
2546       Value *Store = nullptr;
      // Compute the shadow for the argument even if it is ByVal, because
      // in that case getShadow() will copy the actual argument shadow to
      // __msan_param_tls.
2550       Value *ArgShadow = getShadow(A);
2551       Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
2552       DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
2553             " Shadow: " << *ArgShadow << "\n");
2554       bool ArgIsInitialized = false;
2555       const DataLayout &DL = F.getParent()->getDataLayout();
2556       if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
2557         assert(A->getType()->isPointerTy() &&
2558                "ByVal argument is not a pointer!");
2559         Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
2560         if (ArgOffset + Size > kParamTLSSize) break;
2561         unsigned ParamAlignment = CS.getParamAlignment(i + 1);
2562         unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
2563         Store = IRB.CreateMemCpy(ArgShadowBase,
2564                                  getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
2565                                  Size, Alignment);
2566       } else {
2567         Size = DL.getTypeAllocSize(A->getType());
2568         if (ArgOffset + Size > kParamTLSSize) break;
2569         Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
2570                                        kShadowTLSAlignment);
2571         Constant *Cst = dyn_cast<Constant>(ArgShadow);
2572         if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
2573       }
2574       if (MS.TrackOrigins && !ArgIsInitialized)
2575         IRB.CreateStore(getOrigin(A),
2576                         getOriginPtrForArgument(A, IRB, ArgOffset));
2577       (void)Store;
2578       assert(Size != 0 && Store != nullptr);
2579       DEBUG(dbgs() << "  Param:" << *Store << "\n");
2580       ArgOffset += alignTo(Size, 8);
2581     }
2582     DEBUG(dbgs() << "  done with call args\n");
2583 
2584     FunctionType *FT =
2585       cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
2586     if (FT->isVarArg()) {
2587       VAHelper->visitCallSite(CS, IRB);
2588     }
2589 
2590     // Now, get the shadow for the RetVal.
2591     if (!I.getType()->isSized()) return;
2592     // Don't emit the epilogue for musttail call returns.
2593     if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
2594     IRBuilder<> IRBBefore(&I);
2595     // Until we have full dynamic coverage, make sure the retval shadow is 0.
2596     Value *Base = getShadowPtrForRetval(&I, IRBBefore);
2597     IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
2598     BasicBlock::iterator NextInsn;
2599     if (CS.isCall()) {
2600       NextInsn = ++I.getIterator();
2601       assert(NextInsn != I.getParent()->end());
2602     } else {
2603       BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
2604       if (!NormalDest->getSinglePredecessor()) {
2605         // FIXME: this case is tricky, so we are just conservative here.
2606         // Perhaps we need to split the edge between this BB and NormalDest,
2607         // but a naive attempt to use SplitEdge leads to a crash.
2608         setShadow(&I, getCleanShadow(&I));
2609         setOrigin(&I, getCleanOrigin());
2610         return;
2611       }
2612       NextInsn = NormalDest->getFirstInsertionPt();
2613       assert(NextInsn != NormalDest->end() &&
2614              "Could not find insertion point for retval shadow load");
2615     }
2616     IRBuilder<> IRBAfter(&*NextInsn);
2617     Value *RetvalShadow =
2618       IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2619                                  kShadowTLSAlignment, "_msret");
2620     setShadow(&I, RetvalShadow);
2621     if (MS.TrackOrigins)
2622       setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2623   }
2624 
2625   bool isAMustTailRetVal(Value *RetVal) {
2626     if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
2627       RetVal = I->getOperand(0);
2628     }
2629     if (auto *I = dyn_cast<CallInst>(RetVal)) {
2630       return I->isMustTailCall();
2631     }
2632     return false;
2633   }
2634 
2635   void visitReturnInst(ReturnInst &I) {
2636     IRBuilder<> IRB(&I);
2637     Value *RetVal = I.getReturnValue();
2638     if (!RetVal) return;
2639     // Don't emit the epilogue for musttail call returns.
2640     if (isAMustTailRetVal(RetVal)) return;
2641     Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2642     if (CheckReturnValue) {
2643       insertShadowCheck(RetVal, &I);
2644       Value *Shadow = getCleanShadow(RetVal);
2645       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2646     } else {
2647       Value *Shadow = getShadow(RetVal);
2648       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2649       // FIXME: make it conditional if ClStoreCleanOrigin==0
2650       if (MS.TrackOrigins)
2651         IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2652     }
2653   }
2654 
2655   void visitPHINode(PHINode &I) {
2656     IRBuilder<> IRB(&I);
2657     if (!PropagateShadow) {
2658       setShadow(&I, getCleanShadow(&I));
2659       setOrigin(&I, getCleanOrigin());
2660       return;
2661     }
2662 
2663     ShadowPHINodes.push_back(&I);
2664     setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2665                                 "_msphi_s"));
2666     if (MS.TrackOrigins)
2667       setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2668                                   "_msphi_o"));
2669   }
2670 
2671   void visitAllocaInst(AllocaInst &I) {
2672     setShadow(&I, getCleanShadow(&I));
2673     setOrigin(&I, getCleanOrigin());
2674     IRBuilder<> IRB(I.getNextNode());
2675     const DataLayout &DL = F.getParent()->getDataLayout();
2676     uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
2677     if (PoisonStack && ClPoisonStackWithCall) {
2678       IRB.CreateCall(MS.MsanPoisonStackFn,
2679                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2680                       ConstantInt::get(MS.IntptrTy, Size)});
2681     } else {
2682       Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
2683       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2684       IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
2685     }
2686 
2687     if (PoisonStack && MS.TrackOrigins) {
2688       SmallString<2048> StackDescriptionStorage;
2689       raw_svector_ostream StackDescription(StackDescriptionStorage);
2690       // We create a string with a description of the stack allocation and
2691       // pass it into __msan_set_alloca_origin.
2692       // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be
      // replaced by the runtime with the stack origin ID on the first call.
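      // For example, an alloca named %buf in function foo() produces the
      // descriptor string "----buf@foo".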
2695       StackDescription << "----" << I.getName() << "@" << F.getName();
2696       Value *Descr =
2697           createPrivateNonConstGlobalForString(*F.getParent(),
2698                                                StackDescription.str());
2699 
2700       IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
2701                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2702                       ConstantInt::get(MS.IntptrTy, Size),
2703                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
2704                       IRB.CreatePointerCast(&F, MS.IntptrTy)});
2705     }
2706   }
2707 
2708   void visitSelectInst(SelectInst& I) {
2709     IRBuilder<> IRB(&I);
2710     // a = select b, c, d
2711     Value *B = I.getCondition();
2712     Value *C = I.getTrueValue();
2713     Value *D = I.getFalseValue();
2714     Value *Sb = getShadow(B);
2715     Value *Sc = getShadow(C);
2716     Value *Sd = getShadow(D);
2717 
2718     // Result shadow if condition shadow is 0.
2719     Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
2720     Value *Sa1;
2721     if (I.getType()->isAggregateType()) {
2722       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
2723       // an extra "select". This results in much more compact IR.
2724       // Sa = select Sb, poisoned, (select b, Sc, Sd)
2725       Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
2726     } else {
2727       // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
2728       // If Sb (condition is poisoned), look for bits in c and d that are equal
2729       // and both unpoisoned.
2730       // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
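      // For example, with c = 0b1100, d = 0b1010 and Sc = Sd = 0 (both fully
      // initialized), Sa1 = (c^d) = 0b0110: only the bits where c and d
      // disagree are reported as poisoned when the condition is poisoned.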
2731 
2732       // Cast arguments to shadow-compatible type.
2733       C = CreateAppToShadowCast(IRB, C);
2734       D = CreateAppToShadowCast(IRB, D);
2735 
2736       // Result shadow if condition shadow is 1.
2737       Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
2738     }
2739     Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
2740     setShadow(&I, Sa);
2741     if (MS.TrackOrigins) {
2742       // Origins are always i32, so any vector conditions must be flattened.
2743       // FIXME: consider tracking vector origins for app vectors?
2744       if (B->getType()->isVectorTy()) {
2745         Type *FlatTy = getShadowTyNoVec(B->getType());
2746         B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
2747                                 ConstantInt::getNullValue(FlatTy));
2748         Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
2749                                       ConstantInt::getNullValue(FlatTy));
2750       }
2751       // a = select b, c, d
2752       // Oa = Sb ? Ob : (b ? Oc : Od)
2753       setOrigin(
2754           &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
2755                                IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
2756                                                 getOrigin(I.getFalseValue()))));
2757     }
2758   }
2759 
2760   void visitLandingPadInst(LandingPadInst &I) {
2761     // Do nothing.
2762     // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
2763     setShadow(&I, getCleanShadow(&I));
2764     setOrigin(&I, getCleanOrigin());
2765   }
2766 
2767   void visitCatchSwitchInst(CatchSwitchInst &I) {
2768     setShadow(&I, getCleanShadow(&I));
2769     setOrigin(&I, getCleanOrigin());
2770   }
2771 
2772   void visitFuncletPadInst(FuncletPadInst &I) {
2773     setShadow(&I, getCleanShadow(&I));
2774     setOrigin(&I, getCleanOrigin());
2775   }
2776 
2777   void visitGetElementPtrInst(GetElementPtrInst &I) {
2778     handleShadowOr(I);
2779   }
2780 
2781   void visitExtractValueInst(ExtractValueInst &I) {
2782     IRBuilder<> IRB(&I);
2783     Value *Agg = I.getAggregateOperand();
2784     DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
2785     Value *AggShadow = getShadow(Agg);
2786     DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
2787     Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2788     DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
2789     setShadow(&I, ResShadow);
2790     setOriginForNaryOp(I);
2791   }
2792 
2793   void visitInsertValueInst(InsertValueInst &I) {
2794     IRBuilder<> IRB(&I);
2795     DEBUG(dbgs() << "InsertValue:  " << I << "\n");
2796     Value *AggShadow = getShadow(I.getAggregateOperand());
2797     Value *InsShadow = getShadow(I.getInsertedValueOperand());
2798     DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
2799     DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
2800     Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2801     DEBUG(dbgs() << "   Res:        " << *Res << "\n");
2802     setShadow(&I, Res);
2803     setOriginForNaryOp(I);
2804   }
2805 
2806   void dumpInst(Instruction &I) {
2807     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2808       errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
2809     } else {
2810       errs() << "ZZZ " << I.getOpcodeName() << "\n";
2811     }
2812     errs() << "QQQ " << I << "\n";
2813   }
2814 
2815   void visitResumeInst(ResumeInst &I) {
2816     DEBUG(dbgs() << "Resume: " << I << "\n");
2817     // Nothing to do here.
2818   }
2819 
2820   void visitCleanupReturnInst(CleanupReturnInst &CRI) {
2821     DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
2822     // Nothing to do here.
2823   }
2824 
2825   void visitCatchReturnInst(CatchReturnInst &CRI) {
2826     DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
2827     // Nothing to do here.
2828   }
2829 
2830   void visitInstruction(Instruction &I) {
2831     // Everything else: stop propagating and check for poisoned shadow.
2832     if (ClDumpStrictInstructions)
2833       dumpInst(I);
2834     DEBUG(dbgs() << "DEFAULT: " << I << "\n");
2835     for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
2836       insertShadowCheck(I.getOperand(i), &I);
2837     setShadow(&I, getCleanShadow(&I));
2838     setOrigin(&I, getCleanOrigin());
2839   }
2840 };
2841 
2842 /// \brief AMD64-specific implementation of VarArgHelper.
2843 struct VarArgAMD64Helper : public VarArgHelper {
2844   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
2845   // See a comment in visitCallSite for more details.
2846   static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
2847   static const unsigned AMD64FpEndOffset = 176;
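  // 48 == 6 general-purpose argument registers * 8 bytes;
  // 176 == 48 + 8 SSE argument registers * 16 bytes.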
2848 
2849   Function &F;
2850   MemorySanitizer &MS;
2851   MemorySanitizerVisitor &MSV;
2852   Value *VAArgTLSCopy;
2853   Value *VAArgOverflowSize;
2854 
2855   SmallVector<CallInst*, 16> VAStartInstrumentationList;
2856 
2857   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
2858                     MemorySanitizerVisitor &MSV)
2859     : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2860       VAArgOverflowSize(nullptr) {}
2861 
2862   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
2863 
2864   ArgKind classifyArgument(Value* arg) {
2865     // A very rough approximation of X86_64 argument classification rules.
2866     Type *T = arg->getType();
2867     if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
2868       return AK_FloatingPoint;
2869     if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
2870       return AK_GeneralPurpose;
2871     if (T->isPointerTy())
2872       return AK_GeneralPurpose;
2873     return AK_Memory;
2874   }
2875 
2876   // For VarArg functions, store the argument shadow in an ABI-specific format
2877   // that corresponds to va_list layout.
2878   // We do this because Clang lowers va_arg in the frontend, and this pass
2879   // only sees the low level code that deals with va_list internals.
2880   // A much easier alternative (provided that Clang emits va_arg instructions)
2881   // would have been to associate each live instance of va_list with a copy of
2882   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
2883   // order.
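  // The resulting layout of the va_arg TLS buffer mirrors the register save
  // area written by va_start: bytes [0, 48) hold shadow for arguments passed
  // in GP registers, bytes [48, 176) for arguments passed in SSE registers,
  // and shadow for memory-passed (overflow) arguments follows at offset 176.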
2884   void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2885     unsigned GpOffset = 0;
2886     unsigned FpOffset = AMD64GpEndOffset;
2887     unsigned OverflowOffset = AMD64FpEndOffset;
2888     const DataLayout &DL = F.getParent()->getDataLayout();
2889     for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2890          ArgIt != End; ++ArgIt) {
2891       Value *A = *ArgIt;
2892       unsigned ArgNo = CS.getArgumentNo(ArgIt);
2893       bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
2894       bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
2895       if (IsByVal) {
2896         // ByVal arguments always go to the overflow area.
2897         // Fixed arguments passed through the overflow area will be stepped
2898         // over by va_start, so don't count them towards the offset.
2899         if (IsFixed)
2900           continue;
2901         assert(A->getType()->isPointerTy());
2902         Type *RealTy = A->getType()->getPointerElementType();
2903         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
2904         Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
2905         OverflowOffset += alignTo(ArgSize, 8);
2906         IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
2907                          ArgSize, kShadowTLSAlignment);
2908       } else {
2909         ArgKind AK = classifyArgument(A);
2910         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
2911           AK = AK_Memory;
2912         if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
2913           AK = AK_Memory;
2914         Value *Base;
2915         switch (AK) {
2916           case AK_GeneralPurpose:
2917             Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
2918             GpOffset += 8;
2919             break;
2920           case AK_FloatingPoint:
2921             Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
2922             FpOffset += 16;
2923             break;
2924           case AK_Memory:
2925             if (IsFixed)
2926               continue;
2927             uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
2928             Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
2929             OverflowOffset += alignTo(ArgSize, 8);
2930         }
2931         // Take fixed arguments into account for GpOffset and FpOffset,
2932         // but don't actually store shadows for them.
2933         if (IsFixed)
2934           continue;
2935         IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
2936       }
2937     }
2938     Constant *OverflowSize =
2939       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
2940     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
2941   }
2942 
2943   /// \brief Compute the shadow address for a given va_arg.
2944   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
2945                                    int ArgOffset) {
2946     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
2947     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
2948     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
2949                               "_msarg");
2950   }
2951 
2952   void visitVAStartInst(VAStartInst &I) override {
2953     if (F.getCallingConv() == CallingConv::X86_64_Win64)
2954       return;
2955     IRBuilder<> IRB(&I);
2956     VAStartInstrumentationList.push_back(&I);
2957     Value *VAListTag = I.getArgOperand(0);
2958     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2959 
2960     // Unpoison the whole __va_list_tag.
2961     // FIXME: magic ABI constants.
2962     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2963                      /* size */24, /* alignment */8, false);
2964   }
2965 
2966   void visitVACopyInst(VACopyInst &I) override {
2967     if (F.getCallingConv() == CallingConv::X86_64_Win64)
2968       return;
2969     IRBuilder<> IRB(&I);
2970     Value *VAListTag = I.getArgOperand(0);
2971     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2972 
2973     // Unpoison the whole __va_list_tag.
2974     // FIXME: magic ABI constants.
2975     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2976                      /* size */24, /* alignment */8, false);
2977   }
2978 
2979   void finalizeInstrumentation() override {
2980     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
2981            "finalizeInstrumentation called twice");
2982     if (!VAStartInstrumentationList.empty()) {
2983       // If there is a va_start in this function, make a backup copy of
2984       // va_arg_tls somewhere in the function entry block.
2985       IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
2986       VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
2987       Value *CopySize =
2988         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
2989                       VAArgOverflowSize);
2990       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
2991       IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
2992     }
2993 
2994     // Instrument va_start.
2995     // Copy va_list shadow from the backup copy of the TLS contents.
2996     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
2997       CallInst *OrigInst = VAStartInstrumentationList[i];
2998       IRBuilder<> IRB(OrigInst->getNextNode());
2999       Value *VAListTag = OrigInst->getArgOperand(0);
3000 
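      // In the x86_64 va_list, overflow_arg_area lives at offset 8 and
      // reg_save_area at offset 16; those are the two pointers read below.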
3001       Value *RegSaveAreaPtrPtr =
3002         IRB.CreateIntToPtr(
3003           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3004                         ConstantInt::get(MS.IntptrTy, 16)),
3005           Type::getInt64PtrTy(*MS.C));
3006       Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3007       Value *RegSaveAreaShadowPtr =
3008         MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3009       IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
3010                        AMD64FpEndOffset, 16);
3011 
3012       Value *OverflowArgAreaPtrPtr =
3013         IRB.CreateIntToPtr(
3014           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3015                         ConstantInt::get(MS.IntptrTy, 8)),
3016           Type::getInt64PtrTy(*MS.C));
3017       Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
3018       Value *OverflowArgAreaShadowPtr =
3019         MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
3020       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
3021                                              AMD64FpEndOffset);
3022       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
3023     }
3024   }
3025 };
3026 
3027 /// \brief MIPS64-specific implementation of VarArgHelper.
3028 struct VarArgMIPS64Helper : public VarArgHelper {
3029   Function &F;
3030   MemorySanitizer &MS;
3031   MemorySanitizerVisitor &MSV;
3032   Value *VAArgTLSCopy;
3033   Value *VAArgSize;
3034 
3035   SmallVector<CallInst*, 16> VAStartInstrumentationList;
3036 
3037   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
3038                     MemorySanitizerVisitor &MSV)
3039     : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3040       VAArgSize(nullptr) {}
3041 
3042   void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3043     unsigned VAArgOffset = 0;
3044     const DataLayout &DL = F.getParent()->getDataLayout();
3045     for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
3046          CS.getFunctionType()->getNumParams(), End = CS.arg_end();
3047          ArgIt != End; ++ArgIt) {
3048       llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
3049       Value *A = *ArgIt;
3050       Value *Base;
3051       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3052       if (TargetTriple.getArch() == llvm::Triple::mips64) {
        // Adjust the shadow for arguments with size < 8 to match the
        // placement of bits on big-endian systems.
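        // For example, a 4-byte int occupies the high half of its 8-byte
        // slot, so its shadow offset is advanced by 4 before the store.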
3055         if (ArgSize < 8)
3056           VAArgOffset += (8 - ArgSize);
3057       }
3058       Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
3059       VAArgOffset += ArgSize;
3060       VAArgOffset = alignTo(VAArgOffset, 8);
3061       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3062     }
3063 
3064     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
    // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
    // new class member, i.e. it holds the total size of all VarArgs.
3067     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3068   }
3069 
3070   /// \brief Compute the shadow address for a given va_arg.
3071   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3072                                    int ArgOffset) {
3073     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3074     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3075     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3076                               "_msarg");
3077   }
3078 
3079   void visitVAStartInst(VAStartInst &I) override {
3080     IRBuilder<> IRB(&I);
3081     VAStartInstrumentationList.push_back(&I);
3082     Value *VAListTag = I.getArgOperand(0);
3083     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3084     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3085                      /* size */8, /* alignment */8, false);
3086   }
3087 
3088   void visitVACopyInst(VACopyInst &I) override {
3089     IRBuilder<> IRB(&I);
3090     Value *VAListTag = I.getArgOperand(0);
3091     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3092     // Unpoison the whole __va_list_tag.
3093     // FIXME: magic ABI constants.
3094     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3095                      /* size */8, /* alignment */8, false);
3096   }
3097 
3098   void finalizeInstrumentation() override {
3099     assert(!VAArgSize && !VAArgTLSCopy &&
3100            "finalizeInstrumentation called twice");
3101     IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3102     VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3103     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3104                                     VAArgSize);
3105 
3106     if (!VAStartInstrumentationList.empty()) {
3107       // If there is a va_start in this function, make a backup copy of
3108       // va_arg_tls somewhere in the function entry block.
3109       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3110       IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3111     }
3112 
3113     // Instrument va_start.
3114     // Copy va_list shadow from the backup copy of the TLS contents.
3115     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3116       CallInst *OrigInst = VAStartInstrumentationList[i];
3117       IRBuilder<> IRB(OrigInst->getNextNode());
3118       Value *VAListTag = OrigInst->getArgOperand(0);
3119       Value *RegSaveAreaPtrPtr =
3120         IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3121                         Type::getInt64PtrTy(*MS.C));
3122       Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3123       Value *RegSaveAreaShadowPtr =
3124       MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3125       IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
3126     }
3127   }
3128 };
3129 
3130 
3131 /// \brief AArch64-specific implementation of VarArgHelper.
3132 struct VarArgAArch64Helper : public VarArgHelper {
3133   static const unsigned kAArch64GrArgSize = 64;
3134   static const unsigned kAArch64VrArgSize = 128;
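  // 64 == 8 GR argument registers (x0-x7) * 8 bytes;
  // 128 == 8 VR argument registers (q0-q7) * 16 bytes.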
3135 
3136   static const unsigned AArch64GrBegOffset = 0;
3137   static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // The VR save area starts right after the GR area; with kAArch64GrArgSize
  // == 64 it is already 16-byte aligned.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
3140   static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
3141                                              + kAArch64VrArgSize;
3142   static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
3143 
3144   Function &F;
3145   MemorySanitizer &MS;
3146   MemorySanitizerVisitor &MSV;
3147   Value *VAArgTLSCopy;
3148   Value *VAArgOverflowSize;
3149 
3150   SmallVector<CallInst*, 16> VAStartInstrumentationList;
3151 
3152   VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
3153                     MemorySanitizerVisitor &MSV)
3154     : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3155       VAArgOverflowSize(nullptr) {}
3156 
3157   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3158 
3159   ArgKind classifyArgument(Value* arg) {
3160     Type *T = arg->getType();
3161     if (T->isFPOrFPVectorTy())
3162       return AK_FloatingPoint;
3163     if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3164         || (T->isPointerTy()))
3165       return AK_GeneralPurpose;
3166     return AK_Memory;
3167   }
3168 
  // The instrumentation stores the argument shadow in a non ABI-specific
  // format because it does not know which arguments are named (Clang, as in
  // the x86_64 case, lowers va_arg in the frontend, and this pass only sees
  // the low-level code that deals with va_list internals).
  // The first eight GR registers are saved in the first 64 bytes of the
  // va_arg TLS array, followed by the first 8 FP/SIMD registers, and then
  // the remaining arguments.
  // Using constant offsets within the va_arg TLS array allows a fast copy
  // in finalizeInstrumentation.
3178   void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3179     unsigned GrOffset = AArch64GrBegOffset;
3180     unsigned VrOffset = AArch64VrBegOffset;
3181     unsigned OverflowOffset = AArch64VAEndOffset;
3182 
3183     const DataLayout &DL = F.getParent()->getDataLayout();
3184     for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3185          ArgIt != End; ++ArgIt) {
3186       Value *A = *ArgIt;
3187       unsigned ArgNo = CS.getArgumentNo(ArgIt);
3188       bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3189       ArgKind AK = classifyArgument(A);
3190       if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
3191         AK = AK_Memory;
3192       if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
3193         AK = AK_Memory;
3194       Value *Base;
3195       switch (AK) {
3196         case AK_GeneralPurpose:
3197           Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
3198           GrOffset += 8;
3199           break;
3200         case AK_FloatingPoint:
3201           Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
3202           VrOffset += 16;
3203           break;
3204         case AK_Memory:
3205           // Don't count fixed arguments in the overflow area - va_start will
3206           // skip right over them.
3207           if (IsFixed)
3208             continue;
3209           uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3210           Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3211           OverflowOffset += alignTo(ArgSize, 8);
3212           break;
3213       }
3214       // Count Gp/Vr fixed arguments to their respective offsets, but don't
3215       // bother to actually store a shadow.
3216       if (IsFixed)
3217         continue;
3218       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3219     }
3220     Constant *OverflowSize =
3221       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
3222     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3223   }
3224 
3225   /// Compute the shadow address for a given va_arg.
3226   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3227                                    int ArgOffset) {
3228     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3229     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3230     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3231                               "_msarg");
3232   }
3233 
3234   void visitVAStartInst(VAStartInst &I) override {
3235     IRBuilder<> IRB(&I);
3236     VAStartInstrumentationList.push_back(&I);
3237     Value *VAListTag = I.getArgOperand(0);
3238     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3239     // Unpoison the whole __va_list_tag.
3240     // FIXME: magic ABI constants (size of va_list).
3241     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3242                      /* size */32, /* alignment */8, false);
3243   }
3244 
3245   void visitVACopyInst(VACopyInst &I) override {
3246     IRBuilder<> IRB(&I);
3247     Value *VAListTag = I.getArgOperand(0);
3248     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3249     // Unpoison the whole __va_list_tag.
3250     // FIXME: magic ABI constants (size of va_list).
3251     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3252                      /* size */32, /* alignment */8, false);
3253   }
3254 
3255   // Retrieve a va_list field of 'void*' size.
3256   Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3257     Value *SaveAreaPtrPtr =
3258       IRB.CreateIntToPtr(
3259         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3260                       ConstantInt::get(MS.IntptrTy, offset)),
3261         Type::getInt64PtrTy(*MS.C));
3262     return IRB.CreateLoad(SaveAreaPtrPtr);
3263   }
3264 
3265   // Retrieve a va_list field of 'int' size.
3266   Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3267     Value *SaveAreaPtr =
3268       IRB.CreateIntToPtr(
3269         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3270                       ConstantInt::get(MS.IntptrTy, offset)),
3271         Type::getInt32PtrTy(*MS.C));
3272     Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
3273     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
3274   }
3275 
3276   void finalizeInstrumentation() override {
3277     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3278            "finalizeInstrumentation called twice");
3279     if (!VAStartInstrumentationList.empty()) {
3280       // If there is a va_start in this function, make a backup copy of
3281       // va_arg_tls somewhere in the function entry block.
3282       IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3283       VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3284       Value *CopySize =
3285         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
3286                       VAArgOverflowSize);
3287       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3288       IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3289     }
3290 
3291     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
3292     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
3293 
3294     // Instrument va_start, copy va_list shadow from the backup copy of
3295     // the TLS contents.
3296     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3297       CallInst *OrigInst = VAStartInstrumentationList[i];
3298       IRBuilder<> IRB(OrigInst->getNextNode());
3299 
3300       Value *VAListTag = OrigInst->getArgOperand(0);
3301 
      // The variadic ABI for AArch64 creates two areas to save the incoming
      // argument registers (one for the 64-bit general registers x0-x7 and
      // another for the 128-bit FP/SIMD registers v0-v7).
      // We then need to propagate the argument shadow to both regions
      // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
      // The remaining arguments are saved in the shadow for 'va::stack'.
      // One caveat is that only the non-named arguments need to be
      // propagated, but the call site instrumentation saves *all* of the
      // arguments.  So to copy the shadow values from the va_arg TLS array
      // we need to adjust the offset for both the GR and VR fields based on
      // the __{gr,vr}_offs values (since those are based on the incoming
      // named arguments).
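      // The AArch64 va_list layout assumed by the field reads below is:
      //   offset  0: void *__stack
      //   offset  8: void *__gr_top
      //   offset 16: void *__vr_top
      //   offset 24: int   __gr_offs
      //   offset 28: int   __vr_offs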
3314 
3315       // Read the stack pointer from the va_list.
3316       Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
3317 
3318       // Read both the __gr_top and __gr_off and add them up.
3319       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
3320       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
3321 
3322       Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
3323 
3324       // Read both the __vr_top and __vr_off and add them up.
3325       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
3326       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
3327 
3328       Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
3329 
      // We do not know how many named arguments were used, and at the call
      // site all of the arguments were saved.  Since __gr_offs is defined as
      // '0 - ((8 - named_gr) * 8)', the idea is to propagate only the
      // variadic arguments by skipping the bytes of shadow that belong to
      // the named arguments.
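      // For example, with two named GP arguments __gr_offs is -48, so the
      // copy below starts at byte 64 + (-48) == 16 of the GR shadow in the
      // TLS copy and covers the remaining 64 - 16 == 48 bytes.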
3334       Value *GrRegSaveAreaShadowPtrOff =
3335         IRB.CreateAdd(GrArgSize, GrOffSaveArea);
3336 
3337       Value *GrRegSaveAreaShadowPtr =
3338         MSV.getShadowPtr(GrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3339 
3340       Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3341                                               GrRegSaveAreaShadowPtrOff);
3342       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
3343 
3344       IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, GrSrcPtr, GrCopySize, 8);
3345 
3346       // Again, but for FP/SIMD values.
3347       Value *VrRegSaveAreaShadowPtrOff =
3348           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
3349 
3350       Value *VrRegSaveAreaShadowPtr =
3351         MSV.getShadowPtr(VrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3352 
3353       Value *VrSrcPtr = IRB.CreateInBoundsGEP(
3354         IRB.getInt8Ty(),
3355         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3356                               IRB.getInt32(AArch64VrBegOffset)),
3357         VrRegSaveAreaShadowPtrOff);
3358       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
3359 
3360       IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, VrSrcPtr, VrCopySize, 8);
3361 
3362       // And finally for remaining arguments.
3363       Value *StackSaveAreaShadowPtr =
3364         MSV.getShadowPtr(StackSaveAreaPtr, IRB.getInt8Ty(), IRB);
3365 
3366       Value *StackSrcPtr =
3367         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3368                               IRB.getInt32(AArch64VAEndOffset));
3369 
3370       IRB.CreateMemCpy(StackSaveAreaShadowPtr, StackSrcPtr,
3371                        VAArgOverflowSize, 16);
3372     }
3373   }
3374 };
3375 
3376 /// \brief PowerPC64-specific implementation of VarArgHelper.
3377 struct VarArgPowerPC64Helper : public VarArgHelper {
3378   Function &F;
3379   MemorySanitizer &MS;
3380   MemorySanitizerVisitor &MSV;
3381   Value *VAArgTLSCopy;
3382   Value *VAArgSize;
3383 
3384   SmallVector<CallInst*, 16> VAStartInstrumentationList;
3385 
3386   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
3387                     MemorySanitizerVisitor &MSV)
3388     : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3389       VAArgSize(nullptr) {}
3390 
3391   void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3392     // For PowerPC, we need to deal with alignment of stack arguments -
3393     // they are mostly aligned to 8 bytes, but vectors and i128 arrays
3394     // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
3395     // and QPX vectors are aligned to 32 bytes.  For that reason, we
    // compute the current offset from the stack pointer (which is always
    // properly aligned) and the offset of the first vararg, then subtract
    // them.
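    // VAArgBase is advanced past every fixed argument below, so the offsets
    // stored into the va_arg TLS buffer are relative to the end of the last
    // fixed argument, i.e. to the start of the variadic area.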
3398     unsigned VAArgBase;
3399     llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
    // The parameter save area starts at 48 bytes from the frame pointer for
    // ABIv1, and at 32 bytes for ABIv2.  This is usually determined by the
    // target endianness, but in theory could be overridden by a function
    // attribute.  For simplicity, we ignore that here (it would only matter
    // for QPX vectors).
3404     if (TargetTriple.getArch() == llvm::Triple::ppc64)
3405       VAArgBase = 48;
3406     else
3407       VAArgBase = 32;
3408     unsigned VAArgOffset = VAArgBase;
3409     const DataLayout &DL = F.getParent()->getDataLayout();
3410     for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3411          ArgIt != End; ++ArgIt) {
3412       Value *A = *ArgIt;
3413       unsigned ArgNo = CS.getArgumentNo(ArgIt);
3414       bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3415       bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
3416       if (IsByVal) {
3417         assert(A->getType()->isPointerTy());
3418         Type *RealTy = A->getType()->getPointerElementType();
3419         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3420         uint64_t ArgAlign = CS.getParamAlignment(ArgNo + 1);
3421         if (ArgAlign < 8)
3422           ArgAlign = 8;
3423         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3424         if (!IsFixed) {
3425           Value *Base = getShadowPtrForVAArgument(RealTy, IRB,
3426                                                   VAArgOffset - VAArgBase);
3427           IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
3428                            ArgSize, kShadowTLSAlignment);
3429         }
3430         VAArgOffset += alignTo(ArgSize, 8);
3431       } else {
3432         Value *Base;
3433         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3434         uint64_t ArgAlign = 8;
3435         if (A->getType()->isArrayTy()) {
3436           // Arrays are aligned to element size, except for long double
3437           // arrays, which are aligned to 8 bytes.
3438           Type *ElementTy = A->getType()->getArrayElementType();
3439           if (!ElementTy->isPPC_FP128Ty())
3440             ArgAlign = DL.getTypeAllocSize(ElementTy);
3441         } else if (A->getType()->isVectorTy()) {
3442           // Vectors are naturally aligned.
3443           ArgAlign = DL.getTypeAllocSize(A->getType());
3444         }
3445         if (ArgAlign < 8)
3446           ArgAlign = 8;
3447         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3448         if (DL.isBigEndian()) {
          // Adjust the shadow for arguments with size < 8 to match the
          // placement of bits on big-endian systems.
3451           if (ArgSize < 8)
3452             VAArgOffset += (8 - ArgSize);
3453         }
3454         if (!IsFixed) {
3455           Base = getShadowPtrForVAArgument(A->getType(), IRB,
3456                                            VAArgOffset - VAArgBase);
3457           IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3458         }
3459         VAArgOffset += ArgSize;
3460         VAArgOffset = alignTo(VAArgOffset, 8);
3461       }
3462       if (IsFixed)
3463         VAArgBase = VAArgOffset;
3464     }
3465 
3466     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
3467                                                 VAArgOffset - VAArgBase);
    // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
    // new class member, i.e. it holds the total size of all VarArgs.
3470     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3471   }
3472 
3473   /// \brief Compute the shadow address for a given va_arg.
3474   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3475                                    int ArgOffset) {
3476     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3477     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3478     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3479                               "_msarg");
3480   }
3481 
3482   void visitVAStartInst(VAStartInst &I) override {
3483     IRBuilder<> IRB(&I);
3484     VAStartInstrumentationList.push_back(&I);
3485     Value *VAListTag = I.getArgOperand(0);
3486     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3487     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3488                      /* size */8, /* alignment */8, false);
3489   }
3490 
3491   void visitVACopyInst(VACopyInst &I) override {
3492     IRBuilder<> IRB(&I);
3493     Value *VAListTag = I.getArgOperand(0);
3494     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3495     // Unpoison the whole __va_list_tag.
3496     // FIXME: magic ABI constants.
3497     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3498                      /* size */8, /* alignment */8, false);
3499   }
3500 
3501   void finalizeInstrumentation() override {
3502     assert(!VAArgSize && !VAArgTLSCopy &&
3503            "finalizeInstrumentation called twice");
3504     IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3505     VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3506     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3507                                     VAArgSize);
3508 
3509     if (!VAStartInstrumentationList.empty()) {
3510       // If there is a va_start in this function, make a backup copy of
3511       // va_arg_tls somewhere in the function entry block.
3512       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3513       IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3514     }
3515 
3516     // Instrument va_start.
3517     // Copy va_list shadow from the backup copy of the TLS contents.
3518     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3519       CallInst *OrigInst = VAStartInstrumentationList[i];
3520       IRBuilder<> IRB(OrigInst->getNextNode());
3521       Value *VAListTag = OrigInst->getArgOperand(0);
3522       Value *RegSaveAreaPtrPtr =
3523         IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3524                         Type::getInt64PtrTy(*MS.C));
3525       Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3526       Value *RegSaveAreaShadowPtr =
3527       MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3528       IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
3529     }
3530   }
3531 };
3532 
3533 /// \brief A no-op implementation of VarArgHelper.
3534 struct VarArgNoOpHelper : public VarArgHelper {
3535   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
3536                    MemorySanitizerVisitor &MSV) {}
3537 
3538   void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
3539 
3540   void visitVAStartInst(VAStartInst &I) override {}
3541 
3542   void visitVACopyInst(VACopyInst &I) override {}
3543 
3544   void finalizeInstrumentation() override {}
3545 };
3546 
3547 VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
3548                                  MemorySanitizerVisitor &Visitor) {
  // VarArg handling is implemented only for AMD64, MIPS64, AArch64 and
  // PowerPC64; unsupported platforms fall back to the no-op helper and
  // false positives are possible there.
3551   llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
3552   if (TargetTriple.getArch() == llvm::Triple::x86_64)
3553     return new VarArgAMD64Helper(Func, Msan, Visitor);
3554   else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
3555            TargetTriple.getArch() == llvm::Triple::mips64el)
3556     return new VarArgMIPS64Helper(Func, Msan, Visitor);
3557   else if (TargetTriple.getArch() == llvm::Triple::aarch64)
3558     return new VarArgAArch64Helper(Func, Msan, Visitor);
3559   else if (TargetTriple.getArch() == llvm::Triple::ppc64 ||
3560            TargetTriple.getArch() == llvm::Triple::ppc64le)
3561     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
3562   else
3563     return new VarArgNoOpHelper(Func, Msan, Visitor);
3564 }
3565 
3566 } // anonymous namespace
3567 
3568 bool MemorySanitizer::runOnFunction(Function &F) {
3569   if (&F == MsanCtorFunction)
3570     return false;
3571   MemorySanitizerVisitor Visitor(F, *this);
3572 
3573   // Clear out readonly/readnone attributes.
3574   AttrBuilder B;
3575   B.addAttribute(Attribute::ReadOnly)
3576     .addAttribute(Attribute::ReadNone);
3577   F.removeAttributes(AttributeSet::FunctionIndex,
3578                      AttributeSet::get(F.getContext(),
3579                                        AttributeSet::FunctionIndex, B));
3580 
3581   return Visitor.runOnFunction();
3582 }
3583