1 //===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
11 /// reads.
12 ///
13 /// The algorithm of the tool is similar to Memcheck
14 /// (http://goo.gl/QKbem). We associate a few shadow bits with every
15 /// byte of the application memory, poison the shadow of the malloc-ed
16 /// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
19 /// write, report a bug on some other instructions (e.g. JMP) if the
20 /// associated shadow is poisoned.
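///
/// As an illustrative sketch (not the exact generated IR): for c = a + b,
/// the shadow of c is computed roughly as shadow(a) | shadow(b), so a bit of
/// c is considered uninitialized whenever the corresponding bit of either
/// input is.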
21 ///
22 /// But there are differences too. The first and the major one:
23 /// compiler instrumentation instead of binary instrumentation. This
24 /// gives us much better register allocation, possible compiler
25 /// optimizations and a fast start-up. But this brings the major issue
26 /// as well: msan needs to see all program events, including system
27 /// calls and reads/writes in system libraries, so we either need to
28 /// compile *everything* with msan or use a binary translation
29 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded, so races are not a
/// concern there; it uses 2 shadow bits per byte with a slow-path
/// storage that uses 8 bits per byte).
36 ///
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
38 ///
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
44 ///
45 ///                           Origin tracking.
46 ///
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
50 ///
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
56 ///
57 /// Every 4 aligned, consecutive bytes of application memory have one origin
58 /// value associated with them. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61 /// practice.
62 ///
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origins to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1-byte) clean store, and it is also good for performance.
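///
/// For example (an illustration, not from the code): 4 bytes at a 4-aligned
/// address A share one origin slot, so a clean 1-byte store to A+2 leaves
/// that slot untouched, while storing an uninitialized 4-byte value to A
/// overwrites it.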
67 ///
68 ///                            Atomic handling.
69 ///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, an atomic
/// store to two disjoint locations cannot be done without severe slowdown.
73 ///
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
83 ///
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
90 /// clean shadow.
91 ///
92 ///                      Instrumenting inline assembly.
93 ///
94 /// For inline assembly code LLVM has little idea about which memory locations
/// become initialized depending on the arguments. It may be possible to figure
/// out which arguments are meant to point to inputs and outputs, but the
/// actual semantics may only be visible at runtime. In the Linux kernel it's
98 /// also possible that the arguments only indicate the offset for a base taken
99 /// from a segment register, so it's dangerous to treat any asm() arguments as
/// pointers. We take a conservative approach and generate calls to
///   __msan_instrument_asm_store(ptr, size)
/// that defer the memory unpoisoning to the runtime library.
103 /// The latter can perform more complex address checks to figure out whether
104 /// it's safe to touch the shadow memory.
105 /// Like with atomic operations, we call __msan_instrument_asm_store() before
106 /// the assembly call, so that changes to the shadow memory will be seen by
107 /// other threads together with main memory initialization.
108 ///
109 ///                  KernelMemorySanitizer (KMSAN) implementation.
110 ///
111 /// The major differences between KMSAN and MSan instrumentation are:
112 ///  - KMSAN always tracks the origins and implies msan-keep-going=true;
113 ///  - KMSAN allocates shadow and origin memory for each page separately, so
114 ///    there are no explicit accesses to shadow and origin in the
115 ///    instrumentation.
116 ///    Shadow and origin values for a particular X-byte memory location
117 ///    (X=1,2,4,8) are accessed through pointers obtained via the
118 ///      __msan_metadata_ptr_for_load_X(ptr)
119 ///      __msan_metadata_ptr_for_store_X(ptr)
120 ///    functions. The corresponding functions check that the X-byte accesses
///    are possible and return the pointers to shadow and origin memory.
122 ///    Arbitrary sized accesses are handled with:
123 ///      __msan_metadata_ptr_for_load_n(ptr, size)
124 ///      __msan_metadata_ptr_for_store_n(ptr, size);
125 ///  - TLS variables are stored in a single per-task struct. A call to a
126 ///    function __msan_get_context_state() returning a pointer to that struct
127 ///    is inserted into every instrumented function before the entry block;
128 ///  - __msan_warning() takes a 32-bit origin parameter;
129 ///  - local variables are poisoned with __msan_poison_alloca() upon function
130 ///    entry and unpoisoned with __msan_unpoison_alloca() before leaving the
131 ///    function;
132 ///  - the pass doesn't declare any global variables or add global constructors
133 ///    to the translation unit.
134 ///
135 /// Also, KMSAN currently ignores uninitialized memory passed into inline asm
136 /// calls, making sure we're on the safe side wrt. possible false positives.
137 ///
138 ///  KernelMemorySanitizer only supports X86_64 at the moment.
139 ///
140 //===----------------------------------------------------------------------===//
141 
142 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
143 #include "llvm/ADT/APInt.h"
144 #include "llvm/ADT/ArrayRef.h"
145 #include "llvm/ADT/DepthFirstIterator.h"
146 #include "llvm/ADT/SmallSet.h"
147 #include "llvm/ADT/SmallString.h"
148 #include "llvm/ADT/SmallVector.h"
149 #include "llvm/ADT/StringExtras.h"
150 #include "llvm/ADT/StringRef.h"
151 #include "llvm/ADT/Triple.h"
152 #include "llvm/Analysis/TargetLibraryInfo.h"
153 #include "llvm/IR/Argument.h"
154 #include "llvm/IR/Attributes.h"
155 #include "llvm/IR/BasicBlock.h"
156 #include "llvm/IR/CallingConv.h"
157 #include "llvm/IR/Constant.h"
158 #include "llvm/IR/Constants.h"
159 #include "llvm/IR/DataLayout.h"
160 #include "llvm/IR/DerivedTypes.h"
161 #include "llvm/IR/Function.h"
162 #include "llvm/IR/GlobalValue.h"
163 #include "llvm/IR/GlobalVariable.h"
164 #include "llvm/IR/IRBuilder.h"
165 #include "llvm/IR/InlineAsm.h"
166 #include "llvm/IR/InstVisitor.h"
167 #include "llvm/IR/InstrTypes.h"
168 #include "llvm/IR/Instruction.h"
169 #include "llvm/IR/Instructions.h"
170 #include "llvm/IR/IntrinsicInst.h"
171 #include "llvm/IR/Intrinsics.h"
172 #include "llvm/IR/IntrinsicsX86.h"
173 #include "llvm/IR/LLVMContext.h"
174 #include "llvm/IR/MDBuilder.h"
175 #include "llvm/IR/Module.h"
176 #include "llvm/IR/Type.h"
177 #include "llvm/IR/Value.h"
178 #include "llvm/IR/ValueMap.h"
179 #include "llvm/InitializePasses.h"
180 #include "llvm/Pass.h"
181 #include "llvm/Support/AtomicOrdering.h"
182 #include "llvm/Support/Casting.h"
183 #include "llvm/Support/CommandLine.h"
184 #include "llvm/Support/Compiler.h"
185 #include "llvm/Support/Debug.h"
186 #include "llvm/Support/ErrorHandling.h"
187 #include "llvm/Support/MathExtras.h"
188 #include "llvm/Support/raw_ostream.h"
189 #include "llvm/Transforms/Instrumentation.h"
190 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
191 #include "llvm/Transforms/Utils/Local.h"
192 #include "llvm/Transforms/Utils/ModuleUtils.h"
193 #include <algorithm>
194 #include <cassert>
195 #include <cstddef>
196 #include <cstdint>
197 #include <memory>
198 #include <string>
199 #include <tuple>
200 
201 using namespace llvm;
202 
203 #define DEBUG_TYPE "msan"
204 
205 static const unsigned kOriginSize = 4;
206 static const Align kMinOriginAlignment = Align(4);
207 static const Align kShadowTLSAlignment = Align(8);
208 
209 // These constants must be kept in sync with the ones in msan.h.
210 static const unsigned kParamTLSSize = 800;
211 static const unsigned kRetvalTLSSize = 800;
212 
// Access sizes are powers of two: 1, 2, 4, 8.
214 static const size_t kNumberOfAccessSizes = 4;
215 
216 /// Track origins of uninitialized values.
217 ///
218 /// Adds a section to MemorySanitizer report that points to the allocation
219 /// (stack or heap) the uninitialized bits came from originally.
220 static cl::opt<int> ClTrackOrigins("msan-track-origins",
221        cl::desc("Track origins (allocation sites) of poisoned memory"),
222        cl::Hidden, cl::init(0));
223 
224 static cl::opt<bool> ClKeepGoing("msan-keep-going",
225        cl::desc("keep going after reporting a UMR"),
226        cl::Hidden, cl::init(false));
227 
228 static cl::opt<bool> ClPoisonStack("msan-poison-stack",
229        cl::desc("poison uninitialized stack variables"),
230        cl::Hidden, cl::init(true));
231 
232 static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
233        cl::desc("poison uninitialized stack variables with a call"),
234        cl::Hidden, cl::init(false));
235 
236 static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
237        cl::desc("poison uninitialized stack variables with the given pattern"),
238        cl::Hidden, cl::init(0xff));
239 
240 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
241        cl::desc("poison undef temps"),
242        cl::Hidden, cl::init(true));
243 
244 static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
245        cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
246        cl::Hidden, cl::init(true));
247 
248 static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
249        cl::desc("exact handling of relational integer ICmp"),
250        cl::Hidden, cl::init(false));
251 
252 static cl::opt<bool> ClHandleLifetimeIntrinsics(
253     "msan-handle-lifetime-intrinsics",
254     cl::desc(
255         "when possible, poison scoped variables at the beginning of the scope "
256         "(slower, but more precise)"),
257     cl::Hidden, cl::init(true));
258 
259 // When compiling the Linux kernel, we sometimes see false positives related to
260 // MSan being unable to understand that inline assembly calls may initialize
261 // local variables.
262 // This flag makes the compiler conservatively unpoison every memory location
263 // passed into an assembly call. Note that this may cause false positives.
264 // Because it's impossible to figure out the array sizes, we can only unpoison
265 // the first sizeof(type) bytes for each type* pointer.
266 // The instrumentation is only enabled in KMSAN builds, and only if
267 // -msan-handle-asm-conservative is on. This is done because we may want to
268 // quickly disable assembly instrumentation when it breaks.
269 static cl::opt<bool> ClHandleAsmConservative(
270     "msan-handle-asm-conservative",
271     cl::desc("conservative handling of inline assembly"), cl::Hidden,
272     cl::init(true));
273 
274 // This flag controls whether we check the shadow of the address
// operand of a load or store. Such bugs are very rare, since a load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. only the lower bits of the address are garbage, or the access
// happens early at program startup where malloc-ed memory is more likely
// to be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
280 static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
281        cl::desc("report accesses through a pointer which has poisoned shadow"),
282        cl::Hidden, cl::init(true));
283 
284 static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
285        cl::desc("print out instructions with default strict semantics"),
286        cl::Hidden, cl::init(false));
287 
288 static cl::opt<int> ClInstrumentationWithCallThreshold(
289     "msan-instrumentation-with-call-threshold",
290     cl::desc(
291         "If the function being instrumented requires more than "
292         "this number of checks and origin stores, use callbacks instead of "
293         "inline checks (-1 means never use callbacks)."),
294     cl::Hidden, cl::init(3500));
295 
296 static cl::opt<bool>
297     ClEnableKmsan("msan-kernel",
298                   cl::desc("Enable KernelMemorySanitizer instrumentation"),
299                   cl::Hidden, cl::init(false));
300 
301 // This is an experiment to enable handling of cases where shadow is a non-zero
// compile-time constant. For some unexplained reason such cases were
// silently ignored in the instrumentation.
304 static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
305        cl::desc("Insert checks for constant shadow values"),
306        cl::Hidden, cl::init(false));
307 
308 // This is off by default because of a bug in gold:
309 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
310 static cl::opt<bool> ClWithComdat("msan-with-comdat",
311        cl::desc("Place MSan constructors in comdat sections"),
312        cl::Hidden, cl::init(false));
313 
// These options allow specifying custom memory map parameters.
315 // See MemoryMapParams for details.
316 static cl::opt<uint64_t> ClAndMask("msan-and-mask",
317                                    cl::desc("Define custom MSan AndMask"),
318                                    cl::Hidden, cl::init(0));
319 
320 static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
321                                    cl::desc("Define custom MSan XorMask"),
322                                    cl::Hidden, cl::init(0));
323 
324 static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
325                                       cl::desc("Define custom MSan ShadowBase"),
326                                       cl::Hidden, cl::init(0));
327 
328 static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
329                                       cl::desc("Define custom MSan OriginBase"),
330                                       cl::Hidden, cl::init(0));
331 
332 static const char *const kMsanModuleCtorName = "msan.module_ctor";
333 static const char *const kMsanInitName = "__msan_init";
334 
335 namespace {
336 
337 // Memory map parameters used in application-to-shadow address calculation.
338 // Offset = (Addr & ~AndMask) ^ XorMask
339 // Shadow = ShadowBase + Offset
340 // Origin = OriginBase + Offset
341 struct MemoryMapParams {
342   uint64_t AndMask;
343   uint64_t XorMask;
344   uint64_t ShadowBase;
345   uint64_t OriginBase;
346 };
347 
348 struct PlatformMemoryMapParams {
349   const MemoryMapParams *bits32;
350   const MemoryMapParams *bits64;
351 };
352 
353 } // end anonymous namespace
354 
355 // i386 Linux
356 static const MemoryMapParams Linux_I386_MemoryMapParams = {
357   0x000080000000,  // AndMask
358   0,               // XorMask (not used)
359   0,               // ShadowBase (not used)
360   0x000040000000,  // OriginBase
361 };
362 
363 // x86_64 Linux
364 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
365 #ifdef MSAN_LINUX_X86_64_OLD_MAPPING
366   0x400000000000,  // AndMask
367   0,               // XorMask (not used)
368   0,               // ShadowBase (not used)
369   0x200000000000,  // OriginBase
370 #else
371   0,               // AndMask (not used)
372   0x500000000000,  // XorMask
373   0,               // ShadowBase (not used)
374   0x100000000000,  // OriginBase
375 #endif
376 };
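
// Illustrative example (the address is arbitrary, not from the source): with
// the default (non-old) x86_64 Linux parameters above,
//   Offset = (0x700000001000 & ~0) ^ 0x500000000000 = 0x200000001000
//   Shadow = ShadowBase + Offset = 0x200000001000
//   Origin = OriginBase + Offset = 0x300000001000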
377 
378 // mips64 Linux
379 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
380   0,               // AndMask (not used)
381   0x008000000000,  // XorMask
382   0,               // ShadowBase (not used)
383   0x002000000000,  // OriginBase
384 };
385 
386 // ppc64 Linux
387 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
388   0xE00000000000,  // AndMask
389   0x100000000000,  // XorMask
390   0x080000000000,  // ShadowBase
391   0x1C0000000000,  // OriginBase
392 };
393 
394 // s390x Linux
395 static const MemoryMapParams Linux_S390X_MemoryMapParams = {
396     0xC00000000000, // AndMask
397     0,              // XorMask (not used)
398     0x080000000000, // ShadowBase
399     0x1C0000000000, // OriginBase
400 };
401 
402 // aarch64 Linux
403 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
404   0,               // AndMask (not used)
405   0x06000000000,   // XorMask
406   0,               // ShadowBase (not used)
407   0x01000000000,   // OriginBase
408 };
409 
410 // i386 FreeBSD
411 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
412   0x000180000000,  // AndMask
413   0x000040000000,  // XorMask
414   0x000020000000,  // ShadowBase
415   0x000700000000,  // OriginBase
416 };
417 
418 // x86_64 FreeBSD
419 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
420   0xc00000000000,  // AndMask
421   0x200000000000,  // XorMask
422   0x100000000000,  // ShadowBase
423   0x380000000000,  // OriginBase
424 };
425 
426 // x86_64 NetBSD
427 static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
428   0,               // AndMask
429   0x500000000000,  // XorMask
430   0,               // ShadowBase
431   0x100000000000,  // OriginBase
432 };
433 
434 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
435   &Linux_I386_MemoryMapParams,
436   &Linux_X86_64_MemoryMapParams,
437 };
438 
439 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
440   nullptr,
441   &Linux_MIPS64_MemoryMapParams,
442 };
443 
444 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
445   nullptr,
446   &Linux_PowerPC64_MemoryMapParams,
447 };
448 
449 static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
450     nullptr,
451     &Linux_S390X_MemoryMapParams,
452 };
453 
454 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
455   nullptr,
456   &Linux_AArch64_MemoryMapParams,
457 };
458 
459 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
460   &FreeBSD_I386_MemoryMapParams,
461   &FreeBSD_X86_64_MemoryMapParams,
462 };
463 
464 static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
465   nullptr,
466   &NetBSD_X86_64_MemoryMapParams,
467 };
468 
469 namespace {
470 
471 /// Instrument functions of a module to detect uninitialized reads.
472 ///
473 /// Instantiating MemorySanitizer inserts the msan runtime library API function
474 /// declarations into the module if they don't exist already. Instantiating
475 /// ensures the __msan_init function is in the list of global constructors for
476 /// the module.
477 class MemorySanitizer {
478 public:
479   MemorySanitizer(Module &M, MemorySanitizerOptions Options)
480       : CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
481         Recover(Options.Recover) {
482     initializeModule(M);
483   }
484 
485   // MSan cannot be moved or copied because of MapParams.
486   MemorySanitizer(MemorySanitizer &&) = delete;
487   MemorySanitizer &operator=(MemorySanitizer &&) = delete;
488   MemorySanitizer(const MemorySanitizer &) = delete;
489   MemorySanitizer &operator=(const MemorySanitizer &) = delete;
490 
491   bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
492 
493 private:
494   friend struct MemorySanitizerVisitor;
495   friend struct VarArgAMD64Helper;
496   friend struct VarArgMIPS64Helper;
497   friend struct VarArgAArch64Helper;
498   friend struct VarArgPowerPC64Helper;
499   friend struct VarArgSystemZHelper;
500 
501   void initializeModule(Module &M);
502   void initializeCallbacks(Module &M);
503   void createKernelApi(Module &M);
504   void createUserspaceApi(Module &M);
505 
506   /// True if we're compiling the Linux kernel.
507   bool CompileKernel;
508   /// Track origins (allocation points) of uninitialized values.
509   int TrackOrigins;
510   bool Recover;
511 
512   LLVMContext *C;
513   Type *IntptrTy;
514   Type *OriginTy;
515 
516   // XxxTLS variables represent the per-thread state in MSan and per-task state
517   // in KMSAN.
  // For userspace these point to thread-local globals. In kernel land they
  // point to the members of a per-task struct obtained via a call to
520   // __msan_get_context_state().
521 
522   /// Thread-local shadow storage for function parameters.
523   Value *ParamTLS;
524 
525   /// Thread-local origin storage for function parameters.
526   Value *ParamOriginTLS;
527 
528   /// Thread-local shadow storage for function return value.
529   Value *RetvalTLS;
530 
531   /// Thread-local origin storage for function return value.
532   Value *RetvalOriginTLS;
533 
534   /// Thread-local shadow storage for in-register va_arg function
535   /// parameters (x86_64-specific).
536   Value *VAArgTLS;
537 
  /// Thread-local origin storage for in-register va_arg function
  /// parameters (x86_64-specific).
540   Value *VAArgOriginTLS;
541 
  /// Thread-local storage for the size of the va_arg overflow area
  /// (x86_64-specific).
544   Value *VAArgOverflowSizeTLS;
545 
546   /// Thread-local space used to pass origin value to the UMR reporting
547   /// function.
548   Value *OriginTLS;
549 
550   /// Are the instrumentation callbacks set up?
551   bool CallbacksInitialized = false;
552 
553   /// The run-time callback to print a warning.
554   FunctionCallee WarningFn;
555 
556   // These arrays are indexed by log2(AccessSize).
557   FunctionCallee MaybeWarningFn[kNumberOfAccessSizes];
558   FunctionCallee MaybeStoreOriginFn[kNumberOfAccessSizes];
559 
560   /// Run-time helper that generates a new origin value for a stack
561   /// allocation.
562   FunctionCallee MsanSetAllocaOrigin4Fn;
563 
564   /// Run-time helper that poisons stack on function entry.
565   FunctionCallee MsanPoisonStackFn;
566 
567   /// Run-time helper that records a store (or any event) of an
568   /// uninitialized value and returns an updated origin id encoding this info.
569   FunctionCallee MsanChainOriginFn;
570 
571   /// MSan runtime replacements for memmove, memcpy and memset.
572   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
573 
574   /// KMSAN callback for task-local function argument shadow.
575   StructType *MsanContextStateTy;
576   FunctionCallee MsanGetContextStateFn;
577 
578   /// Functions for poisoning/unpoisoning local variables
579   FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
580 
581   /// Each of the MsanMetadataPtrXxx functions returns a pair of shadow/origin
582   /// pointers.
583   FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
584   FunctionCallee MsanMetadataPtrForLoad_1_8[4];
585   FunctionCallee MsanMetadataPtrForStore_1_8[4];
586   FunctionCallee MsanInstrumentAsmStoreFn;
587 
588   /// Helper to choose between different MsanMetadataPtrXxx().
589   FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);
590 
591   /// Memory map parameters used in application-to-shadow calculation.
592   const MemoryMapParams *MapParams;
593 
594   /// Custom memory map parameters used when -msan-shadow-base or
  /// -msan-origin-base is provided.
596   MemoryMapParams CustomMapParams;
597 
598   MDNode *ColdCallWeights;
599 
600   /// Branch weights for origin store.
601   MDNode *OriginStoreWeights;
602 
603   /// An empty volatile inline asm that prevents callback merge.
604   InlineAsm *EmptyAsm;
605 };
606 
607 void insertModuleCtor(Module &M) {
608   getOrCreateSanitizerCtorAndInitFunctions(
609       M, kMsanModuleCtorName, kMsanInitName,
610       /*InitArgTypes=*/{},
611       /*InitArgs=*/{},
612       // This callback is invoked when the functions are created the first
613       // time. Hook them into the global ctors list in that case:
614       [&](Function *Ctor, FunctionCallee) {
615         if (!ClWithComdat) {
616           appendToGlobalCtors(M, Ctor, 0);
617           return;
618         }
619         Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
620         Ctor->setComdat(MsanCtorComdat);
621         appendToGlobalCtors(M, Ctor, 0, Ctor);
622       });
623 }
624 
625 /// A legacy function pass for msan instrumentation.
626 ///
627 /// Instruments functions to detect uninitialized reads.
628 struct MemorySanitizerLegacyPass : public FunctionPass {
629   // Pass identification, replacement for typeid.
630   static char ID;
631 
632   MemorySanitizerLegacyPass(MemorySanitizerOptions Options = {})
633       : FunctionPass(ID), Options(Options) {
634     initializeMemorySanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
635   }
636   StringRef getPassName() const override { return "MemorySanitizerLegacyPass"; }
637 
638   void getAnalysisUsage(AnalysisUsage &AU) const override {
639     AU.addRequired<TargetLibraryInfoWrapperPass>();
640   }
641 
642   bool runOnFunction(Function &F) override {
643     return MSan->sanitizeFunction(
644         F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F));
645   }
646   bool doInitialization(Module &M) override;
647 
648   Optional<MemorySanitizer> MSan;
649   MemorySanitizerOptions Options;
650 };
651 
652 template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
653   return (Opt.getNumOccurrences() > 0) ? Opt : Default;
654 }
655 
656 } // end anonymous namespace
657 
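// Note: the command-line flags above take precedence over the constructor
// arguments, and in kernel mode the defaults become TrackOrigins=2 and
// Recover=true (see the KMSAN notes in the file header).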
658 MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K)
659     : Kernel(getOptOrDefault(ClEnableKmsan, K)),
660       TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
661       Recover(getOptOrDefault(ClKeepGoing, Kernel || R)) {}
662 
663 PreservedAnalyses MemorySanitizerPass::run(Function &F,
664                                            FunctionAnalysisManager &FAM) {
665   MemorySanitizer Msan(*F.getParent(), Options);
666   if (Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
667     return PreservedAnalyses::none();
668   return PreservedAnalyses::all();
669 }
670 
671 PreservedAnalyses MemorySanitizerPass::run(Module &M,
672                                            ModuleAnalysisManager &AM) {
673   if (Options.Kernel)
674     return PreservedAnalyses::all();
675   insertModuleCtor(M);
676   return PreservedAnalyses::none();
677 }
678 
679 char MemorySanitizerLegacyPass::ID = 0;
680 
681 INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan",
682                       "MemorySanitizer: detects uninitialized reads.", false,
683                       false)
684 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
685 INITIALIZE_PASS_END(MemorySanitizerLegacyPass, "msan",
686                     "MemorySanitizer: detects uninitialized reads.", false,
687                     false)
688 
689 FunctionPass *
690 llvm::createMemorySanitizerLegacyPassPass(MemorySanitizerOptions Options) {
691   return new MemorySanitizerLegacyPass(Options);
692 }
693 
694 /// Create a non-const global initialized with the given string.
695 ///
696 /// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
699 static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
700                                                             StringRef Str) {
701   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
702   return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
703                             GlobalValue::PrivateLinkage, StrConst, "");
704 }
705 
706 /// Create KMSAN API callbacks.
707 void MemorySanitizer::createKernelApi(Module &M) {
708   IRBuilder<> IRB(*C);
709 
710   // These will be initialized in insertKmsanPrologue().
711   RetvalTLS = nullptr;
712   RetvalOriginTLS = nullptr;
713   ParamTLS = nullptr;
714   ParamOriginTLS = nullptr;
715   VAArgTLS = nullptr;
716   VAArgOriginTLS = nullptr;
717   VAArgOverflowSizeTLS = nullptr;
718   // OriginTLS is unused in the kernel.
719   OriginTLS = nullptr;
720 
721   // __msan_warning() in the kernel takes an origin.
722   WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
723                                     IRB.getInt32Ty());
724   // Requests the per-task context state (kmsan_context_state*) from the
725   // runtime library.
726   MsanContextStateTy = StructType::get(
727       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
728       ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
729       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
730       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */
731       IRB.getInt64Ty(), ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
732       OriginTy);
733   MsanGetContextStateFn = M.getOrInsertFunction(
734       "__msan_get_context_state", PointerType::get(MsanContextStateTy, 0));
735 
736   Type *RetTy = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
737                                 PointerType::get(IRB.getInt32Ty(), 0));
738 
739   for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
740     std::string name_load =
741         "__msan_metadata_ptr_for_load_" + std::to_string(size);
742     std::string name_store =
743         "__msan_metadata_ptr_for_store_" + std::to_string(size);
744     MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction(
745         name_load, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
746     MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction(
747         name_store, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
748   }
749 
750   MsanMetadataPtrForLoadN = M.getOrInsertFunction(
751       "__msan_metadata_ptr_for_load_n", RetTy,
752       PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
753   MsanMetadataPtrForStoreN = M.getOrInsertFunction(
754       "__msan_metadata_ptr_for_store_n", RetTy,
755       PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
756 
757   // Functions for poisoning and unpoisoning memory.
758   MsanPoisonAllocaFn =
759       M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
760                             IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
761   MsanUnpoisonAllocaFn = M.getOrInsertFunction(
762       "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
763 }
764 
765 static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
766   return M.getOrInsertGlobal(Name, Ty, [&] {
767     return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
768                               nullptr, Name, nullptr,
769                               GlobalVariable::InitialExecTLSModel);
770   });
771 }
772 
773 /// Insert declarations for userspace-specific functions and globals.
774 void MemorySanitizer::createUserspaceApi(Module &M) {
775   IRBuilder<> IRB(*C);
776   // Create the callback.
777   // FIXME: this function should have "Cold" calling conv,
778   // which is not yet implemented.
779   StringRef WarningFnName = Recover ? "__msan_warning"
780                                     : "__msan_warning_noreturn";
781   WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
782 
783   // Create the global TLS variables.
784   RetvalTLS =
785       getOrInsertGlobal(M, "__msan_retval_tls",
786                         ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
787 
788   RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
789 
790   ParamTLS =
791       getOrInsertGlobal(M, "__msan_param_tls",
792                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
793 
794   ParamOriginTLS =
795       getOrInsertGlobal(M, "__msan_param_origin_tls",
796                         ArrayType::get(OriginTy, kParamTLSSize / 4));
797 
798   VAArgTLS =
799       getOrInsertGlobal(M, "__msan_va_arg_tls",
800                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
801 
802   VAArgOriginTLS =
803       getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
804                         ArrayType::get(OriginTy, kParamTLSSize / 4));
805 
806   VAArgOverflowSizeTLS =
807       getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
808   OriginTLS = getOrInsertGlobal(M, "__msan_origin_tls", IRB.getInt32Ty());
809 
810   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
811        AccessSizeIndex++) {
812     unsigned AccessSize = 1 << AccessSizeIndex;
813     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
814     SmallVector<std::pair<unsigned, Attribute>, 2> MaybeWarningFnAttrs;
815     MaybeWarningFnAttrs.push_back(std::make_pair(
816         AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
817     MaybeWarningFnAttrs.push_back(std::make_pair(
818         AttributeList::FirstArgIndex + 1, Attribute::get(*C, Attribute::ZExt)));
819     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
820         FunctionName, AttributeList::get(*C, MaybeWarningFnAttrs),
821         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
822 
823     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
824     SmallVector<std::pair<unsigned, Attribute>, 2> MaybeStoreOriginFnAttrs;
825     MaybeStoreOriginFnAttrs.push_back(std::make_pair(
826         AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
827     MaybeStoreOriginFnAttrs.push_back(std::make_pair(
828         AttributeList::FirstArgIndex + 2, Attribute::get(*C, Attribute::ZExt)));
829     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
830         FunctionName, AttributeList::get(*C, MaybeStoreOriginFnAttrs),
831         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt8PtrTy(),
832         IRB.getInt32Ty());
833   }
834 
835   MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
836     "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
837     IRB.getInt8PtrTy(), IntptrTy);
838   MsanPoisonStackFn =
839       M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
840                             IRB.getInt8PtrTy(), IntptrTy);
841 }
842 
843 /// Insert extern declaration of runtime-provided functions and globals.
844 void MemorySanitizer::initializeCallbacks(Module &M) {
845   // Only do this once.
846   if (CallbacksInitialized)
847     return;
848 
849   IRBuilder<> IRB(*C);
850   // Initialize callbacks that are common for kernel and userspace
851   // instrumentation.
852   MsanChainOriginFn = M.getOrInsertFunction(
853     "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
854   MemmoveFn = M.getOrInsertFunction(
855     "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
856     IRB.getInt8PtrTy(), IntptrTy);
857   MemcpyFn = M.getOrInsertFunction(
858     "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
859     IntptrTy);
860   MemsetFn = M.getOrInsertFunction(
861     "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
862     IntptrTy);
  // We insert an empty inline asm after __msan_warning* calls to avoid
  // callback merge.
864   EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
865                             StringRef(""), StringRef(""),
866                             /*hasSideEffects=*/true);
867 
868   MsanInstrumentAsmStoreFn =
869       M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
870                             PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
871 
872   if (CompileKernel) {
873     createKernelApi(M);
874   } else {
875     createUserspaceApi(M);
876   }
877   CallbacksInitialized = true;
878 }
879 
880 FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
881                                                              int size) {
882   FunctionCallee *Fns =
883       isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
884   switch (size) {
885   case 1:
886     return Fns[0];
887   case 2:
888     return Fns[1];
889   case 4:
890     return Fns[2];
891   case 8:
892     return Fns[3];
893   default:
894     return nullptr;
895   }
896 }
897 
898 /// Module-level initialization.
899 ///
/// Inserts a call to __msan_init into the module's constructor list.
901 void MemorySanitizer::initializeModule(Module &M) {
902   auto &DL = M.getDataLayout();
903 
904   bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
905   bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
906   // Check the overrides first
907   if (ShadowPassed || OriginPassed) {
908     CustomMapParams.AndMask = ClAndMask;
909     CustomMapParams.XorMask = ClXorMask;
910     CustomMapParams.ShadowBase = ClShadowBase;
911     CustomMapParams.OriginBase = ClOriginBase;
912     MapParams = &CustomMapParams;
913   } else {
914     Triple TargetTriple(M.getTargetTriple());
915     switch (TargetTriple.getOS()) {
916       case Triple::FreeBSD:
917         switch (TargetTriple.getArch()) {
918           case Triple::x86_64:
919             MapParams = FreeBSD_X86_MemoryMapParams.bits64;
920             break;
921           case Triple::x86:
922             MapParams = FreeBSD_X86_MemoryMapParams.bits32;
923             break;
924           default:
925             report_fatal_error("unsupported architecture");
926         }
927         break;
928       case Triple::NetBSD:
929         switch (TargetTriple.getArch()) {
930           case Triple::x86_64:
931             MapParams = NetBSD_X86_MemoryMapParams.bits64;
932             break;
933           default:
934             report_fatal_error("unsupported architecture");
935         }
936         break;
937       case Triple::Linux:
938         switch (TargetTriple.getArch()) {
939           case Triple::x86_64:
940             MapParams = Linux_X86_MemoryMapParams.bits64;
941             break;
942           case Triple::x86:
943             MapParams = Linux_X86_MemoryMapParams.bits32;
944             break;
945           case Triple::mips64:
946           case Triple::mips64el:
947             MapParams = Linux_MIPS_MemoryMapParams.bits64;
948             break;
949           case Triple::ppc64:
950           case Triple::ppc64le:
951             MapParams = Linux_PowerPC_MemoryMapParams.bits64;
952             break;
953           case Triple::systemz:
954             MapParams = Linux_S390_MemoryMapParams.bits64;
955             break;
956           case Triple::aarch64:
957           case Triple::aarch64_be:
958             MapParams = Linux_ARM_MemoryMapParams.bits64;
959             break;
960           default:
961             report_fatal_error("unsupported architecture");
962         }
963         break;
964       default:
965         report_fatal_error("unsupported operating system");
966     }
967   }
968 
969   C = &(M.getContext());
970   IRBuilder<> IRB(*C);
971   IntptrTy = IRB.getIntPtrTy(DL);
972   OriginTy = IRB.getInt32Ty();
973 
974   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
975   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
976 
977   if (!CompileKernel) {
978     if (TrackOrigins)
979       M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
980         return new GlobalVariable(
981             M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
982             IRB.getInt32(TrackOrigins), "__msan_track_origins");
983       });
984 
985     if (Recover)
986       M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
987         return new GlobalVariable(M, IRB.getInt32Ty(), true,
988                                   GlobalValue::WeakODRLinkage,
989                                   IRB.getInt32(Recover), "__msan_keep_going");
990       });
  }
992 }
993 
994 bool MemorySanitizerLegacyPass::doInitialization(Module &M) {
995   if (!Options.Kernel)
996     insertModuleCtor(M);
997   MSan.emplace(M, Options);
998   return true;
999 }
1000 
1001 namespace {
1002 
1003 /// A helper class that handles instrumentation of VarArg
1004 /// functions on a particular platform.
1005 ///
1006 /// Implementations are expected to insert the instrumentation
1007 /// necessary to propagate argument shadow through VarArg function
1008 /// calls. Visit* methods are called during an InstVisitor pass over
1009 /// the function, and should avoid creating new basic blocks. A new
1010 /// instance of this class is created for each instrumented function.
1011 struct VarArgHelper {
1012   virtual ~VarArgHelper() = default;
1013 
1014   /// Visit a CallBase.
1015   virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
1016 
1017   /// Visit a va_start call.
1018   virtual void visitVAStartInst(VAStartInst &I) = 0;
1019 
1020   /// Visit a va_copy call.
1021   virtual void visitVACopyInst(VACopyInst &I) = 0;
1022 
1023   /// Finalize function instrumentation.
1024   ///
1025   /// This method is called after visiting all interesting (see above)
1026   /// instructions in a function.
1027   virtual void finalizeInstrumentation() = 0;
1028 };
1029 
1030 struct MemorySanitizerVisitor;
1031 
1032 } // end anonymous namespace
1033 
1034 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
1035                                         MemorySanitizerVisitor &Visitor);
1036 
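/// Map a type size in bits to log2 of the smallest power-of-two byte access
/// that covers it, used to index the access-size-indexed callback arrays.
/// For example (values worked out by hand): 8 bits -> 0, 16 -> 1, 32 -> 2,
/// 64 -> 3.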
1037 static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
1038   if (TypeSize <= 8) return 0;
1039   return Log2_32_Ceil((TypeSize + 7) / 8);
1040 }
1041 
1042 namespace {
1043 
1044 /// This class does all the work for a given function. Store and Load
1045 /// instructions store and load corresponding shadow and origin
1046 /// values. Most instructions propagate shadow from arguments to their
1047 /// return values. Certain instructions (most importantly, BranchInst)
1048 /// test their argument shadow and print reports (with a runtime call) if it's
1049 /// non-zero.
1050 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
1051   Function &F;
1052   MemorySanitizer &MS;
1053   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
1054   ValueMap<Value*, Value*> ShadowMap, OriginMap;
1055   std::unique_ptr<VarArgHelper> VAHelper;
1056   const TargetLibraryInfo *TLI;
1057   BasicBlock *ActualFnStart;
1058 
1059   // The following flags disable parts of MSan instrumentation based on
1060   // blacklist contents and command-line options.
1061   bool InsertChecks;
1062   bool PropagateShadow;
1063   bool PoisonStack;
1064   bool PoisonUndef;
1065   bool CheckReturnValue;
1066 
1067   struct ShadowOriginAndInsertPoint {
1068     Value *Shadow;
1069     Value *Origin;
1070     Instruction *OrigIns;
1071 
1072     ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
1073       : Shadow(S), Origin(O), OrigIns(I) {}
1074   };
1075   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
1076   bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
1077   SmallSet<AllocaInst *, 16> AllocaSet;
1078   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
1079   SmallVector<StoreInst *, 16> StoreList;
1080 
1081   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
1082                          const TargetLibraryInfo &TLI)
1083       : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
1084     bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
1085     InsertChecks = SanitizeFunction;
1086     PropagateShadow = SanitizeFunction;
1087     PoisonStack = SanitizeFunction && ClPoisonStack;
1088     PoisonUndef = SanitizeFunction && ClPoisonUndef;
1089     // FIXME: Consider using SpecialCaseList to specify a list of functions that
1090     // must always return fully initialized values. For now, we hardcode "main".
1091     CheckReturnValue = SanitizeFunction && (F.getName() == "main");
1092 
1093     MS.initializeCallbacks(*F.getParent());
1094     if (MS.CompileKernel)
1095       ActualFnStart = insertKmsanPrologue(F);
1096     else
1097       ActualFnStart = &F.getEntryBlock();
1098 
1099     LLVM_DEBUG(if (!InsertChecks) dbgs()
1100                << "MemorySanitizer is not inserting checks into '"
1101                << F.getName() << "'\n");
1102   }
1103 
1104   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
1105     if (MS.TrackOrigins <= 1) return V;
1106     return IRB.CreateCall(MS.MsanChainOriginFn, V);
1107   }
1108 
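  /// Widen a 4-byte origin value to the pointer-sized integer type by
  /// replicating it, so paintOrigin() below can fill two origin slots with a
  /// single store. E.g. on a 64-bit target (example value, not from the
  /// source) an origin of 0x0000ABCD becomes 0x0000ABCD0000ABCD.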
1109   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
1110     const DataLayout &DL = F.getParent()->getDataLayout();
1111     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1112     if (IntptrSize == kOriginSize) return Origin;
1113     assert(IntptrSize == kOriginSize * 2);
1114     Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
1115     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
1116   }
1117 
1118   /// Fill memory range with the given origin value.
1119   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
1120                    unsigned Size, Align Alignment) {
1121     const DataLayout &DL = F.getParent()->getDataLayout();
1122     const Align IntptrAlignment = Align(DL.getABITypeAlignment(MS.IntptrTy));
1123     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1124     assert(IntptrAlignment >= kMinOriginAlignment);
1125     assert(IntptrSize >= kOriginSize);
1126 
1127     unsigned Ofs = 0;
1128     Align CurrentAlignment = Alignment;
1129     if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
1130       Value *IntptrOrigin = originToIntptr(IRB, Origin);
1131       Value *IntptrOriginPtr =
1132           IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
1133       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
1134         Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
1135                        : IntptrOriginPtr;
1136         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
1137         Ofs += IntptrSize / kOriginSize;
1138         CurrentAlignment = IntptrAlignment;
1139       }
1140     }
1141 
1142     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
1143       Value *GEP =
1144           i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
1145       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
1146       CurrentAlignment = kMinOriginAlignment;
1147     }
1148   }
1149 
1150   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
1151                    Value *OriginPtr, Align Alignment, bool AsCall) {
1152     const DataLayout &DL = F.getParent()->getDataLayout();
1153     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1154     unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
1155     if (Shadow->getType()->isAggregateType()) {
1156       paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1157                   OriginAlignment);
1158     } else {
1159       Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
1160       if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1161         if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
1162           paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1163                       OriginAlignment);
1164         return;
1165       }
1166 
1167       unsigned TypeSizeInBits =
1168           DL.getTypeSizeInBits(ConvertedShadow->getType());
1169       unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1170       if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1171         FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1172         Value *ConvertedShadow2 = IRB.CreateZExt(
1173             ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1174         IRB.CreateCall(Fn, {ConvertedShadow2,
1175                             IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
1176                             Origin});
1177       } else {
1178         Value *Cmp = IRB.CreateICmpNE(
1179             ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
1180         Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1181             Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
1182         IRBuilder<> IRBNew(CheckTerm);
1183         paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1184                     OriginAlignment);
1185       }
1186     }
1187   }
1188 
1189   void materializeStores(bool InstrumentWithCalls) {
1190     for (StoreInst *SI : StoreList) {
1191       IRBuilder<> IRB(SI);
1192       Value *Val = SI->getValueOperand();
1193       Value *Addr = SI->getPointerOperand();
1194       Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1195       Value *ShadowPtr, *OriginPtr;
1196       Type *ShadowTy = Shadow->getType();
1197       const Align Alignment = assumeAligned(SI->getAlignment());
1198       const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1199       std::tie(ShadowPtr, OriginPtr) =
1200           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
1201 
1202       StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
1203       LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
1204       (void)NewSI;
1205 
1206       if (SI->isAtomic())
1207         SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
1208 
1209       if (MS.TrackOrigins && !SI->isAtomic())
1210         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1211                     OriginAlignment, InstrumentWithCalls);
1212     }
1213   }
1214 
1215   /// Helper function to insert a warning at IRB's current insert point.
1216   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
1217     if (!Origin)
1218       Origin = (Value *)IRB.getInt32(0);
1219     if (MS.CompileKernel) {
1220       IRB.CreateCall(MS.WarningFn, Origin);
1221     } else {
1222       if (MS.TrackOrigins) {
1223         IRB.CreateStore(Origin, MS.OriginTLS);
1224       }
1225       IRB.CreateCall(MS.WarningFn, {});
1226     }
1227     IRB.CreateCall(MS.EmptyAsm, {});
1228     // FIXME: Insert UnreachableInst if !MS.Recover?
1229     // This may invalidate some of the following checks and needs to be done
1230     // at the very end.
1231   }
1232 
1233   void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
1234                            bool AsCall) {
1235     IRBuilder<> IRB(OrigIns);
1236     LLVM_DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
1237     Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
1238     LLVM_DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
1239 
1240     if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1241       if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
1242         insertWarningFn(IRB, Origin);
1243       }
1244       return;
1245     }
1246 
1247     const DataLayout &DL = OrigIns->getModule()->getDataLayout();
1248 
1249     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1250     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1251     if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1252       FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1253       Value *ConvertedShadow2 =
1254           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1255       IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
1256                                                 ? Origin
1257                                                 : (Value *)IRB.getInt32(0)});
1258     } else {
1259       Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
1260                                     getCleanShadow(ConvertedShadow), "_mscmp");
1261       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1262           Cmp, OrigIns,
1263           /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
1264 
1265       IRB.SetInsertPoint(CheckTerm);
1266       insertWarningFn(IRB, Origin);
1267       LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
1268     }
1269   }
1270 
1271   void materializeChecks(bool InstrumentWithCalls) {
1272     for (const auto &ShadowData : InstrumentationList) {
1273       Instruction *OrigIns = ShadowData.OrigIns;
1274       Value *Shadow = ShadowData.Shadow;
1275       Value *Origin = ShadowData.Origin;
1276       materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
1277     }
1278     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
1279   }
1280 
1281   BasicBlock *insertKmsanPrologue(Function &F) {
1282     BasicBlock *ret =
1283         SplitBlock(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHI());
1284     IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
1285     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
1286     Constant *Zero = IRB.getInt32(0);
1287     MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1288                                 {Zero, IRB.getInt32(0)}, "param_shadow");
1289     MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1290                                  {Zero, IRB.getInt32(1)}, "retval_shadow");
1291     MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1292                                 {Zero, IRB.getInt32(2)}, "va_arg_shadow");
1293     MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1294                                       {Zero, IRB.getInt32(3)}, "va_arg_origin");
1295     MS.VAArgOverflowSizeTLS =
1296         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1297                       {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
1298     MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1299                                       {Zero, IRB.getInt32(5)}, "param_origin");
1300     MS.RetvalOriginTLS =
1301         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1302                       {Zero, IRB.getInt32(6)}, "retval_origin");
1303     return ret;
1304   }
1305 
1306   /// Add MemorySanitizer instrumentation to a function.
1307   bool runOnFunction() {
1308     // In the presence of unreachable blocks, we may see Phi nodes with
1309     // incoming nodes from such blocks. Since InstVisitor skips unreachable
1310     // blocks, such nodes will not have any shadow value associated with them.
1311     // It's easier to remove unreachable blocks than deal with missing shadow.
1312     removeUnreachableBlocks(F);
1313 
1314     // Iterate all BBs in depth-first order and create shadow instructions
1315     // for all instructions (where applicable).
1316     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
1317     for (BasicBlock *BB : depth_first(ActualFnStart))
1318       visit(*BB);
1319 
1320     // Finalize PHI nodes.
1321     for (PHINode *PN : ShadowPHINodes) {
1322       PHINode *PNS = cast<PHINode>(getShadow(PN));
1323       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1324       size_t NumValues = PN->getNumIncomingValues();
1325       for (size_t v = 0; v < NumValues; v++) {
1326         PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1327         if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1328       }
1329     }
1330 
1331     VAHelper->finalizeInstrumentation();
1332 
    // Poison allocas at their llvm.lifetime.start intrinsics, unless we have
    // fallen back to poisoning every alloca at its definition.
1335     if (InstrumentLifetimeStart) {
1336       for (auto Item : LifetimeStartList) {
1337         instrumentAlloca(*Item.second, Item.first);
1338         AllocaSet.erase(Item.second);
1339       }
1340     }
1341     // Poison the allocas for which we didn't instrument the corresponding
1342     // lifetime intrinsics.
1343     for (AllocaInst *AI : AllocaSet)
1344       instrumentAlloca(*AI);
1345 
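    // If this function accumulates many checks and stores, switch to
    // callback-based instrumentation to limit code-size growth; the cutoff
    // comes from ClInstrumentationWithCallThreshold.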
1346     bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
1347                                InstrumentationList.size() + StoreList.size() >
1348                                    (unsigned)ClInstrumentationWithCallThreshold;
1349 
1350     // Insert shadow value checks.
1351     materializeChecks(InstrumentWithCalls);
1352 
1353     // Delayed instrumentation of StoreInst.
    // This must not add new address checks (those were materialized above).
1355     materializeStores(InstrumentWithCalls);
1356 
1357     return true;
1358   }
1359 
1360   /// Compute the shadow type that corresponds to a given Value.
1361   Type *getShadowTy(Value *V) {
1362     return getShadowTy(V->getType());
1363   }
1364 
1365   /// Compute the shadow type that corresponds to a given Type.
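  ///
  /// For example, the shadow of i32 is i32, the shadow of <4 x float> is
  /// <4 x i32>, the shadow of {i32, double} is {i32, i64}, and any other
  /// sized type gets an integer shadow of the same bit width.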
1366   Type *getShadowTy(Type *OrigTy) {
1367     if (!OrigTy->isSized()) {
1368       return nullptr;
1369     }
1370     // For integer type, shadow is the same as the original type.
1371     // This may return weird-sized types like i1.
1372     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
1373       return IT;
1374     const DataLayout &DL = F.getParent()->getDataLayout();
1375     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1376       uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
1377       return VectorType::get(IntegerType::get(*MS.C, EltSize),
1378                              VT->getNumElements());
1379     }
1380     if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1381       return ArrayType::get(getShadowTy(AT->getElementType()),
1382                             AT->getNumElements());
1383     }
1384     if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
1385       SmallVector<Type*, 4> Elements;
1386       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1387         Elements.push_back(getShadowTy(ST->getElementType(i)));
1388       StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
1389       LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
1390       return Res;
1391     }
1392     uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
1393     return IntegerType::get(*MS.C, TypeSize);
1394   }
1395 
1396   /// Flatten a vector type.
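  ///
  /// E.g. <4 x i32> is flattened to i128; non-vector types are returned
  /// unchanged.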
1397   Type *getShadowTyNoVec(Type *ty) {
1398     if (VectorType *vt = dyn_cast<VectorType>(ty))
1399       return IntegerType::get(*MS.C,
1400                               vt->getPrimitiveSizeInBits().getFixedSize());
1401     return ty;
1402   }
1403 
  /// Convert a shadow value to its flattened variant.
1405   Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
1406     Type *Ty = V->getType();
1407     Type *NoVecTy = getShadowTyNoVec(Ty);
1408     if (Ty == NoVecTy) return V;
1409     return IRB.CreateBitCast(V, NoVecTy);
1410   }
1411 
1412   /// Compute the integer shadow offset that corresponds to a given
1413   /// application address.
1414   ///
1415   /// Offset = (Addr & ~AndMask) ^ XorMask
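  ///
  /// For mappings with AndMask == 0 this degenerates to a single XOR of the
  /// application address with XorMask; both constants come from MS.MapParams
  /// for the current target.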
1416   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
1417     Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
1418 
1419     uint64_t AndMask = MS.MapParams->AndMask;
1420     if (AndMask)
1421       OffsetLong =
1422           IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
1423 
1424     uint64_t XorMask = MS.MapParams->XorMask;
1425     if (XorMask)
1426       OffsetLong =
1427           IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
1428     return OffsetLong;
1429   }
1430 
1431   /// Compute the shadow and origin addresses corresponding to a given
1432   /// application address.
1433   ///
1434   /// Shadow = ShadowBase + Offset
1435   /// Origin = (OriginBase + Offset) & ~3ULL
1436   std::pair<Value *, Value *>
1437   getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
1438                               MaybeAlign Alignment) {
1439     Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1440     Value *ShadowLong = ShadowOffset;
1441     uint64_t ShadowBase = MS.MapParams->ShadowBase;
1442     if (ShadowBase != 0) {
1443       ShadowLong =
1444         IRB.CreateAdd(ShadowLong,
1445                       ConstantInt::get(MS.IntptrTy, ShadowBase));
1446     }
1447     Value *ShadowPtr =
1448         IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
1449     Value *OriginPtr = nullptr;
1450     if (MS.TrackOrigins) {
1451       Value *OriginLong = ShadowOffset;
1452       uint64_t OriginBase = MS.MapParams->OriginBase;
1453       if (OriginBase != 0)
1454         OriginLong = IRB.CreateAdd(OriginLong,
1455                                    ConstantInt::get(MS.IntptrTy, OriginBase));
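      // Origin slots are kMinOriginAlignment bytes wide; if the access may be
      // less aligned than that, round the origin address down to the start of
      // its slot.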
1456       if (!Alignment || *Alignment < kMinOriginAlignment) {
1457         uint64_t Mask = kMinOriginAlignment.value() - 1;
1458         OriginLong =
1459             IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
1460       }
1461       OriginPtr =
1462           IRB.CreateIntToPtr(OriginLong, PointerType::get(MS.OriginTy, 0));
1463     }
1464     return std::make_pair(ShadowPtr, OriginPtr);
1465   }
1466 
1467   std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
1468                                                        IRBuilder<> &IRB,
1469                                                        Type *ShadowTy,
1470                                                        bool isStore) {
1471     Value *ShadowOriginPtrs;
1472     const DataLayout &DL = F.getParent()->getDataLayout();
1473     int Size = DL.getTypeStoreSize(ShadowTy);
1474 
1475     FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1476     Value *AddrCast =
1477         IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
1478     if (Getter) {
1479       ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
1480     } else {
1481       Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
1482       ShadowOriginPtrs = IRB.CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
1483                                                 : MS.MsanMetadataPtrForLoadN,
1484                                         {AddrCast, SizeVal});
1485     }
1486     Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
1487     ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
1488     Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
1489 
1490     return std::make_pair(ShadowPtr, OriginPtr);
1491   }
1492 
1493   std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1494                                                  Type *ShadowTy,
1495                                                  MaybeAlign Alignment,
1496                                                  bool isStore) {
1497     if (MS.CompileKernel)
1498       return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
1499     return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1500   }
1501 
1502   /// Compute the shadow address for a given function argument.
1503   ///
  /// Shadow = ParamTLS + ArgOffset.
1505   Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
1506                                  int ArgOffset) {
1507     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1508     if (ArgOffset)
1509       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1510     return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1511                               "_msarg");
1512   }
1513 
1514   /// Compute the origin address for a given function argument.
1515   Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
1516                                  int ArgOffset) {
1517     if (!MS.TrackOrigins)
1518       return nullptr;
1519     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1520     if (ArgOffset)
1521       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1522     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
1523                               "_msarg_o");
1524   }
1525 
1526   /// Compute the shadow address for a retval.
1527   Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1528     return IRB.CreatePointerCast(MS.RetvalTLS,
1529                                  PointerType::get(getShadowTy(A), 0),
1530                                  "_msret");
1531   }
1532 
1533   /// Compute the origin address for a retval.
1534   Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1535     // We keep a single origin for the entire retval. Might be too optimistic.
1536     return MS.RetvalOriginTLS;
1537   }
1538 
1539   /// Set SV to be the shadow value for V.
1540   void setShadow(Value *V, Value *SV) {
1541     assert(!ShadowMap.count(V) && "Values may only have one shadow");
1542     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1543   }
1544 
1545   /// Set Origin to be the origin value for V.
1546   void setOrigin(Value *V, Value *Origin) {
1547     if (!MS.TrackOrigins) return;
1548     assert(!OriginMap.count(V) && "Values may only have one origin");
1549     LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
1550     OriginMap[V] = Origin;
1551   }
1552 
1553   Constant *getCleanShadow(Type *OrigTy) {
1554     Type *ShadowTy = getShadowTy(OrigTy);
1555     if (!ShadowTy)
1556       return nullptr;
1557     return Constant::getNullValue(ShadowTy);
1558   }
1559 
1560   /// Create a clean shadow value for a given value.
1561   ///
1562   /// Clean shadow (all zeroes) means all bits of the value are defined
1563   /// (initialized).
1564   Constant *getCleanShadow(Value *V) {
1565     return getCleanShadow(V->getType());
1566   }
1567 
1568   /// Create a dirty shadow of a given shadow type.
1569   Constant *getPoisonedShadow(Type *ShadowTy) {
1570     assert(ShadowTy);
1571     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1572       return Constant::getAllOnesValue(ShadowTy);
1573     if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1574       SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1575                                       getPoisonedShadow(AT->getElementType()));
1576       return ConstantArray::get(AT, Vals);
1577     }
1578     if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1579       SmallVector<Constant *, 4> Vals;
1580       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1581         Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1582       return ConstantStruct::get(ST, Vals);
1583     }
1584     llvm_unreachable("Unexpected shadow type");
1585   }
1586 
1587   /// Create a dirty shadow for a given value.
1588   Constant *getPoisonedShadow(Value *V) {
1589     Type *ShadowTy = getShadowTy(V);
1590     if (!ShadowTy)
1591       return nullptr;
1592     return getPoisonedShadow(ShadowTy);
1593   }
1594 
1595   /// Create a clean (zero) origin.
1596   Value *getCleanOrigin() {
1597     return Constant::getNullValue(MS.OriginTy);
1598   }
1599 
1600   /// Get the shadow value for a given Value.
1601   ///
1602   /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
1604   Value *getShadow(Value *V) {
1605     if (!PropagateShadow) return getCleanShadow(V);
1606     if (Instruction *I = dyn_cast<Instruction>(V)) {
1607       if (I->getMetadata("nosanitize"))
1608         return getCleanShadow(V);
1609       // For instructions the shadow is already stored in the map.
1610       Value *Shadow = ShadowMap[V];
1611       if (!Shadow) {
1612         LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1613         (void)I;
1614         assert(Shadow && "No shadow for a value");
1615       }
1616       return Shadow;
1617     }
1618     if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1619       Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1620       LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1621       (void)U;
1622       return AllOnes;
1623     }
1624     if (Argument *A = dyn_cast<Argument>(V)) {
1625       // For arguments we compute the shadow on demand and store it in the map.
1626       Value **ShadowPtr = &ShadowMap[V];
1627       if (*ShadowPtr)
1628         return *ShadowPtr;
1629       Function *F = A->getParent();
1630       IRBuilder<> EntryIRB(ActualFnStart->getFirstNonPHI());
1631       unsigned ArgOffset = 0;
1632       const DataLayout &DL = F->getParent()->getDataLayout();
1633       for (auto &FArg : F->args()) {
1634         if (!FArg.getType()->isSized()) {
1635           LLVM_DEBUG(dbgs() << "Arg is not sized\n");
1636           continue;
1637         }
1638         unsigned Size =
1639             FArg.hasByValAttr()
1640                 ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
1641                 : DL.getTypeAllocSize(FArg.getType());
1642         if (A == &FArg) {
1643           bool Overflow = ArgOffset + Size > kParamTLSSize;
1644           Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1645           if (FArg.hasByValAttr()) {
1646             // ByVal pointer itself has clean shadow. We copy the actual
1647             // argument shadow to the underlying memory.
1648             // Figure out maximal valid memcpy alignment.
1649             const Align ArgAlign = DL.getValueOrABITypeAlignment(
1650                 MaybeAlign(FArg.getParamAlignment()),
1651                 A->getType()->getPointerElementType());
1652             Value *CpShadowPtr =
1653                 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1654                                    /*isStore*/ true)
1655                     .first;
1656             // TODO(glider): need to copy origins.
1657             if (Overflow) {
1658               // ParamTLS overflow.
1659               EntryIRB.CreateMemSet(
1660                   CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
1661                   Size, ArgAlign);
1662             } else {
1663               const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1664               Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
1665                                                  CopyAlign, Size);
1666               LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
1667               (void)Cpy;
1668             }
1669             *ShadowPtr = getCleanShadow(V);
1670           } else {
1671             if (Overflow) {
1672               // ParamTLS overflow.
1673               *ShadowPtr = getCleanShadow(V);
1674             } else {
1675               *ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
1676                                                       kShadowTLSAlignment);
1677             }
1678           }
1679           LLVM_DEBUG(dbgs()
1680                      << "  ARG:    " << FArg << " ==> " << **ShadowPtr << "\n");
1681           if (MS.TrackOrigins && !Overflow) {
1682             Value *OriginPtr =
1683                 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1684             setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
1685           } else {
1686             setOrigin(A, getCleanOrigin());
1687           }
1688         }
1689         ArgOffset += alignTo(Size, kShadowTLSAlignment);
1690       }
1691       assert(*ShadowPtr && "Could not find shadow for an argument");
1692       return *ShadowPtr;
1693     }
1694     // For everything else the shadow is zero.
1695     return getCleanShadow(V);
1696   }
1697 
1698   /// Get the shadow for i-th argument of the instruction I.
1699   Value *getShadow(Instruction *I, int i) {
1700     return getShadow(I->getOperand(i));
1701   }
1702 
1703   /// Get the origin for a value.
1704   Value *getOrigin(Value *V) {
1705     if (!MS.TrackOrigins) return nullptr;
1706     if (!PropagateShadow) return getCleanOrigin();
1707     if (isa<Constant>(V)) return getCleanOrigin();
1708     assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1709            "Unexpected value type in getOrigin()");
1710     if (Instruction *I = dyn_cast<Instruction>(V)) {
1711       if (I->getMetadata("nosanitize"))
1712         return getCleanOrigin();
1713     }
1714     Value *Origin = OriginMap[V];
1715     assert(Origin && "Missing origin");
1716     return Origin;
1717   }
1718 
1719   /// Get the origin for i-th argument of the instruction I.
1720   Value *getOrigin(Instruction *I, int i) {
1721     return getOrigin(I->getOperand(i));
1722   }
1723 
1724   /// Remember the place where a shadow check should be inserted.
1725   ///
1726   /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
1728   void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1729     assert(Shadow);
1730     if (!InsertChecks) return;
1731 #ifndef NDEBUG
1732     Type *ShadowTy = Shadow->getType();
1733     assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1734            "Can only insert checks for integer and vector shadow types");
1735 #endif
1736     InstrumentationList.push_back(
1737         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1738   }
1739 
1740   /// Remember the place where a shadow check should be inserted.
1741   ///
1742   /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
1744   void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1745     assert(Val);
1746     Value *Shadow, *Origin;
1747     if (ClCheckConstantShadow) {
1748       Shadow = getShadow(Val);
1749       if (!Shadow) return;
1750       Origin = getOrigin(Val);
1751     } else {
1752       Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1753       if (!Shadow) return;
1754       Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1755     }
1756     insertShadowCheck(Shadow, Origin, OrigIns);
1757   }
1758 
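  // Strengthen the ordering of instrumented atomic operations so that the
  // shadow accesses added around them stay ordered relative to the
  // application access: stores and RMW operations get at least Release,
  // loads get at least Acquire.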
1759   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1760     switch (a) {
1761       case AtomicOrdering::NotAtomic:
1762         return AtomicOrdering::NotAtomic;
1763       case AtomicOrdering::Unordered:
1764       case AtomicOrdering::Monotonic:
1765       case AtomicOrdering::Release:
1766         return AtomicOrdering::Release;
1767       case AtomicOrdering::Acquire:
1768       case AtomicOrdering::AcquireRelease:
1769         return AtomicOrdering::AcquireRelease;
1770       case AtomicOrdering::SequentiallyConsistent:
1771         return AtomicOrdering::SequentiallyConsistent;
1772     }
1773     llvm_unreachable("Unknown ordering");
1774   }
1775 
1776   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1777     switch (a) {
1778       case AtomicOrdering::NotAtomic:
1779         return AtomicOrdering::NotAtomic;
1780       case AtomicOrdering::Unordered:
1781       case AtomicOrdering::Monotonic:
1782       case AtomicOrdering::Acquire:
1783         return AtomicOrdering::Acquire;
1784       case AtomicOrdering::Release:
1785       case AtomicOrdering::AcquireRelease:
1786         return AtomicOrdering::AcquireRelease;
1787       case AtomicOrdering::SequentiallyConsistent:
1788         return AtomicOrdering::SequentiallyConsistent;
1789     }
1790     llvm_unreachable("Unknown ordering");
1791   }
1792 
1793   // ------------------- Visitors.
1794   using InstVisitor<MemorySanitizerVisitor>::visit;
1795   void visit(Instruction &I) {
1796     if (!I.getMetadata("nosanitize"))
1797       InstVisitor<MemorySanitizerVisitor>::visit(I);
1798   }
1799 
1800   /// Instrument LoadInst
1801   ///
1802   /// Loads the corresponding shadow and (optionally) origin.
1803   /// Optionally, checks that the load address is fully defined.
1804   void visitLoadInst(LoadInst &I) {
1805     assert(I.getType()->isSized() && "Load type must have size");
1806     assert(!I.getMetadata("nosanitize"));
1807     IRBuilder<> IRB(I.getNextNode());
1808     Type *ShadowTy = getShadowTy(&I);
1809     Value *Addr = I.getPointerOperand();
1810     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
1811     const Align Alignment = assumeAligned(I.getAlignment());
1812     if (PropagateShadow) {
1813       std::tie(ShadowPtr, OriginPtr) =
1814           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
1815       setShadow(&I,
1816                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
1817     } else {
1818       setShadow(&I, getCleanShadow(&I));
1819     }
1820 
1821     if (ClCheckAccessAddress)
1822       insertShadowCheck(I.getPointerOperand(), &I);
1823 
1824     if (I.isAtomic())
1825       I.setOrdering(addAcquireOrdering(I.getOrdering()));
1826 
1827     if (MS.TrackOrigins) {
1828       if (PropagateShadow) {
1829         const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1830         setOrigin(
1831             &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
1832       } else {
1833         setOrigin(&I, getCleanOrigin());
1834       }
1835     }
1836   }
1837 
1838   /// Instrument StoreInst
1839   ///
1840   /// Stores the corresponding shadow and (optionally) origin.
1841   /// Optionally, checks that the store address is fully defined.
1842   void visitStoreInst(StoreInst &I) {
1843     StoreList.push_back(&I);
1844     if (ClCheckAccessAddress)
1845       insertShadowCheck(I.getPointerOperand(), &I);
1846   }
1847 
1848   void handleCASOrRMW(Instruction &I) {
1849     assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1850 
1851     IRBuilder<> IRB(&I);
1852     Value *Addr = I.getOperand(0);
1853     Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, I.getType(), Align(1),
1854                                           /*isStore*/ true)
1855                            .first;
1856 
1857     if (ClCheckAccessAddress)
1858       insertShadowCheck(Addr, &I);
1859 
1860     // Only test the conditional argument of cmpxchg instruction.
1861     // The other argument can potentially be uninitialized, but we can not
1862     // detect this situation reliably without possible false positives.
1863     if (isa<AtomicCmpXchgInst>(I))
1864       insertShadowCheck(I.getOperand(1), &I);
1865 
1866     IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
1867 
1868     setShadow(&I, getCleanShadow(&I));
1869     setOrigin(&I, getCleanOrigin());
1870   }
1871 
1872   void visitAtomicRMWInst(AtomicRMWInst &I) {
1873     handleCASOrRMW(I);
1874     I.setOrdering(addReleaseOrdering(I.getOrdering()));
1875   }
1876 
1877   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
1878     handleCASOrRMW(I);
1879     I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
1880   }
1881 
1882   // Vector manipulation.
1883   void visitExtractElementInst(ExtractElementInst &I) {
1884     insertShadowCheck(I.getOperand(1), &I);
1885     IRBuilder<> IRB(&I);
1886     setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
1887               "_msprop"));
1888     setOrigin(&I, getOrigin(&I, 0));
1889   }
1890 
1891   void visitInsertElementInst(InsertElementInst &I) {
1892     insertShadowCheck(I.getOperand(2), &I);
1893     IRBuilder<> IRB(&I);
1894     setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1895               I.getOperand(2), "_msprop"));
1896     setOriginForNaryOp(I);
1897   }
1898 
1899   void visitShuffleVectorInst(ShuffleVectorInst &I) {
1900     IRBuilder<> IRB(&I);
1901     setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1902                                           I.getShuffleMask(), "_msprop"));
1903     setOriginForNaryOp(I);
1904   }
1905 
1906   // Casts.
1907   void visitSExtInst(SExtInst &I) {
1908     IRBuilder<> IRB(&I);
1909     setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
1910     setOrigin(&I, getOrigin(&I, 0));
1911   }
1912 
1913   void visitZExtInst(ZExtInst &I) {
1914     IRBuilder<> IRB(&I);
1915     setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
1916     setOrigin(&I, getOrigin(&I, 0));
1917   }
1918 
1919   void visitTruncInst(TruncInst &I) {
1920     IRBuilder<> IRB(&I);
1921     setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
1922     setOrigin(&I, getOrigin(&I, 0));
1923   }
1924 
1925   void visitBitCastInst(BitCastInst &I) {
1926     // Special case: if this is the bitcast (there is exactly 1 allowed) between
1927     // a musttail call and a ret, don't instrument. New instructions are not
1928     // allowed after a musttail call.
1929     if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
1930       if (CI->isMustTailCall())
1931         return;
1932     IRBuilder<> IRB(&I);
1933     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1934     setOrigin(&I, getOrigin(&I, 0));
1935   }
1936 
1937   void visitPtrToIntInst(PtrToIntInst &I) {
1938     IRBuilder<> IRB(&I);
1939     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1940              "_msprop_ptrtoint"));
1941     setOrigin(&I, getOrigin(&I, 0));
1942   }
1943 
1944   void visitIntToPtrInst(IntToPtrInst &I) {
1945     IRBuilder<> IRB(&I);
1946     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1947              "_msprop_inttoptr"));
1948     setOrigin(&I, getOrigin(&I, 0));
1949   }
1950 
1951   void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
1952   void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
1953   void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
1954   void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
1955   void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
1956   void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
1957 
1958   /// Propagate shadow for bitwise AND.
1959   ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in the right argument affects the resulting shadow.
1963   void visitAnd(BinaryOperator &I) {
1964     IRBuilder<> IRB(&I);
1965     //  "And" of 0 and a poisoned value results in unpoisoned value.
1966     //  1&1 => 1;     0&1 => 0;     p&1 => p;
1967     //  1&0 => 0;     0&0 => 0;     p&0 => 0;
1968     //  1&p => p;     0&p => 0;     p&p => p;
1969     //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
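    //  Worked example: V1 = 0b1100 with S1 = 0b0011 (two low bits unknown),
    //  V2 = 0b1010 fully defined (S2 = 0). Then S = S1 & V2 = 0b0010: bit 0
    //  of the result is defined because the 0 in V2 masks the unknown bit,
    //  while bit 1 stays undefined.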
1970     Value *S1 = getShadow(&I, 0);
1971     Value *S2 = getShadow(&I, 1);
1972     Value *V1 = I.getOperand(0);
1973     Value *V2 = I.getOperand(1);
1974     if (V1->getType() != S1->getType()) {
1975       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1976       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1977     }
1978     Value *S1S2 = IRB.CreateAnd(S1, S2);
1979     Value *V1S2 = IRB.CreateAnd(V1, S2);
1980     Value *S1V2 = IRB.CreateAnd(S1, V2);
1981     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
1982     setOriginForNaryOp(I);
1983   }
1984 
1985   void visitOr(BinaryOperator &I) {
1986     IRBuilder<> IRB(&I);
1987     //  "Or" of 1 and a poisoned value results in unpoisoned value.
1988     //  1|1 => 1;     0|1 => 1;     p|1 => 1;
1989     //  1|0 => 1;     0|0 => 0;     p|0 => p;
1990     //  1|p => 1;     0|p => p;     p|p => p;
1991     //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
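    //  Worked example: V1 = 0b1100 with S1 = 0b0011 (two low bits unknown),
    //  V2 = 0b1010 fully defined (S2 = 0). Then S = S1 & ~V2 = 0b0001: the 1
    //  in bit 1 of V2 forces that result bit to 1 regardless of V1, so only
    //  bit 0 stays undefined.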
1992     Value *S1 = getShadow(&I, 0);
1993     Value *S2 = getShadow(&I, 1);
1994     Value *V1 = IRB.CreateNot(I.getOperand(0));
1995     Value *V2 = IRB.CreateNot(I.getOperand(1));
1996     if (V1->getType() != S1->getType()) {
1997       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1998       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1999     }
2000     Value *S1S2 = IRB.CreateAnd(S1, S2);
2001     Value *V1S2 = IRB.CreateAnd(V1, S2);
2002     Value *S1V2 = IRB.CreateAnd(S1, V2);
2003     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2004     setOriginForNaryOp(I);
2005   }
2006 
2007   /// Default propagation of shadow and/or origin.
2008   ///
2009   /// This class implements the general case of shadow propagation, used in all
2010   /// cases where we don't know and/or don't care about what the operation
2011   /// actually does. It converts all input shadow values to a common type
2012   /// (extending or truncating as necessary), and bitwise OR's them.
2013   ///
2014   /// This is much cheaper than inserting checks (i.e. requiring inputs to be
2015   /// fully initialized), and less prone to false positives.
2016   ///
2017   /// This class also implements the general case of origin propagation. For a
2018   /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
2020   /// rightmost of them is picked. It does not matter which one is picked if all
2021   /// arguments are initialized.
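  ///
  /// E.g. for %c = add i32 %a, %b the combined shadow is Sc = Sa | Sb, and
  /// the origin of %c becomes %b's origin if %b's shadow is non-zero,
  /// otherwise %a's origin.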
2022   template <bool CombineShadow>
2023   class Combiner {
2024     Value *Shadow = nullptr;
2025     Value *Origin = nullptr;
2026     IRBuilder<> &IRB;
2027     MemorySanitizerVisitor *MSV;
2028 
2029   public:
2030     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
2031         : IRB(IRB), MSV(MSV) {}
2032 
2033     /// Add a pair of shadow and origin values to the mix.
2034     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
2035       if (CombineShadow) {
2036         assert(OpShadow);
2037         if (!Shadow)
2038           Shadow = OpShadow;
2039         else {
2040           OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2041           Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
2042         }
2043       }
2044 
2045       if (MSV->MS.TrackOrigins) {
2046         assert(OpOrigin);
2047         if (!Origin) {
2048           Origin = OpOrigin;
2049         } else {
2050           Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2051           // No point in adding something that might result in 0 origin value.
2052           if (!ConstOrigin || !ConstOrigin->isNullValue()) {
2053             Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
2054             Value *Cond =
2055                 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
2056             Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
2057           }
2058         }
2059       }
2060       return *this;
2061     }
2062 
2063     /// Add an application value to the mix.
2064     Combiner &Add(Value *V) {
2065       Value *OpShadow = MSV->getShadow(V);
2066       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
2067       return Add(OpShadow, OpOrigin);
2068     }
2069 
2070     /// Set the current combined values as the given instruction's shadow
2071     /// and origin.
2072     void Done(Instruction *I) {
2073       if (CombineShadow) {
2074         assert(Shadow);
2075         Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2076         MSV->setShadow(I, Shadow);
2077       }
2078       if (MSV->MS.TrackOrigins) {
2079         assert(Origin);
2080         MSV->setOrigin(I, Origin);
2081       }
2082     }
2083   };
2084 
2085   using ShadowAndOriginCombiner = Combiner<true>;
2086   using OriginCombiner = Combiner<false>;
2087 
2088   /// Propagate origin for arbitrary operation.
2089   void setOriginForNaryOp(Instruction &I) {
2090     if (!MS.TrackOrigins) return;
2091     IRBuilder<> IRB(&I);
2092     OriginCombiner OC(this, IRB);
2093     for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
2094       OC.Add(OI->get());
2095     OC.Done(&I);
2096   }
2097 
2098   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
2099     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
2100            "Vector of pointers is not a valid shadow type");
2101     return Ty->isVectorTy() ? cast<VectorType>(Ty)->getNumElements() *
2102                                   Ty->getScalarSizeInBits()
2103                             : Ty->getPrimitiveSizeInBits();
2104   }
2105 
2106   /// Cast between two shadow types, extending or truncating as
2107   /// necessary.
2108   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
2109                           bool Signed = false) {
2110     Type *srcTy = V->getType();
2111     size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2112     size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2113     if (srcSizeInBits > 1 && dstSizeInBits == 1)
2114       return IRB.CreateICmpNE(V, getCleanShadow(V));
2115 
2116     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
2117       return IRB.CreateIntCast(V, dstTy, Signed);
2118     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
2119         cast<VectorType>(dstTy)->getNumElements() ==
2120             cast<VectorType>(srcTy)->getNumElements())
2121       return IRB.CreateIntCast(V, dstTy, Signed);
2122     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
2123     Value *V2 =
2124       IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
2125     return IRB.CreateBitCast(V2, dstTy);
2126     // TODO: handle struct types.
2127   }
2128 
2129   /// Cast an application value to the type of its own shadow.
2130   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
2131     Type *ShadowTy = getShadowTy(V);
2132     if (V->getType() == ShadowTy)
2133       return V;
2134     if (V->getType()->isPtrOrPtrVectorTy())
2135       return IRB.CreatePtrToInt(V, ShadowTy);
2136     else
2137       return IRB.CreateBitCast(V, ShadowTy);
2138   }
2139 
2140   /// Propagate shadow for arbitrary operation.
2141   void handleShadowOr(Instruction &I) {
2142     IRBuilder<> IRB(&I);
2143     ShadowAndOriginCombiner SC(this, IRB);
2144     for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
2145       SC.Add(OI->get());
2146     SC.Done(&I);
2147   }
2148 
2149   void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
2150 
2151   // Handle multiplication by constant.
2152   //
2153   // Handle a special case of multiplication by constant that may have one or
2154   // more zeros in the lower bits. This makes corresponding number of lower bits
2155   // of the result zero as well. We model it by shifting the other operand
2156   // shadow left by the required number of bits. Effectively, we transform
2157   // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
2158   // We use multiplication by 2**N instead of shift to cover the case of
2159   // multiplication by 0, which may occur in some elements of a vector operand.
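  //
  // E.g. for X * 24 (24 == 3 * 2**3) the three low bits of the product are
  // always zero, so the result shadow is Sx * 8: the low shadow bits are
  // cleared and the remaining ones are shifted up to the value bits they now
  // describe.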
2160   void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
2161                            Value *OtherArg) {
2162     Constant *ShadowMul;
2163     Type *Ty = ConstArg->getType();
2164     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2165       unsigned NumElements = VTy->getNumElements();
2166       Type *EltTy = VTy->getElementType();
2167       SmallVector<Constant *, 16> Elements;
2168       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
2169         if (ConstantInt *Elt =
2170                 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
2171           const APInt &V = Elt->getValue();
2172           APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2173           Elements.push_back(ConstantInt::get(EltTy, V2));
2174         } else {
2175           Elements.push_back(ConstantInt::get(EltTy, 1));
2176         }
2177       }
2178       ShadowMul = ConstantVector::get(Elements);
2179     } else {
2180       if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2181         const APInt &V = Elt->getValue();
2182         APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2183         ShadowMul = ConstantInt::get(Ty, V2);
2184       } else {
2185         ShadowMul = ConstantInt::get(Ty, 1);
2186       }
2187     }
2188 
2189     IRBuilder<> IRB(&I);
2190     setShadow(&I,
2191               IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
2192     setOrigin(&I, getOrigin(OtherArg));
2193   }
2194 
2195   void visitMul(BinaryOperator &I) {
2196     Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
2197     Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
2198     if (constOp0 && !constOp1)
2199       handleMulByConstant(I, constOp0, I.getOperand(1));
2200     else if (constOp1 && !constOp0)
2201       handleMulByConstant(I, constOp1, I.getOperand(0));
2202     else
2203       handleShadowOr(I);
2204   }
2205 
2206   void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
2207   void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
2208   void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
2209   void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
2210   void visitSub(BinaryOperator &I) { handleShadowOr(I); }
2211   void visitXor(BinaryOperator &I) { handleShadowOr(I); }
2212 
2213   void handleIntegerDiv(Instruction &I) {
2214     IRBuilder<> IRB(&I);
2215     // Strict on the second argument.
2216     insertShadowCheck(I.getOperand(1), &I);
2217     setShadow(&I, getShadow(&I, 0));
2218     setOrigin(&I, getOrigin(&I, 0));
2219   }
2220 
2221   void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2222   void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2223   void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
2224   void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
2225 
2226   // Floating point division is side-effect free. We can not require that the
2227   // divisor is fully initialized and must propagate shadow. See PR37523.
2228   void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
2229   void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
2230 
2231   /// Instrument == and != comparisons.
2232   ///
2233   /// Sometimes the comparison result is known even if some of the bits of the
2234   /// arguments are not.
2235   void handleEqualityComparison(ICmpInst &I) {
2236     IRBuilder<> IRB(&I);
2237     Value *A = I.getOperand(0);
2238     Value *B = I.getOperand(1);
2239     Value *Sa = getShadow(A);
2240     Value *Sb = getShadow(B);
2241 
2242     // Get rid of pointers and vectors of pointers.
2243     // For ints (and vectors of ints), types of A and Sa match,
2244     // and this is a no-op.
2245     A = IRB.CreatePointerCast(A, Sa->getType());
2246     B = IRB.CreatePointerCast(B, Sb->getType());
2247 
2248     // A == B  <==>  (C = A^B) == 0
2249     // A != B  <==>  (C = A^B) != 0
2250     // Sc = Sa | Sb
2251     Value *C = IRB.CreateXor(A, B);
2252     Value *Sc = IRB.CreateOr(Sa, Sb);
2253     // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
2254     // Result is defined if one of the following is true
2255     // * there is a defined 1 bit in C
2256     // * C is fully defined
2257     // Si = !(C & ~Sc) && Sc
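    // E.g. if A = 0b10?? (two low bits undefined) and B = 0 is fully defined,
    // then C has a defined 1 in bit 3, so A != B holds whatever the unknown
    // bits are, and the comparison result is fully defined (Si = 0).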
2258     Value *Zero = Constant::getNullValue(Sc->getType());
2259     Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
2260     Value *Si =
2261       IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
2262                     IRB.CreateICmpEQ(
2263                       IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
2264     Si->setName("_msprop_icmp");
2265     setShadow(&I, Si);
2266     setOriginForNaryOp(I);
2267   }
2268 
2269   /// Build the lowest possible value of V, taking into account V's
2270   ///        uninitialized bits.
2271   Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2272                                 bool isSigned) {
2273     if (isSigned) {
2274       // Split shadow into sign bit and other bits.
2275       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2276       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined shadow bit, minimize other undefined bits.
2278       return
2279         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
2280     } else {
2281       // Minimize undefined bits.
2282       return IRB.CreateAnd(A, IRB.CreateNot(Sa));
2283     }
2284   }
2285 
2286   /// Build the highest possible value of V, taking into account V's
2287   ///        uninitialized bits.
2288   Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2289                                 bool isSigned) {
2290     if (isSigned) {
2291       // Split shadow into sign bit and other bits.
2292       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2293       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined shadow bit, maximize other undefined bits.
2295       return
2296         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
2297     } else {
2298       // Maximize undefined bits.
2299       return IRB.CreateOr(A, Sa);
2300     }
2301   }
2302 
2303   /// Instrument relational comparisons.
2304   ///
2305   /// This function does exact shadow propagation for all relational
2306   /// comparisons of integers, pointers and vectors of those.
2307   /// FIXME: output seems suboptimal when one of the operands is a constant
2308   void handleRelationalComparisonExact(ICmpInst &I) {
2309     IRBuilder<> IRB(&I);
2310     Value *A = I.getOperand(0);
2311     Value *B = I.getOperand(1);
2312     Value *Sa = getShadow(A);
2313     Value *Sb = getShadow(B);
2314 
2315     // Get rid of pointers and vectors of pointers.
2316     // For ints (and vectors of ints), types of A and Sa match,
2317     // and this is a no-op.
2318     A = IRB.CreatePointerCast(A, Sa->getType());
2319     B = IRB.CreatePointerCast(B, Sb->getType());
2320 
2321     // Let [a0, a1] be the interval of possible values of A, taking into account
2322     // its undefined bits. Let [b0, b1] be the interval of possible values of B.
2323     // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
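    // E.g. for an unsigned A = 0b10?? (a0 = 8, a1 = 11) and a fully defined
    // B = 4, both endpoint comparisons of A > B are true, so the result is
    // defined (and true) even though two bits of A are unknown.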
2324     bool IsSigned = I.isSigned();
2325     Value *S1 = IRB.CreateICmp(I.getPredicate(),
2326                                getLowestPossibleValue(IRB, A, Sa, IsSigned),
2327                                getHighestPossibleValue(IRB, B, Sb, IsSigned));
2328     Value *S2 = IRB.CreateICmp(I.getPredicate(),
2329                                getHighestPossibleValue(IRB, A, Sa, IsSigned),
2330                                getLowestPossibleValue(IRB, B, Sb, IsSigned));
2331     Value *Si = IRB.CreateXor(S1, S2);
2332     setShadow(&I, Si);
2333     setOriginForNaryOp(I);
2334   }
2335 
2336   /// Instrument signed relational comparisons.
2337   ///
2338   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
2339   /// bit of the shadow. Everything else is delegated to handleShadowOr().
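  ///
  /// E.g. for x < 0 the result depends only on x's sign bit, so the result
  /// shadow is the sign bit of Sx (computed as Sx <s 0).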
2340   void handleSignedRelationalComparison(ICmpInst &I) {
2341     Constant *constOp;
2342     Value *op = nullptr;
2343     CmpInst::Predicate pre;
2344     if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
2345       op = I.getOperand(0);
2346       pre = I.getPredicate();
2347     } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
2348       op = I.getOperand(1);
2349       pre = I.getSwappedPredicate();
2350     } else {
2351       handleShadowOr(I);
2352       return;
2353     }
2354 
2355     if ((constOp->isNullValue() &&
2356          (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
2357         (constOp->isAllOnesValue() &&
2358          (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
2359       IRBuilder<> IRB(&I);
2360       Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
2361                                         "_msprop_icmp_s");
2362       setShadow(&I, Shadow);
2363       setOrigin(&I, getOrigin(op));
2364     } else {
2365       handleShadowOr(I);
2366     }
2367   }
2368 
2369   void visitICmpInst(ICmpInst &I) {
2370     if (!ClHandleICmp) {
2371       handleShadowOr(I);
2372       return;
2373     }
2374     if (I.isEquality()) {
2375       handleEqualityComparison(I);
2376       return;
2377     }
2378 
2379     assert(I.isRelational());
2380     if (ClHandleICmpExact) {
2381       handleRelationalComparisonExact(I);
2382       return;
2383     }
2384     if (I.isSigned()) {
2385       handleSignedRelationalComparison(I);
2386       return;
2387     }
2388 
2389     assert(I.isUnsigned());
2390     if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
2391       handleRelationalComparisonExact(I);
2392       return;
2393     }
2394 
2395     handleShadowOr(I);
2396   }
2397 
2398   void visitFCmpInst(FCmpInst &I) {
2399     handleShadowOr(I);
2400   }
2401 
2402   void handleShift(BinaryOperator &I) {
2403     IRBuilder<> IRB(&I);
2404     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2405     // Otherwise perform the same shift on S1.
2406     Value *S1 = getShadow(&I, 0);
2407     Value *S2 = getShadow(&I, 1);
2408     Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
2409                                    S2->getType());
2410     Value *V2 = I.getOperand(1);
2411     Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
2412     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2413     setOriginForNaryOp(I);
2414   }
2415 
2416   void visitShl(BinaryOperator &I) { handleShift(I); }
2417   void visitAShr(BinaryOperator &I) { handleShift(I); }
2418   void visitLShr(BinaryOperator &I) { handleShift(I); }
2419 
2420   /// Instrument llvm.memmove
2421   ///
2422   /// At this point we don't know if llvm.memmove will be inlined or not.
2423   /// If we don't instrument it and it gets inlined,
2424   /// our interceptor will not kick in and we will lose the memmove.
2425   /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
2428   ///
2429   /// Similar situation exists for memcpy and memset.
2430   void visitMemMoveInst(MemMoveInst &I) {
2431     IRBuilder<> IRB(&I);
2432     IRB.CreateCall(
2433         MS.MemmoveFn,
2434         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2435          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2436          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2437     I.eraseFromParent();
2438   }
2439 
2440   // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
2442   // FIXME: consider doing manual inline for small constant sizes and proper
2443   // alignment.
2444   void visitMemCpyInst(MemCpyInst &I) {
2445     IRBuilder<> IRB(&I);
2446     IRB.CreateCall(
2447         MS.MemcpyFn,
2448         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2449          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2450          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2451     I.eraseFromParent();
2452   }
2453 
2454   // Same as memcpy.
2455   void visitMemSetInst(MemSetInst &I) {
2456     IRBuilder<> IRB(&I);
2457     IRB.CreateCall(
2458         MS.MemsetFn,
2459         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2460          IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2461          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2462     I.eraseFromParent();
2463   }
2464 
2465   void visitVAStartInst(VAStartInst &I) {
2466     VAHelper->visitVAStartInst(I);
2467   }
2468 
2469   void visitVACopyInst(VACopyInst &I) {
2470     VAHelper->visitVACopyInst(I);
2471   }
2472 
2473   /// Handle vector store-like intrinsics.
2474   ///
2475   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
2476   /// has 1 pointer argument and 1 vector argument, returns void.
2477   bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
2478     IRBuilder<> IRB(&I);
2479     Value* Addr = I.getArgOperand(0);
2480     Value *Shadow = getShadow(&I, 1);
2481     Value *ShadowPtr, *OriginPtr;
2482 
2483     // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
2485     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2486         Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
2487     IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));
2488 
2489     if (ClCheckAccessAddress)
2490       insertShadowCheck(Addr, &I);
2491 
2492     // FIXME: factor out common code from materializeStores
2493     if (MS.TrackOrigins) IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
2494     return true;
2495   }
2496 
2497   /// Handle vector load-like intrinsics.
2498   ///
2499   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
2500   /// has 1 pointer argument, returns a vector.
2501   bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
2502     IRBuilder<> IRB(&I);
2503     Value *Addr = I.getArgOperand(0);
2504 
2505     Type *ShadowTy = getShadowTy(&I);
2506     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2507     if (PropagateShadow) {
2508       // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
2510       const Align Alignment = Align(1);
2511       std::tie(ShadowPtr, OriginPtr) =
2512           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2513       setShadow(&I,
2514                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2515     } else {
2516       setShadow(&I, getCleanShadow(&I));
2517     }
2518 
2519     if (ClCheckAccessAddress)
2520       insertShadowCheck(Addr, &I);
2521 
2522     if (MS.TrackOrigins) {
2523       if (PropagateShadow)
2524         setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
2525       else
2526         setOrigin(&I, getCleanOrigin());
2527     }
2528     return true;
2529   }
2530 
2531   /// Handle (SIMD arithmetic)-like intrinsics.
2532   ///
2533   /// Instrument intrinsics with any number of arguments of the same type,
2534   /// equal to the return type. The type should be simple (no aggregates or
2535   /// pointers; vectors are fine).
2536   /// Caller guarantees that this intrinsic does not access memory.
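  ///
  /// E.g. an intrinsic of the form
  /// <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) fits this
  /// pattern: the result shadow is simply the OR of the operand shadows.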
2537   bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
2538     Type *RetTy = I.getType();
2539     if (!(RetTy->isIntOrIntVectorTy() ||
2540           RetTy->isFPOrFPVectorTy() ||
2541           RetTy->isX86_MMXTy()))
2542       return false;
2543 
2544     unsigned NumArgOperands = I.getNumArgOperands();
2545 
2546     for (unsigned i = 0; i < NumArgOperands; ++i) {
2547       Type *Ty = I.getArgOperand(i)->getType();
2548       if (Ty != RetTy)
2549         return false;
2550     }
2551 
2552     IRBuilder<> IRB(&I);
2553     ShadowAndOriginCombiner SC(this, IRB);
2554     for (unsigned i = 0; i < NumArgOperands; ++i)
2555       SC.Add(I.getArgOperand(i));
2556     SC.Done(&I);
2557 
2558     return true;
2559   }
2560 
2561   /// Heuristically instrument unknown intrinsics.
2562   ///
2563   /// The main purpose of this code is to do something reasonable with all
2564   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2565   /// We recognize several classes of intrinsics by their argument types and
2566   /// ModRefBehaviour and apply special instrumentation when we are reasonably
2567   /// sure that we know what the intrinsic does.
2568   ///
2569   /// We special-case intrinsics where this approach fails. See llvm.bswap
2570   /// handling as an example of that.
2571   bool handleUnknownIntrinsic(IntrinsicInst &I) {
2572     unsigned NumArgOperands = I.getNumArgOperands();
2573     if (NumArgOperands == 0)
2574       return false;
2575 
2576     if (NumArgOperands == 2 &&
2577         I.getArgOperand(0)->getType()->isPointerTy() &&
2578         I.getArgOperand(1)->getType()->isVectorTy() &&
2579         I.getType()->isVoidTy() &&
2580         !I.onlyReadsMemory()) {
2581       // This looks like a vector store.
2582       return handleVectorStoreIntrinsic(I);
2583     }
2584 
2585     if (NumArgOperands == 1 &&
2586         I.getArgOperand(0)->getType()->isPointerTy() &&
2587         I.getType()->isVectorTy() &&
2588         I.onlyReadsMemory()) {
2589       // This looks like a vector load.
2590       return handleVectorLoadIntrinsic(I);
2591     }
2592 
2593     if (I.doesNotAccessMemory())
2594       if (maybeHandleSimpleNomemIntrinsic(I))
2595         return true;
2596 
2597     // FIXME: detect and handle SSE maskstore/maskload
2598     return false;
2599   }
2600 
2601   void handleInvariantGroup(IntrinsicInst &I) {
2602     setShadow(&I, getShadow(&I, 0));
2603     setOrigin(&I, getOrigin(&I, 0));
2604   }
2605 
2606   void handleLifetimeStart(IntrinsicInst &I) {
2607     if (!PoisonStack)
2608       return;
2609     DenseMap<Value *, AllocaInst *> AllocaForValue;
2610     AllocaInst *AI =
2611         llvm::findAllocaForValue(I.getArgOperand(1), AllocaForValue);
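    // If the pointer cannot be traced back to a single alloca, give up on
    // lifetime-based poisoning for the whole function; runOnFunction will
    // then poison every alloca at its definition instead.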
2612     if (!AI)
2613       InstrumentLifetimeStart = false;
2614     LifetimeStartList.push_back(std::make_pair(&I, AI));
2615   }
2616 
2617   void handleBswap(IntrinsicInst &I) {
2618     IRBuilder<> IRB(&I);
2619     Value *Op = I.getArgOperand(0);
2620     Type *OpType = Op->getType();
2621     Function *BswapFunc = Intrinsic::getDeclaration(
2622       F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2623     setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2624     setOrigin(&I, getOrigin(Op));
2625   }
2626 
2627   // Instrument vector convert intrinsic.
2628   //
2629   // This function instruments intrinsics like cvtsi2ss:
2630   // %Out = int_xxx_cvtyyy(%ConvertOp)
2631   // or
2632   // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
  // number of \p Out elements, and (if it has 2 arguments) copies the rest of
2635   // elements from \p CopyOp.
2636   // In most cases conversion involves floating-point value which may trigger a
2637   // hardware exception when not fully initialized. For this reason we require
2638   // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2639   // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2640   // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2641   // return a fully initialized value.
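  //
  // E.g. for an SSE cvtsi2ss-style intrinsic
  // (<4 x float>, i32) -> <4 x float> with NumUsedElements == 1, we require
  // the i32 operand to be fully initialized and produce the shadow of the
  // <4 x float> operand with element 0 cleared.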
2642   void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2643     IRBuilder<> IRB(&I);
2644     Value *CopyOp, *ConvertOp;
2645 
2646     switch (I.getNumArgOperands()) {
2647     case 3:
2648       assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2649       LLVM_FALLTHROUGH;
2650     case 2:
2651       CopyOp = I.getArgOperand(0);
2652       ConvertOp = I.getArgOperand(1);
2653       break;
2654     case 1:
2655       ConvertOp = I.getArgOperand(0);
2656       CopyOp = nullptr;
2657       break;
2658     default:
2659       llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2660     }
2661 
2662     // The first *NumUsedElements* elements of ConvertOp are converted to the
2663     // same number of output elements. The rest of the output is copied from
2664     // CopyOp, or (if not available) filled with zeroes.
2665     // Combine shadow for elements of ConvertOp that are used in this operation,
2666     // and insert a check.
2667     // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2668     // int->any conversion.
2669     Value *ConvertShadow = getShadow(ConvertOp);
2670     Value *AggShadow = nullptr;
2671     if (ConvertOp->getType()->isVectorTy()) {
2672       AggShadow = IRB.CreateExtractElement(
2673           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2674       for (int i = 1; i < NumUsedElements; ++i) {
2675         Value *MoreShadow = IRB.CreateExtractElement(
2676             ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2677         AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2678       }
2679     } else {
2680       AggShadow = ConvertShadow;
2681     }
2682     assert(AggShadow->getType()->isIntegerTy());
2683     insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2684 
2685     // Build result shadow by zero-filling parts of CopyOp shadow that come from
2686     // ConvertOp.
2687     if (CopyOp) {
2688       assert(CopyOp->getType() == I.getType());
2689       assert(CopyOp->getType()->isVectorTy());
2690       Value *ResultShadow = getShadow(CopyOp);
2691       Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
2692       for (int i = 0; i < NumUsedElements; ++i) {
2693         ResultShadow = IRB.CreateInsertElement(
2694             ResultShadow, ConstantInt::getNullValue(EltTy),
2695             ConstantInt::get(IRB.getInt32Ty(), i));
2696       }
2697       setShadow(&I, ResultShadow);
2698       setOrigin(&I, getOrigin(CopyOp));
2699     } else {
2700       setShadow(&I, getCleanShadow(&I));
2701       setOrigin(&I, getCleanOrigin());
2702     }
2703   }
2704 
2705   // Given a scalar or vector, extract the lower 64 bits (or fewer), and return
2706   // all zeroes if the extracted value is zero, and all ones otherwise.
2707   Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2708     if (S->getType()->isVectorTy())
2709       S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2710     assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2711     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2712     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2713   }
2714 
2715   // Given a vector, extract its first element, and return all
2716   // zeroes if it is zero, and all ones otherwise.
2717   Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2718     Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2719     Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2720     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2721   }
2722 
2723   Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2724     Type *T = S->getType();
2725     assert(T->isVectorTy());
2726     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2727     return IRB.CreateSExt(S2, T);
2728   }
2729 
2730   // Instrument vector shift intrinsic.
2731   //
2732   // This function instruments intrinsics like int_x86_avx2_psll_w.
2733   // Intrinsic shifts %In by %ShiftSize bits.
2734   // %ShiftSize may be a vector. In that case the lower 64 bits determine the
2735   // shift size, and the rest are ignored. Behavior is defined even if the
2736   // shift size is greater than the register (or field) width.
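       // The propagation rule below is, conceptually:
       //   Sz = (shift of Sx by y) | (Sy != 0 ? all-ones : 0)
       // i.e. the data shadow is shifted exactly like the data, and any
       // uncertainty in the shift amount poisons the entire result.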
2737   void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2738     assert(I.getNumArgOperands() == 2);
2739     IRBuilder<> IRB(&I);
2740     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2741     // Otherwise perform the same shift on S1.
2742     Value *S1 = getShadow(&I, 0);
2743     Value *S2 = getShadow(&I, 1);
2744     Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2745                              : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2746     Value *V1 = I.getOperand(0);
2747     Value *V2 = I.getOperand(1);
2748     Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
2749                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
2750     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2751     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2752     setOriginForNaryOp(I);
2753   }
2754 
2755   // Get an X86_MMX-sized vector type.
2756   Type *getMMXVectorTy(unsigned EltSizeInBits) {
2757     const unsigned X86_MMXSizeInBits = 64;
2758     assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
2759            "Illegal MMX vector element size");
2760     return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2761                            X86_MMXSizeInBits / EltSizeInBits);
2762   }
2763 
2764   // Returns a signed counterpart for an (un)signed-saturate-and-pack
2765   // intrinsic.
2766   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2767     switch (id) {
2768       case Intrinsic::x86_sse2_packsswb_128:
2769       case Intrinsic::x86_sse2_packuswb_128:
2770         return Intrinsic::x86_sse2_packsswb_128;
2771 
2772       case Intrinsic::x86_sse2_packssdw_128:
2773       case Intrinsic::x86_sse41_packusdw:
2774         return Intrinsic::x86_sse2_packssdw_128;
2775 
2776       case Intrinsic::x86_avx2_packsswb:
2777       case Intrinsic::x86_avx2_packuswb:
2778         return Intrinsic::x86_avx2_packsswb;
2779 
2780       case Intrinsic::x86_avx2_packssdw:
2781       case Intrinsic::x86_avx2_packusdw:
2782         return Intrinsic::x86_avx2_packssdw;
2783 
2784       case Intrinsic::x86_mmx_packsswb:
2785       case Intrinsic::x86_mmx_packuswb:
2786         return Intrinsic::x86_mmx_packsswb;
2787 
2788       case Intrinsic::x86_mmx_packssdw:
2789         return Intrinsic::x86_mmx_packssdw;
2790       default:
2791         llvm_unreachable("unexpected intrinsic id");
2792     }
2793   }
2794 
2795   // Instrument vector pack intrinsic.
2796   //
2797   // This function instruments intrinsics like x86_mmx_packsswb, which pack
2798   // elements of 2 input vectors into half as many bits with saturation.
2799   // Shadow is propagated with the signed variant of the same intrinsic applied
2800   // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2801   // EltSizeInBits is used only for x86mmx arguments.
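       // E.g. for x86_sse2_packsswb_128 (two <8 x i16> -> <16 x i8>), a result
       // byte becomes fully poisoned iff any bit of the corresponding input
       // element is poisoned. The signed variant is used for the shadow
       // because signed saturation maps the all-ones value (-1) to all-ones
       // and 0 to 0.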
2802   void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2803     assert(I.getNumArgOperands() == 2);
2804     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2805     IRBuilder<> IRB(&I);
2806     Value *S1 = getShadow(&I, 0);
2807     Value *S2 = getShadow(&I, 1);
2808     assert(isX86_MMX || S1->getType()->isVectorTy());
2809 
2810     // SExt and ICmpNE below must apply to individual elements of input vectors.
2811     // In the case of x86mmx arguments, cast them to appropriate vector types
2812     // and back.
2813     Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2814     if (isX86_MMX) {
2815       S1 = IRB.CreateBitCast(S1, T);
2816       S2 = IRB.CreateBitCast(S2, T);
2817     }
2818     Value *S1_ext = IRB.CreateSExt(
2819         IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
2820     Value *S2_ext = IRB.CreateSExt(
2821         IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
2822     if (isX86_MMX) {
2823       Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2824       S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2825       S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2826     }
2827 
2828     Function *ShadowFn = Intrinsic::getDeclaration(
2829         F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2830 
2831     Value *S =
2832         IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2833     if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2834     setShadow(&I, S);
2835     setOriginForNaryOp(I);
2836   }
2837 
2838   // Instrument sum-of-absolute-differences intrinsic.
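       // psad.bw produces one sum per 64-bit lane, and only the low 16 bits of
       // each result element can be non-zero. Per result element the rule
       // below is, roughly:
       //   Sz = (Sa | Sb) != 0 ? 0x000000000000FFFF : 0
       // i.e. any poisoned input byte in a lane poisons only the significant
       // 16 bits of that lane's result.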
2839   void handleVectorSadIntrinsic(IntrinsicInst &I) {
2840     const unsigned SignificantBitsPerResultElement = 16;
2841     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2842     Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2843     unsigned ZeroBitsPerResultElement =
2844         ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2845 
2846     IRBuilder<> IRB(&I);
2847     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2848     S = IRB.CreateBitCast(S, ResTy);
2849     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2850                        ResTy);
2851     S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2852     S = IRB.CreateBitCast(S, getShadowTy(&I));
2853     setShadow(&I, S);
2854     setOriginForNaryOp(I);
2855   }
2856 
2857   // Instrument multiply-add intrinsic.
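       // Each result element is computed from a small group of adjacent input
       // elements, so the rule below poisons a result element completely if
       // any bit of the corresponding input elements (in either operand) is
       // poisoned, and leaves it clean otherwise.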
2858   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2859                                   unsigned EltSizeInBits = 0) {
2860     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2861     Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2862     IRBuilder<> IRB(&I);
2863     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2864     S = IRB.CreateBitCast(S, ResTy);
2865     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2866                        ResTy);
2867     S = IRB.CreateBitCast(S, getShadowTy(&I));
2868     setShadow(&I, S);
2869     setOriginForNaryOp(I);
2870   }
2871 
2872   // Instrument compare-packed intrinsic.
2873   // Basically, an or followed by sext(icmp ne 0) to end up with an all-zeros
2874   // or an all-ones shadow.
2875   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
2876     IRBuilder<> IRB(&I);
2877     Type *ResTy = getShadowTy(&I);
2878     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2879     Value *S = IRB.CreateSExt(
2880         IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
2881     setShadow(&I, S);
2882     setOriginForNaryOp(I);
2883   }
2884 
2885   // Instrument compare-scalar intrinsic.
2886   // This handles both cmp* intrinsics which return the result in the first
2887   // element of a vector, and comi* which return the result as i32.
2888   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
2889     IRBuilder<> IRB(&I);
2890     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2891     Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2892     setShadow(&I, S);
2893     setOriginForNaryOp(I);
2894   }
2895 
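       // stmxcsr stores the 32-bit MXCSR register to memory. Its contents are
       // always considered initialized, so the four shadow bytes at the
       // destination are simply cleared; the address itself is optionally
       // checked.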
2896   void handleStmxcsr(IntrinsicInst &I) {
2897     IRBuilder<> IRB(&I);
2898     Value* Addr = I.getArgOperand(0);
2899     Type *Ty = IRB.getInt32Ty();
2900     Value *ShadowPtr =
2901         getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
2902 
2903     IRB.CreateStore(getCleanShadow(Ty),
2904                     IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
2905 
2906     if (ClCheckAccessAddress)
2907       insertShadowCheck(Addr, &I);
2908   }
2909 
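       // ldmxcsr loads MXCSR from memory, which silently affects later
       // floating-point operations, so the four bytes being read are required
       // to be fully initialized.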
2910   void handleLdmxcsr(IntrinsicInst &I) {
2911     if (!InsertChecks) return;
2912 
2913     IRBuilder<> IRB(&I);
2914     Value *Addr = I.getArgOperand(0);
2915     Type *Ty = IRB.getInt32Ty();
2916     const Align Alignment = Align(1);
2917     Value *ShadowPtr, *OriginPtr;
2918     std::tie(ShadowPtr, OriginPtr) =
2919         getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
2920 
2921     if (ClCheckAccessAddress)
2922       insertShadowCheck(Addr, &I);
2923 
2924     Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
2925     Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
2926                                     : getCleanOrigin();
2927     insertShadowCheck(Shadow, Origin, &I);
2928   }
2929 
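       // llvm.masked.store(v, ptr, align, mask): store the shadow of %v
       // through the same mask into the shadow of *ptr, so that only the
       // lanes actually written get their shadow updated.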
2930   void handleMaskedStore(IntrinsicInst &I) {
2931     IRBuilder<> IRB(&I);
2932     Value *V = I.getArgOperand(0);
2933     Value *Addr = I.getArgOperand(1);
2934     const Align Alignment(
2935         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
2936     Value *Mask = I.getArgOperand(3);
2937     Value *Shadow = getShadow(V);
2938 
2939     Value *ShadowPtr;
2940     Value *OriginPtr;
2941     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2942         Addr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
2943 
2944     if (ClCheckAccessAddress) {
2945       insertShadowCheck(Addr, &I);
2946       // An uninitialized mask is kind of like an uninitialized address, but
2947       // not as scary.
2948       insertShadowCheck(Mask, &I);
2949     }
2950 
2951     IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
2952 
2953     if (MS.TrackOrigins) {
2954       auto &DL = F.getParent()->getDataLayout();
2955       paintOrigin(IRB, getOrigin(V), OriginPtr,
2956                   DL.getTypeStoreSize(Shadow->getType()),
2957                   std::max(Alignment, kMinOriginAlignment));
2958     }
2959   }
2960 
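       // llvm.masked.load(ptr, align, mask, passthru): the shadow is loaded
       // through the same mask, with the shadow of %passthru filling the
       // masked-off lanes; the resulting origin is picked from either
       // %passthru or the loaded memory.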
2961   bool handleMaskedLoad(IntrinsicInst &I) {
2962     IRBuilder<> IRB(&I);
2963     Value *Addr = I.getArgOperand(0);
2964     const Align Alignment(
2965         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
2966     Value *Mask = I.getArgOperand(2);
2967     Value *PassThru = I.getArgOperand(3);
2968 
2969     Type *ShadowTy = getShadowTy(&I);
2970     Value *ShadowPtr, *OriginPtr;
2971     if (PropagateShadow) {
2972       std::tie(ShadowPtr, OriginPtr) =
2973           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2974       setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Alignment, Mask,
2975                                          getShadow(PassThru), "_msmaskedld"));
2976     } else {
2977       setShadow(&I, getCleanShadow(&I));
2978     }
2979 
2980     if (ClCheckAccessAddress) {
2981       insertShadowCheck(Addr, &I);
2982       insertShadowCheck(Mask, &I);
2983     }
2984 
2985     if (MS.TrackOrigins) {
2986       if (PropagateShadow) {
2987         // Choose between PassThru's and the loaded value's origins.
2988         Value *MaskedPassThruShadow = IRB.CreateAnd(
2989             getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
2990 
2991         Value *Acc = IRB.CreateExtractElement(
2992             MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2993         for (int i = 1,
2994                  N = cast<VectorType>(PassThru->getType())->getNumElements();
2995              i < N; ++i) {
2996           Value *More = IRB.CreateExtractElement(
2997               MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2998           Acc = IRB.CreateOr(Acc, More);
2999         }
3000 
3001         Value *Origin = IRB.CreateSelect(
3002             IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
3003             getOrigin(PassThru), IRB.CreateLoad(MS.OriginTy, OriginPtr));
3004 
3005         setOrigin(&I, Origin);
3006       } else {
3007         setOrigin(&I, getCleanOrigin());
3008       }
3009     }
3010     return true;
3011   }
3012 
3013   // Instrument BMI / BMI2 intrinsics.
3014   // All of these intrinsics are Z = I(X, Y),
3015   // where the types of all operands and the result match, and are either i32
3016   // or i64. The following instrumentation happens to work for all of them:
3017   //   Sz = I(Sx, Y) | (sext (Sy != 0))
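       // Intuitively, applying the same operation to Sx moves or masks the
       // shadow bits of X exactly as the data bits move (e.g. pext gathers the
       // selected bits' shadow into the low bits of the result), while any
       // poison in Y makes the whole result poisoned via the sext term.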
3018   void handleBmiIntrinsic(IntrinsicInst &I) {
3019     IRBuilder<> IRB(&I);
3020     Type *ShadowTy = getShadowTy(&I);
3021 
3022     // If any bit of the mask operand is poisoned, then the whole thing is.
3023     Value *SMask = getShadow(&I, 1);
3024     SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
3025                            ShadowTy);
3026     // Apply the same intrinsic to the shadow of the first operand.
3027     Value *S = IRB.CreateCall(I.getCalledFunction(),
3028                               {getShadow(&I, 0), I.getOperand(1)});
3029     S = IRB.CreateOr(SMask, S);
3030     setShadow(&I, S);
3031     setOriginForNaryOp(I);
3032   }
3033 
3034   SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
3035     SmallVector<int, 8> Mask;
3036     for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
3037       Mask.append(2, X);
3038     }
3039     return Mask;
3040   }
3041 
3042   // Instrument pclmul intrinsics.
3043   // These intrinsics operate either on odd or on even elements of the input
3044   // vectors, depending on the constant in the 3rd argument, ignoring the rest.
3045   // Replace the unused elements with copies of the used ones, e.g.:
3046   //   (0, 1, 2, 3) -> (0, 0, 2, 2) (even case)
3047   // or
3048   //   (0, 1, 2, 3) -> (1, 1, 3, 3) (odd case)
3049   // and then apply the usual shadow combining logic.
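       // (Bit 0 of the immediate selects the used element of the first
       // operand, and bit 4 selects it for the second operand, matching the
       // Imm & 0x01 and Imm & 0x10 tests below.)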
3050   void handlePclmulIntrinsic(IntrinsicInst &I) {
3051     IRBuilder<> IRB(&I);
3052     Type *ShadowTy = getShadowTy(&I);
3053     unsigned Width =
3054         cast<VectorType>(I.getArgOperand(0)->getType())->getNumElements();
3055     assert(isa<ConstantInt>(I.getArgOperand(2)) &&
3056            "pclmul 3rd operand must be a constant");
3057     unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3058     Value *Shuf0 =
3059         IRB.CreateShuffleVector(getShadow(&I, 0), UndefValue::get(ShadowTy),
3060                                 getPclmulMask(Width, Imm & 0x01));
3061     Value *Shuf1 =
3062         IRB.CreateShuffleVector(getShadow(&I, 1), UndefValue::get(ShadowTy),
3063                                 getPclmulMask(Width, Imm & 0x10));
3064     ShadowAndOriginCombiner SOC(this, IRB);
3065     SOC.Add(Shuf0, getOrigin(&I, 0));
3066     SOC.Add(Shuf1, getOrigin(&I, 1));
3067     SOC.Done(&I);
3068   }
3069 
3070   void visitIntrinsicInst(IntrinsicInst &I) {
3071     switch (I.getIntrinsicID()) {
3072     case Intrinsic::lifetime_start:
3073       handleLifetimeStart(I);
3074       break;
3075     case Intrinsic::launder_invariant_group:
3076     case Intrinsic::strip_invariant_group:
3077       handleInvariantGroup(I);
3078       break;
3079     case Intrinsic::bswap:
3080       handleBswap(I);
3081       break;
3082     case Intrinsic::masked_store:
3083       handleMaskedStore(I);
3084       break;
3085     case Intrinsic::masked_load:
3086       handleMaskedLoad(I);
3087       break;
3088     case Intrinsic::x86_sse_stmxcsr:
3089       handleStmxcsr(I);
3090       break;
3091     case Intrinsic::x86_sse_ldmxcsr:
3092       handleLdmxcsr(I);
3093       break;
3094     case Intrinsic::x86_avx512_vcvtsd2usi64:
3095     case Intrinsic::x86_avx512_vcvtsd2usi32:
3096     case Intrinsic::x86_avx512_vcvtss2usi64:
3097     case Intrinsic::x86_avx512_vcvtss2usi32:
3098     case Intrinsic::x86_avx512_cvttss2usi64:
3099     case Intrinsic::x86_avx512_cvttss2usi:
3100     case Intrinsic::x86_avx512_cvttsd2usi64:
3101     case Intrinsic::x86_avx512_cvttsd2usi:
3102     case Intrinsic::x86_avx512_cvtusi2ss:
3103     case Intrinsic::x86_avx512_cvtusi642sd:
3104     case Intrinsic::x86_avx512_cvtusi642ss:
3105     case Intrinsic::x86_sse2_cvtsd2si64:
3106     case Intrinsic::x86_sse2_cvtsd2si:
3107     case Intrinsic::x86_sse2_cvtsd2ss:
3108     case Intrinsic::x86_sse2_cvttsd2si64:
3109     case Intrinsic::x86_sse2_cvttsd2si:
3110     case Intrinsic::x86_sse_cvtss2si64:
3111     case Intrinsic::x86_sse_cvtss2si:
3112     case Intrinsic::x86_sse_cvttss2si64:
3113     case Intrinsic::x86_sse_cvttss2si:
3114       handleVectorConvertIntrinsic(I, 1);
3115       break;
3116     case Intrinsic::x86_sse_cvtps2pi:
3117     case Intrinsic::x86_sse_cvttps2pi:
3118       handleVectorConvertIntrinsic(I, 2);
3119       break;
3120 
3121     case Intrinsic::x86_avx512_psll_w_512:
3122     case Intrinsic::x86_avx512_psll_d_512:
3123     case Intrinsic::x86_avx512_psll_q_512:
3124     case Intrinsic::x86_avx512_pslli_w_512:
3125     case Intrinsic::x86_avx512_pslli_d_512:
3126     case Intrinsic::x86_avx512_pslli_q_512:
3127     case Intrinsic::x86_avx512_psrl_w_512:
3128     case Intrinsic::x86_avx512_psrl_d_512:
3129     case Intrinsic::x86_avx512_psrl_q_512:
3130     case Intrinsic::x86_avx512_psra_w_512:
3131     case Intrinsic::x86_avx512_psra_d_512:
3132     case Intrinsic::x86_avx512_psra_q_512:
3133     case Intrinsic::x86_avx512_psrli_w_512:
3134     case Intrinsic::x86_avx512_psrli_d_512:
3135     case Intrinsic::x86_avx512_psrli_q_512:
3136     case Intrinsic::x86_avx512_psrai_w_512:
3137     case Intrinsic::x86_avx512_psrai_d_512:
3138     case Intrinsic::x86_avx512_psrai_q_512:
3139     case Intrinsic::x86_avx512_psra_q_256:
3140     case Intrinsic::x86_avx512_psra_q_128:
3141     case Intrinsic::x86_avx512_psrai_q_256:
3142     case Intrinsic::x86_avx512_psrai_q_128:
3143     case Intrinsic::x86_avx2_psll_w:
3144     case Intrinsic::x86_avx2_psll_d:
3145     case Intrinsic::x86_avx2_psll_q:
3146     case Intrinsic::x86_avx2_pslli_w:
3147     case Intrinsic::x86_avx2_pslli_d:
3148     case Intrinsic::x86_avx2_pslli_q:
3149     case Intrinsic::x86_avx2_psrl_w:
3150     case Intrinsic::x86_avx2_psrl_d:
3151     case Intrinsic::x86_avx2_psrl_q:
3152     case Intrinsic::x86_avx2_psra_w:
3153     case Intrinsic::x86_avx2_psra_d:
3154     case Intrinsic::x86_avx2_psrli_w:
3155     case Intrinsic::x86_avx2_psrli_d:
3156     case Intrinsic::x86_avx2_psrli_q:
3157     case Intrinsic::x86_avx2_psrai_w:
3158     case Intrinsic::x86_avx2_psrai_d:
3159     case Intrinsic::x86_sse2_psll_w:
3160     case Intrinsic::x86_sse2_psll_d:
3161     case Intrinsic::x86_sse2_psll_q:
3162     case Intrinsic::x86_sse2_pslli_w:
3163     case Intrinsic::x86_sse2_pslli_d:
3164     case Intrinsic::x86_sse2_pslli_q:
3165     case Intrinsic::x86_sse2_psrl_w:
3166     case Intrinsic::x86_sse2_psrl_d:
3167     case Intrinsic::x86_sse2_psrl_q:
3168     case Intrinsic::x86_sse2_psra_w:
3169     case Intrinsic::x86_sse2_psra_d:
3170     case Intrinsic::x86_sse2_psrli_w:
3171     case Intrinsic::x86_sse2_psrli_d:
3172     case Intrinsic::x86_sse2_psrli_q:
3173     case Intrinsic::x86_sse2_psrai_w:
3174     case Intrinsic::x86_sse2_psrai_d:
3175     case Intrinsic::x86_mmx_psll_w:
3176     case Intrinsic::x86_mmx_psll_d:
3177     case Intrinsic::x86_mmx_psll_q:
3178     case Intrinsic::x86_mmx_pslli_w:
3179     case Intrinsic::x86_mmx_pslli_d:
3180     case Intrinsic::x86_mmx_pslli_q:
3181     case Intrinsic::x86_mmx_psrl_w:
3182     case Intrinsic::x86_mmx_psrl_d:
3183     case Intrinsic::x86_mmx_psrl_q:
3184     case Intrinsic::x86_mmx_psra_w:
3185     case Intrinsic::x86_mmx_psra_d:
3186     case Intrinsic::x86_mmx_psrli_w:
3187     case Intrinsic::x86_mmx_psrli_d:
3188     case Intrinsic::x86_mmx_psrli_q:
3189     case Intrinsic::x86_mmx_psrai_w:
3190     case Intrinsic::x86_mmx_psrai_d:
3191       handleVectorShiftIntrinsic(I, /* Variable */ false);
3192       break;
3193     case Intrinsic::x86_avx2_psllv_d:
3194     case Intrinsic::x86_avx2_psllv_d_256:
3195     case Intrinsic::x86_avx512_psllv_d_512:
3196     case Intrinsic::x86_avx2_psllv_q:
3197     case Intrinsic::x86_avx2_psllv_q_256:
3198     case Intrinsic::x86_avx512_psllv_q_512:
3199     case Intrinsic::x86_avx2_psrlv_d:
3200     case Intrinsic::x86_avx2_psrlv_d_256:
3201     case Intrinsic::x86_avx512_psrlv_d_512:
3202     case Intrinsic::x86_avx2_psrlv_q:
3203     case Intrinsic::x86_avx2_psrlv_q_256:
3204     case Intrinsic::x86_avx512_psrlv_q_512:
3205     case Intrinsic::x86_avx2_psrav_d:
3206     case Intrinsic::x86_avx2_psrav_d_256:
3207     case Intrinsic::x86_avx512_psrav_d_512:
3208     case Intrinsic::x86_avx512_psrav_q_128:
3209     case Intrinsic::x86_avx512_psrav_q_256:
3210     case Intrinsic::x86_avx512_psrav_q_512:
3211       handleVectorShiftIntrinsic(I, /* Variable */ true);
3212       break;
3213 
3214     case Intrinsic::x86_sse2_packsswb_128:
3215     case Intrinsic::x86_sse2_packssdw_128:
3216     case Intrinsic::x86_sse2_packuswb_128:
3217     case Intrinsic::x86_sse41_packusdw:
3218     case Intrinsic::x86_avx2_packsswb:
3219     case Intrinsic::x86_avx2_packssdw:
3220     case Intrinsic::x86_avx2_packuswb:
3221     case Intrinsic::x86_avx2_packusdw:
3222       handleVectorPackIntrinsic(I);
3223       break;
3224 
3225     case Intrinsic::x86_mmx_packsswb:
3226     case Intrinsic::x86_mmx_packuswb:
3227       handleVectorPackIntrinsic(I, 16);
3228       break;
3229 
3230     case Intrinsic::x86_mmx_packssdw:
3231       handleVectorPackIntrinsic(I, 32);
3232       break;
3233 
3234     case Intrinsic::x86_mmx_psad_bw:
3235     case Intrinsic::x86_sse2_psad_bw:
3236     case Intrinsic::x86_avx2_psad_bw:
3237       handleVectorSadIntrinsic(I);
3238       break;
3239 
3240     case Intrinsic::x86_sse2_pmadd_wd:
3241     case Intrinsic::x86_avx2_pmadd_wd:
3242     case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
3243     case Intrinsic::x86_avx2_pmadd_ub_sw:
3244       handleVectorPmaddIntrinsic(I);
3245       break;
3246 
3247     case Intrinsic::x86_ssse3_pmadd_ub_sw:
3248       handleVectorPmaddIntrinsic(I, 8);
3249       break;
3250 
3251     case Intrinsic::x86_mmx_pmadd_wd:
3252       handleVectorPmaddIntrinsic(I, 16);
3253       break;
3254 
3255     case Intrinsic::x86_sse_cmp_ss:
3256     case Intrinsic::x86_sse2_cmp_sd:
3257     case Intrinsic::x86_sse_comieq_ss:
3258     case Intrinsic::x86_sse_comilt_ss:
3259     case Intrinsic::x86_sse_comile_ss:
3260     case Intrinsic::x86_sse_comigt_ss:
3261     case Intrinsic::x86_sse_comige_ss:
3262     case Intrinsic::x86_sse_comineq_ss:
3263     case Intrinsic::x86_sse_ucomieq_ss:
3264     case Intrinsic::x86_sse_ucomilt_ss:
3265     case Intrinsic::x86_sse_ucomile_ss:
3266     case Intrinsic::x86_sse_ucomigt_ss:
3267     case Intrinsic::x86_sse_ucomige_ss:
3268     case Intrinsic::x86_sse_ucomineq_ss:
3269     case Intrinsic::x86_sse2_comieq_sd:
3270     case Intrinsic::x86_sse2_comilt_sd:
3271     case Intrinsic::x86_sse2_comile_sd:
3272     case Intrinsic::x86_sse2_comigt_sd:
3273     case Intrinsic::x86_sse2_comige_sd:
3274     case Intrinsic::x86_sse2_comineq_sd:
3275     case Intrinsic::x86_sse2_ucomieq_sd:
3276     case Intrinsic::x86_sse2_ucomilt_sd:
3277     case Intrinsic::x86_sse2_ucomile_sd:
3278     case Intrinsic::x86_sse2_ucomigt_sd:
3279     case Intrinsic::x86_sse2_ucomige_sd:
3280     case Intrinsic::x86_sse2_ucomineq_sd:
3281       handleVectorCompareScalarIntrinsic(I);
3282       break;
3283 
3284     case Intrinsic::x86_sse_cmp_ps:
3285     case Intrinsic::x86_sse2_cmp_pd:
3286       // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
3287       // generates reasonably looking IR that fails in the backend with "Do not
3288       // know how to split the result of this operator!".
3289       handleVectorComparePackedIntrinsic(I);
3290       break;
3291 
3292     case Intrinsic::x86_bmi_bextr_32:
3293     case Intrinsic::x86_bmi_bextr_64:
3294     case Intrinsic::x86_bmi_bzhi_32:
3295     case Intrinsic::x86_bmi_bzhi_64:
3296     case Intrinsic::x86_bmi_pdep_32:
3297     case Intrinsic::x86_bmi_pdep_64:
3298     case Intrinsic::x86_bmi_pext_32:
3299     case Intrinsic::x86_bmi_pext_64:
3300       handleBmiIntrinsic(I);
3301       break;
3302 
3303     case Intrinsic::x86_pclmulqdq:
3304     case Intrinsic::x86_pclmulqdq_256:
3305     case Intrinsic::x86_pclmulqdq_512:
3306       handlePclmulIntrinsic(I);
3307       break;
3308 
3309     case Intrinsic::is_constant:
3310       // The result of llvm.is.constant() is always defined.
3311       setShadow(&I, getCleanShadow(&I));
3312       setOrigin(&I, getCleanOrigin());
3313       break;
3314 
3315     default:
3316       if (!handleUnknownIntrinsic(I))
3317         visitInstruction(I);
3318       break;
3319     }
3320   }
3321 
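       // Instrument a call site: the shadow of every argument is written into
       // the thread-local __msan_param_tls array (and its origin into the
       // corresponding origin TLS array) before the call, and the return value
       // shadow is read back from __msan_retval_tls right after it.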
3322   void visitCallBase(CallBase &CB) {
3323     assert(!CB.getMetadata("nosanitize"));
3324     if (CB.isInlineAsm()) {
3325       // For inline asm (either a call to an asm function or a callbr
3326       // instruction), do the usual thing: check argument shadow and mark all
3327       // outputs as clean. Note that any side effects of the inline asm that
3328       // are not immediately visible in its constraints are not handled.
3329       if (ClHandleAsmConservative && MS.CompileKernel)
3330         visitAsmInstruction(CB);
3331       else
3332         visitInstruction(CB);
3333       return;
3334     }
3335     if (auto *Call = dyn_cast<CallInst>(&CB)) {
3336       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
3337 
3338       // We are going to insert code that relies on the fact that the callee
3339       // will become a non-readonly function after it is instrumented by us. To
3340       // prevent this code from being optimized out, mark that function
3341       // non-readonly in advance.
3342       if (Function *Func = Call->getCalledFunction()) {
3343         // Clear out readonly/readnone attributes.
3344         AttrBuilder B;
3345         B.addAttribute(Attribute::ReadOnly)
3346             .addAttribute(Attribute::ReadNone)
3347             .addAttribute(Attribute::WriteOnly)
3348             .addAttribute(Attribute::ArgMemOnly)
3349             .addAttribute(Attribute::Speculatable);
3350         Func->removeAttributes(AttributeList::FunctionIndex, B);
3351       }
3352 
3353       maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
3354     }
3355     IRBuilder<> IRB(&CB);
3356 
3357     unsigned ArgOffset = 0;
3358     LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
3359     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
3360          ++ArgIt) {
3361       Value *A = *ArgIt;
3362       unsigned i = ArgIt - CB.arg_begin();
3363       if (!A->getType()->isSized()) {
3364         LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
3365         continue;
3366       }
3367       unsigned Size = 0;
3368       Value *Store = nullptr;
3369       // Compute the Shadow for arg even if it is ByVal, because
3370       // in that case getShadow() will copy the actual arg shadow to
3371       // __msan_param_tls.
3372       Value *ArgShadow = getShadow(A);
3373       Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
3374       LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
3375                         << " Shadow: " << *ArgShadow << "\n");
3376       bool ArgIsInitialized = false;
3377       const DataLayout &DL = F.getParent()->getDataLayout();
3378       if (CB.paramHasAttr(i, Attribute::ByVal)) {
3379         assert(A->getType()->isPointerTy() &&
3380                "ByVal argument is not a pointer!");
3381         Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
3382         if (ArgOffset + Size > kParamTLSSize) break;
3383         const MaybeAlign ParamAlignment(CB.getParamAlign(i));
3384         MaybeAlign Alignment = llvm::None;
3385         if (ParamAlignment)
3386           Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
3387         Value *AShadowPtr =
3388             getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
3389                                /*isStore*/ false)
3390                 .first;
3391 
3392         Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
3393                                  Alignment, Size);
3394         // TODO(glider): need to copy origins.
3395       } else {
3396         Size = DL.getTypeAllocSize(A->getType());
3397         if (ArgOffset + Size > kParamTLSSize) break;
3398         Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
3399                                        kShadowTLSAlignment);
3400         Constant *Cst = dyn_cast<Constant>(ArgShadow);
3401         if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
3402       }
3403       if (MS.TrackOrigins && !ArgIsInitialized)
3404         IRB.CreateStore(getOrigin(A),
3405                         getOriginPtrForArgument(A, IRB, ArgOffset));
3406       (void)Store;
3407       assert(Size != 0 && Store != nullptr);
3408       LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
3409       ArgOffset += alignTo(Size, 8);
3410     }
3411     LLVM_DEBUG(dbgs() << "  done with call args\n");
3412 
3413     FunctionType *FT = CB.getFunctionType();
3414     if (FT->isVarArg()) {
3415       VAHelper->visitCallBase(CB, IRB);
3416     }
3417 
3418     // Now, get the shadow for the RetVal.
3419     if (!CB.getType()->isSized())
3420       return;
3421     // Don't emit the epilogue for musttail call returns.
3422     if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
3423       return;
3424     IRBuilder<> IRBBefore(&CB);
3425     // Until we have full dynamic coverage, make sure the retval shadow is 0.
3426     Value *Base = getShadowPtrForRetval(&CB, IRBBefore);
3427     IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
3428                                  kShadowTLSAlignment);
3429     BasicBlock::iterator NextInsn;
3430     if (isa<CallInst>(CB)) {
3431       NextInsn = ++CB.getIterator();
3432       assert(NextInsn != CB.getParent()->end());
3433     } else {
3434       BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
3435       if (!NormalDest->getSinglePredecessor()) {
3436         // FIXME: this case is tricky, so we are just conservative here.
3437         // Perhaps we need to split the edge between this BB and NormalDest,
3438         // but a naive attempt to use SplitEdge leads to a crash.
3439         setShadow(&CB, getCleanShadow(&CB));
3440         setOrigin(&CB, getCleanOrigin());
3441         return;
3442       }
3443       // FIXME: NextInsn is likely in a basic block that has not been visited
3444       // yet. Anything inserted there will be instrumented by MSan later!
3445       NextInsn = NormalDest->getFirstInsertionPt();
3446       assert(NextInsn != NormalDest->end() &&
3447              "Could not find insertion point for retval shadow load");
3448     }
3449     IRBuilder<> IRBAfter(&*NextInsn);
3450     Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
3451         getShadowTy(&CB), getShadowPtrForRetval(&CB, IRBAfter),
3452         kShadowTLSAlignment, "_msret");
3453     setShadow(&CB, RetvalShadow);
3454     if (MS.TrackOrigins)
3455       setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
3456                                          getOriginPtrForRetval(IRBAfter)));
3457   }
3458 
3459   bool isAMustTailRetVal(Value *RetVal) {
3460     if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
3461       RetVal = I->getOperand(0);
3462     }
3463     if (auto *I = dyn_cast<CallInst>(RetVal)) {
3464       return I->isMustTailCall();
3465     }
3466     return false;
3467   }
3468 
3469   void visitReturnInst(ReturnInst &I) {
3470     IRBuilder<> IRB(&I);
3471     Value *RetVal = I.getReturnValue();
3472     if (!RetVal) return;
3473     // Don't emit the epilogue for musttail call returns.
3474     if (isAMustTailRetVal(RetVal)) return;
3475     Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
3476     if (CheckReturnValue) {
3477       insertShadowCheck(RetVal, &I);
3478       Value *Shadow = getCleanShadow(RetVal);
3479       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
3480     } else {
3481       Value *Shadow = getShadow(RetVal);
3482       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
3483       if (MS.TrackOrigins)
3484         IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
3485     }
3486   }
3487 
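       // Shadow and origin PHIs are created empty here; their incoming values
       // are filled in after all basic blocks have been visited (see
       // ShadowPHINodes).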
3488   void visitPHINode(PHINode &I) {
3489     IRBuilder<> IRB(&I);
3490     if (!PropagateShadow) {
3491       setShadow(&I, getCleanShadow(&I));
3492       setOrigin(&I, getCleanOrigin());
3493       return;
3494     }
3495 
3496     ShadowPHINodes.push_back(&I);
3497     setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
3498                                 "_msphi_s"));
3499     if (MS.TrackOrigins)
3500       setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
3501                                   "_msphi_o"));
3502   }
3503 
3504   Value *getLocalVarDescription(AllocaInst &I) {
3505     SmallString<2048> StackDescriptionStorage;
3506     raw_svector_ostream StackDescription(StackDescriptionStorage);
3507     // We create a string with a description of the stack allocation and
3508     // pass it into __msan_set_alloca_origin.
3509     // It will be printed by the run-time if stack-originated UMR is found.
3510     // The first 4 bytes of the string are set to '----' and will be replaced
3511     // by __msan_va_arg_overflow_size_tls at the first call.
3512     StackDescription << "----" << I.getName() << "@" << F.getName();
3513     return createPrivateNonConstGlobalForString(*F.getParent(),
3514                                                 StackDescription.str());
3515   }
3516 
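       // Poison the shadow of a stack allocation for userspace MSan, either
       // via a runtime call (MsanPoisonStackFn) or with an inline memset of
       // the poison pattern; with origin tracking, also register the
       // allocation description with the runtime.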
3517   void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3518     if (PoisonStack && ClPoisonStackWithCall) {
3519       IRB.CreateCall(MS.MsanPoisonStackFn,
3520                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3521     } else {
3522       Value *ShadowBase, *OriginBase;
3523       std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
3524           &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);
3525 
3526       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
3527       IRB.CreateMemSet(ShadowBase, PoisonValue, Len,
3528                        MaybeAlign(I.getAlignment()));
3529     }
3530 
3531     if (PoisonStack && MS.TrackOrigins) {
3532       Value *Descr = getLocalVarDescription(I);
3533       IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
3534                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3535                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
3536                       IRB.CreatePointerCast(&F, MS.IntptrTy)});
3537     }
3538   }
3539 
3540   void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3541     Value *Descr = getLocalVarDescription(I);
3542     if (PoisonStack) {
3543       IRB.CreateCall(MS.MsanPoisonAllocaFn,
3544                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3545                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
3546     } else {
3547       IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
3548                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3549     }
3550   }
3551 
3552   void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
3553     if (!InsPoint)
3554       InsPoint = &I;
3555     IRBuilder<> IRB(InsPoint->getNextNode());
3556     const DataLayout &DL = F.getParent()->getDataLayout();
3557     uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
3558     Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
3559     if (I.isArrayAllocation())
3560       Len = IRB.CreateMul(Len, I.getArraySize());
3561 
3562     if (MS.CompileKernel)
3563       poisonAllocaKmsan(I, IRB, Len);
3564     else
3565       poisonAllocaUserspace(I, IRB, Len);
3566   }
3567 
3568   void visitAllocaInst(AllocaInst &I) {
3569     setShadow(&I, getCleanShadow(&I));
3570     setOrigin(&I, getCleanOrigin());
3571     // We'll get to this alloca later unless it's poisoned at the corresponding
3572     // llvm.lifetime.start.
3573     AllocaSet.insert(&I);
3574   }
3575 
3576   void visitSelectInst(SelectInst& I) {
3577     IRBuilder<> IRB(&I);
3578     // a = select b, c, d
3579     Value *B = I.getCondition();
3580     Value *C = I.getTrueValue();
3581     Value *D = I.getFalseValue();
3582     Value *Sb = getShadow(B);
3583     Value *Sc = getShadow(C);
3584     Value *Sd = getShadow(D);
3585 
3586     // Result shadow if condition shadow is 0.
3587     Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
3588     Value *Sa1;
3589     if (I.getType()->isAggregateType()) {
3590       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
3591       // an extra "select". This results in much more compact IR.
3592       // Sa = select Sb, poisoned, (select b, Sc, Sd)
3593       Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
3594     } else {
3595       // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
3596       // If Sb (condition is poisoned), look for bits in c and d that are equal
3597       // and both unpoisoned.
3598       // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
3599 
3600       // Cast arguments to shadow-compatible type.
3601       C = CreateAppToShadowCast(IRB, C);
3602       D = CreateAppToShadowCast(IRB, D);
3603 
3604       // Result shadow if condition shadow is 1.
3605       Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
3606     }
3607     Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
3608     setShadow(&I, Sa);
3609     if (MS.TrackOrigins) {
3610       // Origins are always i32, so any vector conditions must be flattened.
3611       // FIXME: consider tracking vector origins for app vectors?
3612       if (B->getType()->isVectorTy()) {
3613         Type *FlatTy = getShadowTyNoVec(B->getType());
3614         B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
3615                                 ConstantInt::getNullValue(FlatTy));
3616         Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
3617                                       ConstantInt::getNullValue(FlatTy));
3618       }
3619       // a = select b, c, d
3620       // Oa = Sb ? Ob : (b ? Oc : Od)
3621       setOrigin(
3622           &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
3623                                IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
3624                                                 getOrigin(I.getFalseValue()))));
3625     }
3626   }
3627 
3628   void visitLandingPadInst(LandingPadInst &I) {
3629     // Do nothing.
3630     // See https://github.com/google/sanitizers/issues/504
3631     setShadow(&I, getCleanShadow(&I));
3632     setOrigin(&I, getCleanOrigin());
3633   }
3634 
3635   void visitCatchSwitchInst(CatchSwitchInst &I) {
3636     setShadow(&I, getCleanShadow(&I));
3637     setOrigin(&I, getCleanOrigin());
3638   }
3639 
3640   void visitFuncletPadInst(FuncletPadInst &I) {
3641     setShadow(&I, getCleanShadow(&I));
3642     setOrigin(&I, getCleanOrigin());
3643   }
3644 
3645   void visitGetElementPtrInst(GetElementPtrInst &I) {
3646     handleShadowOr(I);
3647   }
3648 
3649   void visitExtractValueInst(ExtractValueInst &I) {
3650     IRBuilder<> IRB(&I);
3651     Value *Agg = I.getAggregateOperand();
3652     LLVM_DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
3653     Value *AggShadow = getShadow(Agg);
3654     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
3655     Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
3656     LLVM_DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
3657     setShadow(&I, ResShadow);
3658     setOriginForNaryOp(I);
3659   }
3660 
3661   void visitInsertValueInst(InsertValueInst &I) {
3662     IRBuilder<> IRB(&I);
3663     LLVM_DEBUG(dbgs() << "InsertValue:  " << I << "\n");
3664     Value *AggShadow = getShadow(I.getAggregateOperand());
3665     Value *InsShadow = getShadow(I.getInsertedValueOperand());
3666     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
3667     LLVM_DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
3668     Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
3669     LLVM_DEBUG(dbgs() << "   Res:        " << *Res << "\n");
3670     setShadow(&I, Res);
3671     setOriginForNaryOp(I);
3672   }
3673 
3674   void dumpInst(Instruction &I) {
3675     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
3676       errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
3677     } else {
3678       errs() << "ZZZ " << I.getOpcodeName() << "\n";
3679     }
3680     errs() << "QQQ " << I << "\n";
3681   }
3682 
3683   void visitResumeInst(ResumeInst &I) {
3684     LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
3685     // Nothing to do here.
3686   }
3687 
3688   void visitCleanupReturnInst(CleanupReturnInst &CRI) {
3689     LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
3690     // Nothing to do here.
3691   }
3692 
3693   void visitCatchReturnInst(CatchReturnInst &CRI) {
3694     LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
3695     // Nothing to do here.
3696   }
3697 
3698   void instrumentAsmArgument(Value *Operand, Instruction &I, IRBuilder<> &IRB,
3699                              const DataLayout &DL, bool isOutput) {
3700     // For each assembly argument, we check its value for being initialized.
3701     // If the argument is a pointer, we assume it points to a single element
3702     // of the corresponding type (or to an 8-byte word, if the type is unsized).
3703     // Each such pointer is instrumented with a call to the runtime library.
3704     Type *OpType = Operand->getType();
3705     // Check the operand value itself.
3706     insertShadowCheck(Operand, &I);
3707     if (!OpType->isPointerTy() || !isOutput) {
3708       assert(!isOutput);
3709       return;
3710     }
3711     Type *ElType = OpType->getPointerElementType();
3712     if (!ElType->isSized())
3713       return;
3714     int Size = DL.getTypeStoreSize(ElType);
3715     Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
3716     Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
3717     IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
3718   }
3719 
3720   /// Get the number of output arguments returned by pointers.
3721   int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
3722     int NumRetOutputs = 0;
3723     int NumOutputs = 0;
3724     Type *RetTy = cast<Value>(CB)->getType();
3725     if (!RetTy->isVoidTy()) {
3726       // Register outputs are returned via the CallInst return value.
3727       auto *ST = dyn_cast<StructType>(RetTy);
3728       if (ST)
3729         NumRetOutputs = ST->getNumElements();
3730       else
3731         NumRetOutputs = 1;
3732     }
3733     InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
3734     for (size_t i = 0, n = Constraints.size(); i < n; i++) {
3735       InlineAsm::ConstraintInfo Info = Constraints[i];
3736       switch (Info.Type) {
3737       case InlineAsm::isOutput:
3738         NumOutputs++;
3739         break;
3740       default:
3741         break;
3742       }
3743     }
3744     return NumOutputs - NumRetOutputs;
3745   }
3746 
3747   void visitAsmInstruction(Instruction &I) {
3748     // Conservative inline assembly handling: check for poisoned shadow of
3749     // asm() arguments, then unpoison the result and all the memory locations
3750     // pointed to by those arguments.
3751     // An inline asm() statement in C++ contains lists of input and output
3752     // arguments used by the assembly code. These are mapped to operands of the
3753     // CallInst as follows:
3754     //  - nR register outputs ("=r") are returned by value in a single structure
3755     //  (SSA value of the CallInst);
3756     //  - nO other outputs ("=m" and others) are returned by pointer as first
3757     // nO operands of the CallInst;
3758     //  - nI inputs ("r", "m" and others) are passed to CallInst as the
3759     // remaining nI operands.
3760     // The total number of asm() arguments in the source is nR+nO+nI, and the
3761     // corresponding CallInst has nO+nI+1 operands (the last operand is the
3762     // function to be called).
3763     const DataLayout &DL = F.getParent()->getDataLayout();
3764     CallBase *CB = cast<CallBase>(&I);
3765     IRBuilder<> IRB(&I);
3766     InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
3767     int OutputArgs = getNumOutputArgs(IA, CB);
3768     // The last operand of a CallInst is the function itself.
3769     int NumOperands = CB->getNumOperands() - 1;
3770 
3771     // Check input arguments. Doing so before unpoisoning output arguments, so
3772     // that we won't overwrite uninit values before checking them.
3773     for (int i = OutputArgs; i < NumOperands; i++) {
3774       Value *Operand = CB->getOperand(i);
3775       instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ false);
3776     }
3777     // Unpoison output arguments. This must happen before the actual InlineAsm
3778     // call, so that the shadow for memory published in the asm() statement
3779     // remains valid.
3780     for (int i = 0; i < OutputArgs; i++) {
3781       Value *Operand = CB->getOperand(i);
3782       instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ true);
3783     }
3784 
3785     setShadow(&I, getCleanShadow(&I));
3786     setOrigin(&I, getCleanOrigin());
3787   }
3788 
3789   void visitInstruction(Instruction &I) {
3790     // Everything else: stop propagating and check for poisoned shadow.
3791     if (ClDumpStrictInstructions)
3792       dumpInst(I);
3793     LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
3794     for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
3795       Value *Operand = I.getOperand(i);
3796       if (Operand->getType()->isSized())
3797         insertShadowCheck(Operand, &I);
3798     }
3799     setShadow(&I, getCleanShadow(&I));
3800     setOrigin(&I, getCleanOrigin());
3801   }
3802 };
3803 
3804 /// AMD64-specific implementation of VarArgHelper.
3805 struct VarArgAMD64Helper : public VarArgHelper {
3806   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
3807   // See a comment in visitCallBase for more details.
3808   static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
3809   static const unsigned AMD64FpEndOffsetSSE = 176;
3810   // If SSE is disabled, fp_offset in va_list is zero.
3811   static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
3812 
3813   unsigned AMD64FpEndOffset;
3814   Function &F;
3815   MemorySanitizer &MS;
3816   MemorySanitizerVisitor &MSV;
3817   Value *VAArgTLSCopy = nullptr;
3818   Value *VAArgTLSOriginCopy = nullptr;
3819   Value *VAArgOverflowSize = nullptr;
3820 
3821   SmallVector<CallInst*, 16> VAStartInstrumentationList;
3822 
3823   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3824 
3825   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
3826                     MemorySanitizerVisitor &MSV)
3827       : F(F), MS(MS), MSV(MSV) {
3828     AMD64FpEndOffset = AMD64FpEndOffsetSSE;
3829     for (const auto &Attr : F.getAttributes().getFnAttributes()) {
3830       if (Attr.isStringAttribute() &&
3831           (Attr.getKindAsString() == "target-features")) {
3832         if (Attr.getValueAsString().contains("-sse"))
3833           AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
3834         break;
3835       }
3836     }
3837   }
3838 
3839   ArgKind classifyArgument(Value* arg) {
3840     // A very rough approximation of X86_64 argument classification rules.
3841     Type *T = arg->getType();
3842     if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
3843       return AK_FloatingPoint;
3844     if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3845       return AK_GeneralPurpose;
3846     if (T->isPointerTy())
3847       return AK_GeneralPurpose;
3848     return AK_Memory;
3849   }
3850 
3851   // For VarArg functions, store the argument shadow in an ABI-specific format
3852   // that corresponds to va_list layout.
3853   // We do this because Clang lowers va_arg in the frontend, and this pass
3854   // only sees the low level code that deals with va_list internals.
3855   // A much easier alternative (provided that Clang emits va_arg instructions)
3856   // would have been to associate each live instance of va_list with a copy of
3857   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
3858   // order.
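       // The layout mirrors the AMD64 register save area: bytes [0, 48) of
       // __msan_va_arg_tls hold shadow for general-purpose register arguments,
       // [48, 176) for FP/SSE register arguments, and shadow for memory
       // (overflow) arguments follows at offset 176 (or at 48 when SSE is
       // disabled).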
3859   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
3860     unsigned GpOffset = 0;
3861     unsigned FpOffset = AMD64GpEndOffset;
3862     unsigned OverflowOffset = AMD64FpEndOffset;
3863     const DataLayout &DL = F.getParent()->getDataLayout();
3864     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
3865          ++ArgIt) {
3866       Value *A = *ArgIt;
3867       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
3868       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
3869       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
3870       if (IsByVal) {
3871         // ByVal arguments always go to the overflow area.
3872         // Fixed arguments passed through the overflow area will be stepped
3873         // over by va_start, so don't count them towards the offset.
3874         if (IsFixed)
3875           continue;
3876         assert(A->getType()->isPointerTy());
3877         Type *RealTy = A->getType()->getPointerElementType();
3878         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3879         Value *ShadowBase = getShadowPtrForVAArgument(
3880             RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8));
3881         Value *OriginBase = nullptr;
3882         if (MS.TrackOrigins)
3883           OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
3884         OverflowOffset += alignTo(ArgSize, 8);
3885         if (!ShadowBase)
3886           continue;
3887         Value *ShadowPtr, *OriginPtr;
3888         std::tie(ShadowPtr, OriginPtr) =
3889             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
3890                                    /*isStore*/ false);
3891 
3892         IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
3893                          kShadowTLSAlignment, ArgSize);
3894         if (MS.TrackOrigins)
3895           IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
3896                            kShadowTLSAlignment, ArgSize);
3897       } else {
3898         ArgKind AK = classifyArgument(A);
3899         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
3900           AK = AK_Memory;
3901         if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
3902           AK = AK_Memory;
3903         Value *ShadowBase, *OriginBase = nullptr;
3904         switch (AK) {
3905           case AK_GeneralPurpose:
3906             ShadowBase =
3907                 getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
3908             if (MS.TrackOrigins)
3909               OriginBase =
3910                   getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
3911             GpOffset += 8;
3912             break;
3913           case AK_FloatingPoint:
3914             ShadowBase =
3915                 getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
3916             if (MS.TrackOrigins)
3917               OriginBase =
3918                   getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
3919             FpOffset += 16;
3920             break;
3921           case AK_Memory:
3922             if (IsFixed)
3923               continue;
3924             uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3925             ShadowBase =
3926                 getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
3927             if (MS.TrackOrigins)
3928               OriginBase =
3929                   getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3930             OverflowOffset += alignTo(ArgSize, 8);
3931         }
3932         // Take fixed arguments into account for GpOffset and FpOffset,
3933         // but don't actually store shadows for them.
3934         // TODO(glider): don't call get*PtrForVAArgument() for them.
3935         if (IsFixed)
3936           continue;
3937         if (!ShadowBase)
3938           continue;
3939         Value *Shadow = MSV.getShadow(A);
3940         IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
3941         if (MS.TrackOrigins) {
3942           Value *Origin = MSV.getOrigin(A);
3943           unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
3944           MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
3945                           std::max(kShadowTLSAlignment, kMinOriginAlignment));
3946         }
3947       }
3948     }
3949     Constant *OverflowSize =
3950       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
3951     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3952   }
3953 
3954   /// Compute the shadow address for a given va_arg.
3955   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3956                                    unsigned ArgOffset, unsigned ArgSize) {
3957     // Make sure we don't overflow __msan_va_arg_tls.
3958     if (ArgOffset + ArgSize > kParamTLSSize)
3959       return nullptr;
3960     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3961     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3962     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3963                               "_msarg_va_s");
3964   }
3965 
3966   /// Compute the origin address for a given va_arg.
3967   Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
3968     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
3969     // getOriginPtrForVAArgument() is always called after
3970     // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
3971     // overflow.
3972     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3973     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
3974                               "_msarg_va_o");
3975   }
3976 
3977   void unpoisonVAListTagForInst(IntrinsicInst &I) {
3978     IRBuilder<> IRB(&I);
3979     Value *VAListTag = I.getArgOperand(0);
3980     Value *ShadowPtr, *OriginPtr;
3981     const Align Alignment = Align(8);
3982     std::tie(ShadowPtr, OriginPtr) =
3983         MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
3984                                /*isStore*/ true);
3985 
3986     // Unpoison the whole __va_list_tag.
3987     // FIXME: magic ABI constants.
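         // (24 is sizeof(__va_list_tag) in the SysV x86_64 ABI: two 32-bit
         // offsets followed by two pointers.)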
3988     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3989                      /* size */ 24, Alignment, false);
3990     // We shouldn't need to zero out the origins, as they're only checked for
3991     // nonzero shadow.
3992   }
3993 
3994   void visitVAStartInst(VAStartInst &I) override {
3995     if (F.getCallingConv() == CallingConv::Win64)
3996       return;
3997     VAStartInstrumentationList.push_back(&I);
3998     unpoisonVAListTagForInst(I);
3999   }
4000 
4001   void visitVACopyInst(VACopyInst &I) override {
4002     if (F.getCallingConv() == CallingConv::Win64) return;
4003     unpoisonVAListTagForInst(I);
4004   }
4005 
4006   void finalizeInstrumentation() override {
4007     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4008            "finalizeInstrumentation called twice");
4009     if (!VAStartInstrumentationList.empty()) {
4010       // If there is a va_start in this function, make a backup copy of
4011       // va_arg_tls somewhere in the function entry block.
4012       IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4013       VAArgOverflowSize =
4014           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4015       Value *CopySize =
4016         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
4017                       VAArgOverflowSize);
4018       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4019       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4020       if (MS.TrackOrigins) {
4021         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4022         IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
4023                          Align(8), CopySize);
4024       }
4025     }
4026 
4027     // Instrument va_start.
4028     // Copy va_list shadow from the backup copy of the TLS contents.
4029     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4030       CallInst *OrigInst = VAStartInstrumentationList[i];
4031       IRBuilder<> IRB(OrigInst->getNextNode());
4032       Value *VAListTag = OrigInst->getArgOperand(0);
4033 
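           // For reference, the SysV x86_64 __va_list_tag layout that the
           // constant offsets below assume (a sketch, not a definition used by
           // this pass):
           //   struct __va_list_tag {
           //     unsigned int gp_offset;  // offset  0
           //     unsigned int fp_offset;  // offset  4
           //     void *overflow_arg_area; // offset  8
           //     void *reg_save_area;     // offset 16
           //   };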
4034       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4035       Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
4036           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4037                         ConstantInt::get(MS.IntptrTy, 16)),
4038           PointerType::get(RegSaveAreaPtrTy, 0));
4039       Value *RegSaveAreaPtr =
4040           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4041       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4042       const Align Alignment = Align(16);
4043       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4044           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4045                                  Alignment, /*isStore*/ true);
4046       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4047                        AMD64FpEndOffset);
4048       if (MS.TrackOrigins)
4049         IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4050                          Alignment, AMD64FpEndOffset);
4051       Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4052       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
4053           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4054                         ConstantInt::get(MS.IntptrTy, 8)),
4055           PointerType::get(OverflowArgAreaPtrTy, 0));
4056       Value *OverflowArgAreaPtr =
4057           IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4058       Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4059       std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4060           MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
4061                                  Alignment, /*isStore*/ true);
4062       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
4063                                              AMD64FpEndOffset);
4064       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
4065                        VAArgOverflowSize);
4066       if (MS.TrackOrigins) {
4067         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
4068                                         AMD64FpEndOffset);
4069         IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
4070                          VAArgOverflowSize);
4071       }
4072     }
4073   }
4074 };
4075 
4076 /// MIPS64-specific implementation of VarArgHelper.
4077 struct VarArgMIPS64Helper : public VarArgHelper {
4078   Function &F;
4079   MemorySanitizer &MS;
4080   MemorySanitizerVisitor &MSV;
4081   Value *VAArgTLSCopy = nullptr;
4082   Value *VAArgSize = nullptr;
4083 
4084   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4085 
4086   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
4087                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4088 
4089   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4090     unsigned VAArgOffset = 0;
4091     const DataLayout &DL = F.getParent()->getDataLayout();
4092     for (auto ArgIt = CB.arg_begin() + CB.getFunctionType()->getNumParams(),
4093               End = CB.arg_end();
4094          ArgIt != End; ++ArgIt) {
4095       Triple TargetTriple(F.getParent()->getTargetTriple());
4096       Value *A = *ArgIt;
4097       Value *Base;
4098       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4099       if (TargetTriple.getArch() == Triple::mips64) {
4100         // Adjust the shadow offset for arguments smaller than 8 bytes to
4101         // match the placement of bits on a big-endian system.
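             // For example, an i32 occupies only the last 4 bytes of its
             // 8-byte slot on big-endian mips64, so its shadow is written
             // 4 bytes into the slot as well.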
4102         if (ArgSize < 8)
4103           VAArgOffset += (8 - ArgSize);
4104       }
4105       Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
4106       VAArgOffset += ArgSize;
4107       VAArgOffset = alignTo(VAArgOffset, 8);
4108       if (!Base)
4109         continue;
4110       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4111     }
4112 
4113     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
4114     // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
4115     // new class member; it holds the total size of all varargs.
4116     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4117   }
4118 
4119   /// Compute the shadow address for a given va_arg.
4120   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4121                                    unsigned ArgOffset, unsigned ArgSize) {
4122     // Make sure we don't overflow __msan_va_arg_tls.
4123     if (ArgOffset + ArgSize > kParamTLSSize)
4124       return nullptr;
4125     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4126     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4127     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4128                               "_msarg");
4129   }
4130 
4131   void visitVAStartInst(VAStartInst &I) override {
4132     IRBuilder<> IRB(&I);
4133     VAStartInstrumentationList.push_back(&I);
4134     Value *VAListTag = I.getArgOperand(0);
4135     Value *ShadowPtr, *OriginPtr;
4136     const Align Alignment = Align(8);
4137     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4138         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4139     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4140                      /* size */ 8, Alignment, false);
4141   }
4142 
4143   void visitVACopyInst(VACopyInst &I) override {
4144     IRBuilder<> IRB(&I);
4145     VAStartInstrumentationList.push_back(&I);
4146     Value *VAListTag = I.getArgOperand(0);
4147     Value *ShadowPtr, *OriginPtr;
4148     const Align Alignment = Align(8);
4149     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4150         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4151     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4152                      /* size */ 8, Alignment, false);
4153   }
4154 
4155   void finalizeInstrumentation() override {
4156     assert(!VAArgSize && !VAArgTLSCopy &&
4157            "finalizeInstrumentation called twice");
4158     IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4159     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4160     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
4161                                     VAArgSize);
4162 
4163     if (!VAStartInstrumentationList.empty()) {
4164       // If there is a va_start in this function, make a backup copy of
4165       // va_arg_tls somewhere in the function entry block.
4166       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4167       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4168     }
4169 
4170     // Instrument va_start.
4171     // Copy va_list shadow from the backup copy of the TLS contents.
4172     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4173       CallInst *OrigInst = VAStartInstrumentationList[i];
4174       IRBuilder<> IRB(OrigInst->getNextNode());
4175       Value *VAListTag = OrigInst->getArgOperand(0);
4176       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4177       Value *RegSaveAreaPtrPtr =
4178           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4179                              PointerType::get(RegSaveAreaPtrTy, 0));
4180       Value *RegSaveAreaPtr =
4181           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4182       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4183       const Align Alignment = Align(8);
4184       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4185           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4186                                  Alignment, /*isStore*/ true);
4187       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4188                        CopySize);
4189     }
4190   }
4191 };
4192 
4193 /// AArch64-specific implementation of VarArgHelper.
4194 struct VarArgAArch64Helper : public VarArgHelper {
4195   static const unsigned kAArch64GrArgSize = 64;
4196   static const unsigned kAArch64VrArgSize = 128;
4197 
4198   static const unsigned AArch64GrBegOffset = 0;
4199   static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
4200   // Make VR space aligned to 16 bytes.
4201   static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
4202   static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
4203                                              + kAArch64VrArgSize;
4204   static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
4205 
4206   Function &F;
4207   MemorySanitizer &MS;
4208   MemorySanitizerVisitor &MSV;
4209   Value *VAArgTLSCopy = nullptr;
4210   Value *VAArgOverflowSize = nullptr;
4211 
4212   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4213 
4214   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
4215 
4216   VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
4217                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4218 
4219   ArgKind classifyArgument(Value* arg) {
4220     Type *T = arg->getType();
4221     if (T->isFPOrFPVectorTy())
4222       return AK_FloatingPoint;
4223     if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
4224         || (T->isPointerTy()))
4225       return AK_GeneralPurpose;
4226     return AK_Memory;
4227   }
4228 
4229   // The instrumentation stores the argument shadow in a non-ABI-specific
4230   // format because it does not know which arguments are named (as in the
4231   // x86_64 case, Clang lowers va_arg in the frontend, and this pass only
4232   // sees the low-level code that deals with va_list internals).
4233   // The first eight GR registers are saved in the first 64 bytes of the
4234   // va_arg TLS array, followed by the first eight FP/SIMD registers, and
4235   // then the remaining arguments.
4236   // Using constant offsets within the va_arg TLS array allows for a fast
4237   // copy in the finalize instrumentation.
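       // In terms of the constants above, the layout of the va_arg TLS array
       // is (a sketch for orientation, matching the offsets used below):
       //   [  0,  64): shadow of GR arguments (x0-x7, 8 bytes each)
       //   [ 64, 192): shadow of FP/SIMD arguments (v0-v7, 16 bytes each)
       //   [192, ...): shadow of arguments passed on the stack (overflow area)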
4238   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4239     unsigned GrOffset = AArch64GrBegOffset;
4240     unsigned VrOffset = AArch64VrBegOffset;
4241     unsigned OverflowOffset = AArch64VAEndOffset;
4242 
4243     const DataLayout &DL = F.getParent()->getDataLayout();
4244     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4245          ++ArgIt) {
4246       Value *A = *ArgIt;
4247       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4248       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4249       ArgKind AK = classifyArgument(A);
4250       if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
4251         AK = AK_Memory;
4252       if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
4253         AK = AK_Memory;
4254       Value *Base;
4255       switch (AK) {
4256         case AK_GeneralPurpose:
4257           Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
4258           GrOffset += 8;
4259           break;
4260         case AK_FloatingPoint:
4261           Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
4262           VrOffset += 16;
4263           break;
4264         case AK_Memory:
4265           // Don't count fixed arguments in the overflow area - va_start will
4266           // skip right over them.
4267           if (IsFixed)
4268             continue;
4269           uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4270           Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
4271                                            alignTo(ArgSize, 8));
4272           OverflowOffset += alignTo(ArgSize, 8);
4273           break;
4274       }
4275       // Count Gp/Vr fixed arguments to their respective offsets, but don't
4276       // bother to actually store a shadow.
4277       if (IsFixed)
4278         continue;
4279       if (!Base)
4280         continue;
4281       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4282     }
4283     Constant *OverflowSize =
4284       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
4285     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4286   }
4287 
4288   /// Compute the shadow address for a given va_arg.
4289   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4290                                    unsigned ArgOffset, unsigned ArgSize) {
4291     // Make sure we don't overflow __msan_va_arg_tls.
4292     if (ArgOffset + ArgSize > kParamTLSSize)
4293       return nullptr;
4294     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4295     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4296     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4297                               "_msarg");
4298   }
4299 
4300   void visitVAStartInst(VAStartInst &I) override {
4301     IRBuilder<> IRB(&I);
4302     VAStartInstrumentationList.push_back(&I);
4303     Value *VAListTag = I.getArgOperand(0);
4304     Value *ShadowPtr, *OriginPtr;
4305     const Align Alignment = Align(8);
4306     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4307         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4308     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4309                      /* size */ 32, Alignment, false);
4310   }
4311 
4312   void visitVACopyInst(VACopyInst &I) override {
4313     IRBuilder<> IRB(&I);
4314     VAStartInstrumentationList.push_back(&I);
4315     Value *VAListTag = I.getArgOperand(0);
4316     Value *ShadowPtr, *OriginPtr;
4317     const Align Alignment = Align(8);
4318     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4319         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4320     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4321                      /* size */ 32, Alignment, false);
4322   }
4323 
4324   // Retrieve a va_list field of 'void*' size.
4325   Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
4326     Value *SaveAreaPtrPtr =
4327       IRB.CreateIntToPtr(
4328         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4329                       ConstantInt::get(MS.IntptrTy, offset)),
4330         Type::getInt64PtrTy(*MS.C));
4331     return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
4332   }
4333 
4334   // Retrieve a va_list field of 'int' size.
4335   Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
4336     Value *SaveAreaPtr =
4337       IRB.CreateIntToPtr(
4338         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4339                       ConstantInt::get(MS.IntptrTy, offset)),
4340         Type::getInt32PtrTy(*MS.C));
4341     Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
4342     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
4343   }
4344 
4345   void finalizeInstrumentation() override {
4346     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4347            "finalizeInstrumentation called twice");
4348     if (!VAStartInstrumentationList.empty()) {
4349       // If there is a va_start in this function, make a backup copy of
4350       // va_arg_tls somewhere in the function entry block.
4351       IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4352       VAArgOverflowSize =
4353           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4354       Value *CopySize =
4355         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
4356                       VAArgOverflowSize);
4357       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4358       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4359     }
4360 
4361     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
4362     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
4363 
4364     // Instrument va_start, copy va_list shadow from the backup copy of
4365     // the TLS contents.
4366     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4367       CallInst *OrigInst = VAStartInstrumentationList[i];
4368       IRBuilder<> IRB(OrigInst->getNextNode());
4369 
4370       Value *VAListTag = OrigInst->getArgOperand(0);
4371 
4372       // The AArch64 variadic ABI creates two areas to save the incoming
4373       // argument registers (one for the 64-bit general registers x0-x7 and
4374       // another for the 128-bit FP/SIMD registers v0-v7).
4375       // We then need to propagate the shadow arguments to both regions,
4376       // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
4377       // The remaining arguments get their shadow written for 'va::__stack'.
4378       // One caveat is that only the non-named (variadic) arguments need to
4379       // be propagated, whereas the call site instrumentation saved 'all' of
4380       // the arguments. So, to copy the shadow values from the va_arg TLS
4381       // array, we need to adjust the offsets for both the GR and VR regions
4382       // based on the __{gr,vr}_offs values (which are set according to the
4383       // incoming named arguments).
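           // For reference, the AAPCS64 va_list layout that getVAField64/32
           // below assume (a sketch, offsets in bytes):
           //   struct va_list {
           //     void *__stack;   // offset  0: next stack (overflow) argument
           //     void *__gr_top;  // offset  8: end of the GR register save area
           //     void *__vr_top;  // offset 16: end of the VR register save area
           //     int   __gr_offs; // offset 24: negative offset from __gr_top
           //     int   __vr_offs; // offset 28: negative offset from __vr_top
           //   };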
4384 
4385       // Read the stack pointer from the va_list.
4386       Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
4387 
4388       // Read both the __gr_top and __gr_off and add them up.
4389       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
4390       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
4391 
4392       Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
4393 
4394       // Read both the __vr_top and __vr_off and add them up.
4395       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
4396       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
4397 
4398       Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
4399 
4400       // We do not know how many named arguments were used, and at the call
4401       // site all the arguments were saved. Since __gr_offs is defined as
4402       // '0 - ((8 - named_gr) * 8)', the idea is to propagate only the
4403       // variadic arguments' shadow, skipping the bytes for named arguments.
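           // For example, with two named GR arguments, __gr_offs is
           // -(8 - 2) * 8 == -48, so GrRegSaveAreaShadowPtrOff below becomes
           // 64 + (-48) == 16 (the named arguments' shadow to skip) and
           // GrCopySize becomes 64 - 16 == 48 bytes of variadic register shadow.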
4404       Value *GrRegSaveAreaShadowPtrOff =
4405         IRB.CreateAdd(GrArgSize, GrOffSaveArea);
4406 
4407       Value *GrRegSaveAreaShadowPtr =
4408           MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4409                                  Align(8), /*isStore*/ true)
4410               .first;
4411 
4412       Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4413                                               GrRegSaveAreaShadowPtrOff);
4414       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
4415 
4416       IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
4417                        GrCopySize);
4418 
4419       // Again, but for FP/SIMD values.
4420       Value *VrRegSaveAreaShadowPtrOff =
4421           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
4422 
4423       Value *VrRegSaveAreaShadowPtr =
4424           MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4425                                  Align(8), /*isStore*/ true)
4426               .first;
4427 
4428       Value *VrSrcPtr = IRB.CreateInBoundsGEP(
4429         IRB.getInt8Ty(),
4430         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4431                               IRB.getInt32(AArch64VrBegOffset)),
4432         VrRegSaveAreaShadowPtrOff);
4433       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
4434 
4435       IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
4436                        VrCopySize);
4437 
4438       // And finally for remaining arguments.
4439       Value *StackSaveAreaShadowPtr =
4440           MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
4441                                  Align(16), /*isStore*/ true)
4442               .first;
4443 
4444       Value *StackSrcPtr =
4445         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4446                               IRB.getInt32(AArch64VAEndOffset));
4447 
4448       IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
4449                        Align(16), VAArgOverflowSize);
4450     }
4451   }
4452 };
4453 
4454 /// PowerPC64-specific implementation of VarArgHelper.
4455 struct VarArgPowerPC64Helper : public VarArgHelper {
4456   Function &F;
4457   MemorySanitizer &MS;
4458   MemorySanitizerVisitor &MSV;
4459   Value *VAArgTLSCopy = nullptr;
4460   Value *VAArgSize = nullptr;
4461 
4462   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4463 
4464   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
4465                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4466 
4467   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4468     // For PowerPC, we need to deal with alignment of stack arguments -
4469     // they are mostly aligned to 8 bytes, but vectors and i128 arrays
4470     // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
4471     // and QPX vectors are aligned to 32 bytes.  For that reason, we
4472     // compute the current offset from the stack pointer (which is always
4473     // properly aligned) and the offset of the first vararg, then subtract them.
4474     unsigned VAArgBase;
4475     Triple TargetTriple(F.getParent()->getTargetTriple());
4476     // The parameter save area starts 48 bytes from the frame pointer for
4477     // ABIv1 and 32 bytes for ABIv2.  This is usually determined by the
4478     // target endianness, but could in theory be overridden by a function
4479     // attribute; for simplicity, we ignore that (it'd only matter for QPX).
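         // Roughly speaking, these constants correspond to the fixed stack
         // frame header: 48 bytes under ELFv1 (back chain, CR, LR, two reserved
         // doublewords, and the TOC save slot) and 32 bytes under ELFv2 (back
         // chain, CR, LR, TOC), after which the parameter save area begins.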
4480     if (TargetTriple.getArch() == Triple::ppc64)
4481       VAArgBase = 48;
4482     else
4483       VAArgBase = 32;
4484     unsigned VAArgOffset = VAArgBase;
4485     const DataLayout &DL = F.getParent()->getDataLayout();
4486     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4487          ++ArgIt) {
4488       Value *A = *ArgIt;
4489       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4490       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4491       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
4492       if (IsByVal) {
4493         assert(A->getType()->isPointerTy());
4494         Type *RealTy = A->getType()->getPointerElementType();
4495         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
4496         MaybeAlign ArgAlign = CB.getParamAlign(ArgNo);
4497         if (!ArgAlign || *ArgAlign < Align(8))
4498           ArgAlign = Align(8);
4499         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
4500         if (!IsFixed) {
4501           Value *Base = getShadowPtrForVAArgument(
4502               RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
4503           if (Base) {
4504             Value *AShadowPtr, *AOriginPtr;
4505             std::tie(AShadowPtr, AOriginPtr) =
4506                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
4507                                        kShadowTLSAlignment, /*isStore*/ false);
4508 
4509             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
4510                              kShadowTLSAlignment, ArgSize);
4511           }
4512         }
4513         VAArgOffset += alignTo(ArgSize, 8);
4514       } else {
4515         Value *Base;
4516         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4517         uint64_t ArgAlign = 8;
4518         if (A->getType()->isArrayTy()) {
4519           // Arrays are aligned to element size, except for long double
4520           // arrays, which are aligned to 8 bytes.
4521           Type *ElementTy = A->getType()->getArrayElementType();
4522           if (!ElementTy->isPPC_FP128Ty())
4523             ArgAlign = DL.getTypeAllocSize(ElementTy);
4524         } else if (A->getType()->isVectorTy()) {
4525           // Vectors are naturally aligned.
4526           ArgAlign = DL.getTypeAllocSize(A->getType());
4527         }
4528         if (ArgAlign < 8)
4529           ArgAlign = 8;
4530         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
4531         if (DL.isBigEndian()) {
4532           // Adjust the shadow offset for arguments smaller than 8 bytes to
4533           // match the placement of bits on a big-endian system.
4534           if (ArgSize < 8)
4535             VAArgOffset += (8 - ArgSize);
4536         }
4537         if (!IsFixed) {
4538           Base = getShadowPtrForVAArgument(A->getType(), IRB,
4539                                            VAArgOffset - VAArgBase, ArgSize);
4540           if (Base)
4541             IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4542         }
4543         VAArgOffset += ArgSize;
4544         VAArgOffset = alignTo(VAArgOffset, 8);
4545       }
4546       if (IsFixed)
4547         VAArgBase = VAArgOffset;
4548     }
4549 
4550     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
4551                                                 VAArgOffset - VAArgBase);
4552     // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
4553     // new class member; it holds the total size of all varargs.
4554     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4555   }
4556 
4557   /// Compute the shadow address for a given va_arg.
4558   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4559                                    unsigned ArgOffset, unsigned ArgSize) {
4560     // Make sure we don't overflow __msan_va_arg_tls.
4561     if (ArgOffset + ArgSize > kParamTLSSize)
4562       return nullptr;
4563     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4564     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4565     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4566                               "_msarg");
4567   }
4568 
4569   void visitVAStartInst(VAStartInst &I) override {
4570     IRBuilder<> IRB(&I);
4571     VAStartInstrumentationList.push_back(&I);
4572     Value *VAListTag = I.getArgOperand(0);
4573     Value *ShadowPtr, *OriginPtr;
4574     const Align Alignment = Align(8);
4575     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4576         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4577     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4578                      /* size */ 8, Alignment, false);
4579   }
4580 
4581   void visitVACopyInst(VACopyInst &I) override {
4582     IRBuilder<> IRB(&I);
4583     Value *VAListTag = I.getArgOperand(0);
4584     Value *ShadowPtr, *OriginPtr;
4585     const Align Alignment = Align(8);
4586     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4587         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4588     // Unpoison the whole __va_list_tag.
4589     // FIXME: magic ABI constants.
4590     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4591                      /* size */ 8, Alignment, false);
4592   }
4593 
4594   void finalizeInstrumentation() override {
4595     assert(!VAArgSize && !VAArgTLSCopy &&
4596            "finalizeInstrumentation called twice");
4597     IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4598     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4599     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
4600                                     VAArgSize);
4601 
4602     if (!VAStartInstrumentationList.empty()) {
4603       // If there is a va_start in this function, make a backup copy of
4604       // va_arg_tls somewhere in the function entry block.
4605       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4606       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4607     }
4608 
4609     // Instrument va_start.
4610     // Copy va_list shadow from the backup copy of the TLS contents.
4611     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4612       CallInst *OrigInst = VAStartInstrumentationList[i];
4613       IRBuilder<> IRB(OrigInst->getNextNode());
4614       Value *VAListTag = OrigInst->getArgOperand(0);
4615       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4616       Value *RegSaveAreaPtrPtr =
4617           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4618                              PointerType::get(RegSaveAreaPtrTy, 0));
4619       Value *RegSaveAreaPtr =
4620           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4621       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4622       const Align Alignment = Align(8);
4623       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4624           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4625                                  Alignment, /*isStore*/ true);
4626       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4627                        CopySize);
4628     }
4629   }
4630 };
4631 
4632 /// SystemZ-specific implementation of VarArgHelper.
4633 struct VarArgSystemZHelper : public VarArgHelper {
4634   static const unsigned SystemZGpOffset = 16;
4635   static const unsigned SystemZGpEndOffset = 56;
4636   static const unsigned SystemZFpOffset = 128;
4637   static const unsigned SystemZFpEndOffset = 160;
4638   static const unsigned SystemZMaxVrArgs = 8;
4639   static const unsigned SystemZRegSaveAreaSize = 160;
4640   static const unsigned SystemZOverflowOffset = 160;
4641   static const unsigned SystemZVAListTagSize = 32;
4642   static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
4643   static const unsigned SystemZRegSaveAreaPtrOffset = 24;
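
       // For reference, the s390x ELF ABI va_list layout these offsets assume
       // (a sketch, not a definition used by this pass):
       //   struct __va_list_tag {
       //     long __gpr;                // offset  0
       //     long __fpr;                // offset  8
       //     void *__overflow_arg_area; // offset 16
       //     void *__reg_save_area;     // offset 24
       //   };                           // 32 bytes total (SystemZVAListTagSize)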
4644 
4645   Function &F;
4646   MemorySanitizer &MS;
4647   MemorySanitizerVisitor &MSV;
4648   Value *VAArgTLSCopy = nullptr;
4649   Value *VAArgTLSOriginCopy = nullptr;
4650   Value *VAArgOverflowSize = nullptr;
4651 
4652   SmallVector<CallInst *, 16> VAStartInstrumentationList;
4653 
4654   enum class ArgKind {
4655     GeneralPurpose,
4656     FloatingPoint,
4657     Vector,
4658     Memory,
4659     Indirect,
4660   };
4661 
4662   enum class ShadowExtension { None, Zero, Sign };
4663 
4664   VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
4665                       MemorySanitizerVisitor &MSV)
4666       : F(F), MS(MS), MSV(MSV) {}
4667 
4668   ArgKind classifyArgument(Type *T, bool IsSoftFloatABI) {
4669     // T is a SystemZABIInfo::classifyArgumentType() output, and there are
4670     // only a few possibilities of what it can be. In particular, enums, single
4671     // element structs and large types have already been taken care of.
4672 
4673     // Some i128 and fp128 arguments are converted to pointers only in the
4674     // back end.
4675     if (T->isIntegerTy(128) || T->isFP128Ty())
4676       return ArgKind::Indirect;
4677     if (T->isFloatingPointTy())
4678       return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
4679     if (T->isIntegerTy() || T->isPointerTy())
4680       return ArgKind::GeneralPurpose;
4681     if (T->isVectorTy())
4682       return ArgKind::Vector;
4683     return ArgKind::Memory;
4684   }
4685 
4686   ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
4687     // ABI says: "One of the simple integer types no more than 64 bits wide.
4688     // ... If such an argument is shorter than 64 bits, replace it by a full
4689     // 64-bit integer representing the same number, using sign or zero
4690     // extension". Shadow for an integer argument has the same type as the
4691     // argument itself, so it can be sign or zero extended as well.
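         // For example, an i32 vararg passed with the SExt attribute occupies
         // a full 64-bit slot, so its 32-bit shadow is sign-extended to 64 bits
         // as well (a poisoned sign bit then poisons the whole slot).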
4692     bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
4693     bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
4694     if (ZExt) {
4695       assert(!SExt);
4696       return ShadowExtension::Zero;
4697     }
4698     if (SExt) {
4699       assert(!ZExt);
4700       return ShadowExtension::Sign;
4701     }
4702     return ShadowExtension::None;
4703   }
4704 
4705   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4706     bool IsSoftFloatABI = CB.getCalledFunction()
4707                               ->getFnAttribute("use-soft-float")
4708                               .getValueAsString() == "true";
4709     unsigned GpOffset = SystemZGpOffset;
4710     unsigned FpOffset = SystemZFpOffset;
4711     unsigned VrIndex = 0;
4712     unsigned OverflowOffset = SystemZOverflowOffset;
4713     const DataLayout &DL = F.getParent()->getDataLayout();
4714     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4715          ++ArgIt) {
4716       Value *A = *ArgIt;
4717       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4718       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4719       // SystemZABIInfo does not produce ByVal parameters.
4720       assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
4721       Type *T = A->getType();
4722       ArgKind AK = classifyArgument(T, IsSoftFloatABI);
4723       if (AK == ArgKind::Indirect) {
4724         T = PointerType::get(T, 0);
4725         AK = ArgKind::GeneralPurpose;
4726       }
4727       if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
4728         AK = ArgKind::Memory;
4729       if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
4730         AK = ArgKind::Memory;
4731       if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
4732         AK = ArgKind::Memory;
4733       Value *ShadowBase = nullptr;
4734       Value *OriginBase = nullptr;
4735       ShadowExtension SE = ShadowExtension::None;
4736       switch (AK) {
4737       case ArgKind::GeneralPurpose: {
4738         // Always keep track of GpOffset, but store shadow only for varargs.
4739         uint64_t ArgSize = 8;
4740         if (GpOffset + ArgSize <= kParamTLSSize) {
4741           if (!IsFixed) {
4742             SE = getShadowExtension(CB, ArgNo);
4743             uint64_t GapSize = 0;
4744             if (SE == ShadowExtension::None) {
4745               uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
4746               assert(ArgAllocSize <= ArgSize);
4747               GapSize = ArgSize - ArgAllocSize;
4748             }
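                 // With no extension attribute the shadow keeps the argument's
                 // own width; since SystemZ is big-endian, the value sits in
                 // the last ArgAllocSize bytes of its 8-byte slot, hence the
                 // GapSize offset below.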
4749             ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
4750             if (MS.TrackOrigins)
4751               OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
4752           }
4753           GpOffset += ArgSize;
4754         } else {
4755           GpOffset = kParamTLSSize;
4756         }
4757         break;
4758       }
4759       case ArgKind::FloatingPoint: {
4760         // Always keep track of FpOffset, but store shadow only for varargs.
4761         uint64_t ArgSize = 8;
4762         if (FpOffset + ArgSize <= kParamTLSSize) {
4763           if (!IsFixed) {
4764             // PoP says: "A short floating-point datum requires only the
4765             // left-most 32 bit positions of a floating-point register".
4766             // Therefore, in contrast to AK_GeneralPurpose and AK_Memory,
4767             // don't extend shadow and don't mind the gap.
4768             ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
4769             if (MS.TrackOrigins)
4770               OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
4771           }
4772           FpOffset += ArgSize;
4773         } else {
4774           FpOffset = kParamTLSSize;
4775         }
4776         break;
4777       }
4778       case ArgKind::Vector: {
4779         // Keep track of VrIndex. No need to store shadow, since vector varargs
4780         // go through AK_Memory.
4781         assert(IsFixed);
4782         VrIndex++;
4783         break;
4784       }
4785       case ArgKind::Memory: {
4786         // Keep track of OverflowOffset and store shadow only for varargs.
4787         // Ignore fixed args, since we need to copy only the vararg portion of
4788         // the overflow area shadow.
4789         if (!IsFixed) {
4790           uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
4791           uint64_t ArgSize = alignTo(ArgAllocSize, 8);
4792           if (OverflowOffset + ArgSize <= kParamTLSSize) {
4793             SE = getShadowExtension(CB, ArgNo);
4794             uint64_t GapSize =
4795                 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
4796             ShadowBase =
4797                 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
4798             if (MS.TrackOrigins)
4799               OriginBase =
4800                   getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
4801             OverflowOffset += ArgSize;
4802           } else {
4803             OverflowOffset = kParamTLSSize;
4804           }
4805         }
4806         break;
4807       }
4808       case ArgKind::Indirect:
4809         llvm_unreachable("Indirect must be converted to GeneralPurpose");
4810       }
4811       if (ShadowBase == nullptr)
4812         continue;
4813       Value *Shadow = MSV.getShadow(A);
4814       if (SE != ShadowExtension::None)
4815         Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
4816                                       /*Signed*/ SE == ShadowExtension::Sign);
4817       ShadowBase = IRB.CreateIntToPtr(
4818           ShadowBase, PointerType::get(Shadow->getType(), 0), "_msarg_va_s");
4819       IRB.CreateStore(Shadow, ShadowBase);
4820       if (MS.TrackOrigins) {
4821         Value *Origin = MSV.getOrigin(A);
4822         unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
4823         MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
4824                         kMinOriginAlignment);
4825       }
4826     }
4827     Constant *OverflowSize = ConstantInt::get(
4828         IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
4829     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4830   }
4831 
4832   Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
4833     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4834     return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4835   }
4836 
4837   Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
4838     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
4839     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4840     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
4841                               "_msarg_va_o");
4842   }
4843 
4844   void unpoisonVAListTagForInst(IntrinsicInst &I) {
4845     IRBuilder<> IRB(&I);
4846     Value *VAListTag = I.getArgOperand(0);
4847     Value *ShadowPtr, *OriginPtr;
4848     const Align Alignment = Align(8);
4849     std::tie(ShadowPtr, OriginPtr) =
4850         MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
4851                                /*isStore*/ true);
4852     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4853                      SystemZVAListTagSize, Alignment, false);
4854   }
4855 
4856   void visitVAStartInst(VAStartInst &I) override {
4857     VAStartInstrumentationList.push_back(&I);
4858     unpoisonVAListTagForInst(I);
4859   }
4860 
4861   void visitVACopyInst(VACopyInst &I) override { unpoisonVAListTagForInst(I); }
4862 
4863   void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
4864     Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4865     Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
4866         IRB.CreateAdd(
4867             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4868             ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
4869         PointerType::get(RegSaveAreaPtrTy, 0));
4870     Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4871     Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4872     const Align Alignment = Align(8);
4873     std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4874         MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
4875                                /*isStore*/ true);
4876     // TODO(iii): copy only fragments filled by visitCallBase()
4877     IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4878                      SystemZRegSaveAreaSize);
4879     if (MS.TrackOrigins)
4880       IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4881                        Alignment, SystemZRegSaveAreaSize);
4882   }
4883 
4884   void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
4885     Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4886     Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
4887         IRB.CreateAdd(
4888             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4889             ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
4890         PointerType::get(OverflowArgAreaPtrTy, 0));
4891     Value *OverflowArgAreaPtr =
4892         IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4893     Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4894     const Align Alignment = Align(8);
4895     std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4896         MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
4897                                Alignment, /*isStore*/ true);
4898     Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
4899                                            SystemZOverflowOffset);
4900     IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
4901                      VAArgOverflowSize);
4902     if (MS.TrackOrigins) {
4903       SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
4904                                       SystemZOverflowOffset);
4905       IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
4906                        VAArgOverflowSize);
4907     }
4908   }
4909 
4910   void finalizeInstrumentation() override {
4911     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4912            "finalizeInstrumentation called twice");
4913     if (!VAStartInstrumentationList.empty()) {
4914       // If there is a va_start in this function, make a backup copy of
4915       // va_arg_tls somewhere in the function entry block.
4916       IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4917       VAArgOverflowSize =
4918           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4919       Value *CopySize =
4920           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
4921                         VAArgOverflowSize);
4922       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4923       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4924       if (MS.TrackOrigins) {
4925         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4926         IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
4927                          Align(8), CopySize);
4928       }
4929     }
4930 
4931     // Instrument va_start.
4932     // Copy va_list shadow from the backup copy of the TLS contents.
4933     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
4934          VaStartNo < VaStartNum; VaStartNo++) {
4935       CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
4936       IRBuilder<> IRB(OrigInst->getNextNode());
4937       Value *VAListTag = OrigInst->getArgOperand(0);
4938       copyRegSaveArea(IRB, VAListTag);
4939       copyOverflowArea(IRB, VAListTag);
4940     }
4941   }
4942 };
4943 
4944 /// A no-op implementation of VarArgHelper.
4945 struct VarArgNoOpHelper : public VarArgHelper {
4946   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
4947                    MemorySanitizerVisitor &MSV) {}
4948 
4949   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}
4950 
4951   void visitVAStartInst(VAStartInst &I) override {}
4952 
4953   void visitVACopyInst(VACopyInst &I) override {}
4954 
4955   void finalizeInstrumentation() override {}
4956 };
4957 
4958 } // end anonymous namespace
4959 
4960 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
4961                                         MemorySanitizerVisitor &Visitor) {
4962   // VarArg handling is only implemented on AMD64, MIPS64, AArch64, PowerPC64
4963   // and SystemZ. False positives are possible on other platforms.
4964   Triple TargetTriple(Func.getParent()->getTargetTriple());
4965   if (TargetTriple.getArch() == Triple::x86_64)
4966     return new VarArgAMD64Helper(Func, Msan, Visitor);
4967   else if (TargetTriple.isMIPS64())
4968     return new VarArgMIPS64Helper(Func, Msan, Visitor);
4969   else if (TargetTriple.getArch() == Triple::aarch64)
4970     return new VarArgAArch64Helper(Func, Msan, Visitor);
4971   else if (TargetTriple.getArch() == Triple::ppc64 ||
4972            TargetTriple.getArch() == Triple::ppc64le)
4973     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
4974   else if (TargetTriple.getArch() == Triple::systemz)
4975     return new VarArgSystemZHelper(Func, Msan, Visitor);
4976   else
4977     return new VarArgNoOpHelper(Func, Msan, Visitor);
4978 }
4979 
4980 bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
4981   if (!CompileKernel && F.getName() == kMsanModuleCtorName)
4982     return false;
4983 
4984   MemorySanitizerVisitor Visitor(F, *this, TLI);
4985 
4986   // Clear out readonly/readnone attributes.
4987   AttrBuilder B;
4988   B.addAttribute(Attribute::ReadOnly)
4989       .addAttribute(Attribute::ReadNone)
4990       .addAttribute(Attribute::WriteOnly)
4991       .addAttribute(Attribute::ArgMemOnly)
4992       .addAttribute(Attribute::Speculatable);
4993   F.removeAttributes(AttributeList::FunctionIndex, B);
4994 
4995   return Visitor.runOnFunction();
4996 }
4997